diff --git a/manga-py-stable_1.x/.codeclimate.yml b/manga-py-stable_1.x/.codeclimate.yml new file mode 100644 index 0000000..dc62399 --- /dev/null +++ b/manga-py-stable_1.x/.codeclimate.yml @@ -0,0 +1,23 @@ +exclude_paths: +- 'tests/' +- 'manga_py/storage/' +- 'manga_py/providers/' +- 'manga_py/gui/langs/*.json' +- 'helpers/' +- 'Dockerfile' +- 'LICENSE' +- 'README*' +- '*.yml' +- '*.txt' +- '.scrutinizer.yml' +- '.travis.yml' +- '.codeclimate.yml' +- '.gitignore' +- '.gitmodules' +languages: + Python: true +pep8: + enabled: true + checks: + E501: + enabled: false diff --git a/manga-py-stable_1.x/.gitignore b/manga-py-stable_1.x/.gitignore new file mode 100644 index 0000000..f46f7ca --- /dev/null +++ b/manga-py-stable_1.x/.gitignore @@ -0,0 +1,107 @@ +.idea/ +Manga/ +tests/temp/ + +manga_py/rebreakcaptcha + +manga_py/storage/.passwords.json +manga_py/storage/chromedriver +manga_py/storage/chromedriver.exe + +### Python template +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +env/ +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +*.egg-info/ +.installed.cfg +*.egg + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. +*.manifest +*.spec +!helpers/manga.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*,cover +.hypothesis/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# pyenv +.python-version + +# celery beat schedule file +celerybeat-schedule + +# SageMath parsed files +*.sage.py + +# dotenv +.env + +# virtualenv +.venv +venv/ +ENV/ + +# Spyder project settings +.spyderproject + +# Rope project settings +.ropeproject + diff --git a/manga-py-stable_1.x/.scrutinizer.yml b/manga-py-stable_1.x/.scrutinizer.yml new file mode 100644 index 0000000..47ae29d --- /dev/null +++ b/manga-py-stable_1.x/.scrutinizer.yml @@ -0,0 +1,39 @@ +checks: + python: + code_rating: true + duplicate_code: true + javascript: true + +build: + nodes: + analysis: + project_setup: + override: true + tests: + before: + - sudo apt remove chromium-browser -y + - sudo apt update + - sudo apt install -y dpkg + - pip3 install coverage + - pip3 install -r requirements_dev.txt + - wget https://dl.google.com/linux/direct/google-chrome-stable_current_amd64.deb -O /tmp/chrome.deb + - sudo dpkg -i /tmp/chrome.deb + - sudo apt install -y -f --fix-missing + override: + - + command: 'coverage run --omit=manga_py/providers/*.py --source=manga_py run_tests.py' + coverage: + file: '.coverage' + format: 'py-cc' + - py-scrutinizer-run + environment: + python: 3.5.3 + node: 6.0.0 +filter: + excluded_paths: + - manga_py/crypt/sunday_webry_com.py + - manga_py/crypt/aes.js + - manga_py/crypt/aes_zp.js + - tests/*.py + - run_tests.py + - manga.py diff --git a/manga-py-stable_1.x/.travis.yml b/manga-py-stable_1.x/.travis.yml new file mode 100644 index 0000000..69b900a --- /dev/null +++ b/manga-py-stable_1.x/.travis.yml @@ -0,0 +1,63 @@ +env: + global: + - CC_TEST_REPORTER_ID=ff7add7a0f454aff7e13c739a06a7aba8e5c8229d3e776e051294341b4721871 +addons: + 
artifacts: true +language: python +dist: xenial +python: + - "3.5" + - "3.6" + - "3.7" +# - "nightly" +cache: pip +before_install: + - sudo apt-get -y install nodejs python-setuptools libwebp-dev + - python -V + - pwd + - chmod +x helpers/after_script.sh + - chmod +x helpers/before_deploy.sh + - chmod +x helpers/before_script.sh + - source ./helpers/before_script.sh +install: + - npm install -g sass node-sass html-minifier + - pip install --upgrade coverage codeclimate-test-reporter setuptools pyinstaller + - pip install -r requirements_dev.txt +before_script: + - curl -L https://codeclimate.com/downloads/test-reporter/test-reporter-latest-linux-amd64 > ./cc-test-reporter + - chmod +x ./cc-test-reporter + - ./cc-test-reporter before-build +script: + - if [[ "$TRAVIS_TAG" = "" ]]; then coverage run --omit=manga_py/providers/*.py --source=manga_py run_tests.py; fi +after_script: + - source ./helpers/after_script.sh +before_deploy: + - echo "$allow_deploy" + - echo "Start make gh-pages content" + - source ./helpers/before_deploy.sh +deploy: + - provider: pages + edge: true + local-dir: helpers/gh_pages_content + target-branch: gh-pages + github-token: $GH_TOKEN + skip-cleanup: true + skip_cleanup: true + on: + branch: stable_1.x + condition: $TRAVIS_PYTHON_VERSION == "3.6" + tags: false + - provider: pypi + server: https://upload.pypi.org/legacy/ + user: 1271 + password: $PYPI_PASS + skip_cleanup: true + on: + branch: stable_1.x + tags: true + condition: $TRAVIS_PYTHON_VERSION == "3.6" +# fqdn: yuru-yuri.sttv.me +#see https://docs.travis-ci.com/user/deployment/pages/ +allow_failures: + - python: nightly + - python: 3.5 diff --git a/manga-py-stable_1.x/LICENSE b/manga-py-stable_1.x/LICENSE new file mode 100644 index 0000000..b3c0318 --- /dev/null +++ b/manga-py-stable_1.x/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2017 yuru-yuri + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/manga-py-stable_1.x/README.rst b/manga-py-stable_1.x/README.rst new file mode 100644 index 0000000..f9191eb --- /dev/null +++ b/manga-py-stable_1.x/README.rst @@ -0,0 +1,180 @@ +Manga-py |Travis CI result| +=================================== + +Universal assistant download manga. +''''''''''''''''''''''''''''''''''' + +Approximately 300 providers are available now. 
+'''''''''''''''''''''''''''''''''''''''''''''' + +|Scrutinizer CI result| |Scrutinizer CI coverage| |GitHub issues| + +|Code Climate| |Issue Count| |GitHub repo size| |PyPI - size| + +|PyPI - Python Version| |PyPi version| |PyPI - Downloads| + +Supported resources +------------------- + +see: + +- https://manga-py.com/manga-py/#resources-list +- https://manga-py.github.io/manga-py/#resources-list (alternative) +- https://yuru-yuri.github.io/manga-py/#resources-list (deprecated) + +Plans for improvement: +---------------------- + +see: + +- https://manga-py.com/manga-py/improvement.html +- https://manga-py.github.io/manga-py/improvement.html (alternative) + + +How to use +---------- + +Installation +~~~~~~~~~~~~ + +1) Download python 3.5+ https://www.anaconda.com/downloads +2) Install pip package: + + .. code:: bash + + pip install manga-py + +3) Run program: + +.. code:: bash + + manga-py http://manga.url/manga/name # For download manga + +Installation on the Android +~~~~~~~~~~~~~~~~~~~~~~~~~~~ +See https://github.com/manga-py/manga-py/issues/48 + +Docker image: +~~~~~~~~~~~~~ +See: + +- https://hub.docker.com/r/mangadl/manga-py/tags?page=1&ordering=last_updated +- https://github.com/manga-py/manga-py-docker + + +Downloading manga +----------------- + +**:warning:For sites with cloudflare protect need installed Node.js** + +**:warning:Notice! By default, the mode of multithreaded image loading +is enabled** + +**To change this behavior, add the key --no-multi-threads** + +.. code:: bash + + # download to "./Manga/" directory + manga-py http://manga-url-here/manga-name + # download to "./Manga/Manga Name" directory + manga-py http://manga-url-here/manga-name --name 'Manga Name' + # or download to /manga/destination/path/ directory + manga-py http://manga-url-here/manga-name -d /manga/destination/path/ + # skip 3 volumes + manga-py --skip-volumes 3 http://manga-url-here/manga-name + # skip 3 volumes and download 2 volumes + manga-py --skip-volumes 3 --max-volumes 2 http://manga-url-here/manga-name + # reverse volumes downloading (24 -> 1) + manga-py --reverse-downloading http://manga-url-here/manga-name + # Disable progressbar + manga-py --no-progress http://manga-url-here/manga-name + + +Embedded example: +----------------- +https://github.com/manga-py/manga-py/blob/stable_1.x/embedded.md + +Help +---- + +.. code:: bash + + manga-py -h + # or + manga-py --help + +Suported by JetBrains +--------------------- +|JetBrains logo| + + +Manga-py Docker +--------------- + +1. Install docker + - Summary https://docs.docker.com/install/ + - Mac https://docs.docker.com/docker-for-mac/install/ + - Windows https://docs.docker.com/docker-for-windows/install/ + +2. Install manga-py + +.. code:: bash + docker pull mangadl/manga-py + + +3. Run it + +.. code:: bash + + docker run -it -v ${PWD}:/home/manga mangadl/manga-py + + +Or docker-compose: + +1. Install docker compose https://docs.docker.com/compose/install/ + +2. Download manga-py-docker https://github.com/manga-py/manga-py-docker/archive/master.zip + +3. Unzip it + +4. Run compose + +.. code:: bash + + # build docker + docker-compose build + # run it + docker-compose run manga_py + + +.. |Travis CI result| image:: https://travis-ci.com/manga-py/manga-py.svg?branch=stable_1.x + :target: https://travis-ci.com/manga-py/manga-py/branches +.. |Code Climate| image:: https://codeclimate.com/github/manga-py/manga-py/badges/gpa.svg + :target: https://codeclimate.com/github/manga-py/manga-py +.. 
|Issue Count| image:: https://codeclimate.com/github/manga-py/manga-py/badges/issue_count.svg + :target: https://codeclimate.com/github/manga-py/manga-py +.. |PyPI - Python Version| image:: https://img.shields.io/pypi/pyversions/manga-py.svg + :target: https://pypi.org/project/manga-py/ +.. |Scrutinizer CI result| image:: https://scrutinizer-ci.com/g/manga-py/manga-py/badges/quality-score.png?b=stable_1.x + :target: https://scrutinizer-ci.com/g/manga-py/manga-py +.. |Scrutinizer CI coverage| image:: https://scrutinizer-ci.com/g/manga-py/manga-py/badges/coverage.png?b=stable_1.x + :target: https://scrutinizer-ci.com/g/manga-py/manga-py +.. |GitHub issues| image:: https://img.shields.io/github/issues/manga-py/manga-py.svg + :target: https://github.com/manga-py/manga-py/issues +.. |PyPi version| image:: https://badge.fury.io/py/manga-py.svg + :alt: PyPI + :target: https://pypi.org/project/manga-py/ +.. |JetBrains logo| image:: https://github.com/yuru-yuri/manga-py/raw/stable_1.x/.github/jetbrains.png + :alt: JetBrains + :target: https://www.jetbrains.com/?from=manga-py +.. |MicroBadger Layers| image:: https://img.shields.io/microbadger/layers/mangadl/manga-py + :alt: MicroBadger Layers +.. |MicroBadger Size| image:: https://img.shields.io/microbadger/image-size/mangadl/manga-py + :alt: MicroBadger Size +.. |GitHub repo size| image:: https://img.shields.io/github/repo-size/manga-py/manga-py + :alt: GitHub repo size +.. |PyPI - Downloads| image:: https://img.shields.io/pypi/dm/manga-py + :alt: PyPI - Downloads +.. |PyPI - size| image:: https://img.shields.io/badge/dynamic/json?color=success&label=PyPI+size&query=%24.size&url=https://sttv.me/manga-py.json&?cacheSeconds=3600&suffix=+Kb + :alt: PyPI - size + diff --git a/manga-py-stable_1.x/docker-compose.yaml b/manga-py-stable_1.x/docker-compose.yaml new file mode 100644 index 0000000..fb5ac55 --- /dev/null +++ b/manga-py-stable_1.x/docker-compose.yaml @@ -0,0 +1,12 @@ +version: '3.0' + +services: + manga_py: + container_name: mangadl/manga-py + image: python + build: ./helpers/python + volumes: + - ./:/home/manga + +volumes: + manga_volume: diff --git a/manga-py-stable_1.x/embedded.md b/manga-py-stable_1.x/embedded.md new file mode 100644 index 0000000..84d25b0 --- /dev/null +++ b/manga-py-stable_1.x/embedded.md @@ -0,0 +1,72 @@ +### Use manga-py in your project + + +```python +from manga_py.parser import Parser +from manga_py.info import Info + + +my_awesome_handler = open('my-handler') + + +class MyAwesomeInfo(Info): + pass + + +# main class (you will have your own) +class MyAwesomeClass: + args = {} + """ + is just a Namespace or dict with arguments + (filled below. You can implement your implementation. The main thing is to have all keys possible) + see manga_py.cli.args.get_cli_arguments() + """ + + parser = None # the found parser gets here (see below) + + def get_info(self): + MyAwesomeInfo(self.args) # use the Info class from manga-py or overload the Info class from manga-py + + def start(self): + self.parser = Parser(self.args) + try: + self.parser.init_provider( + progress=self.progress, + log=self.print, + quest=self.quest, + quest_password=self.quest_password, + info=self.get_info(), + ) + except AttributeError as e: + raise e + self.parser.start() # provider main method + + def progress(self, items_count: int, current_item: int, re_init: bool = False): # the same progress function. re_init = True means "next chapter" + # simple progress + pass + + def print(self, text, **kwargs): + """ + Not used everywhere. 
Better reload global print method + """ + print(text, **kwargs, file=my_awesome_handler) + + def quest(self, variants: enumerate, title: str, select_type=0): # 0 = single, 1 = multiple + if select_type == 0: + print(' Question ') + return 'Answer' + else: + print(' Question multiple answers') + return [ + 'Answer 1', + 'Answer 2', + ... + ] + + def quest_password(self, title): + """ + used to ask user password + """ + print(title) + return 'my_awesome_password' +``` diff --git a/manga-py-stable_1.x/helpers/1.js b/manga-py-stable_1.x/helpers/1.js new file mode 100644 index 0000000..abce740 --- /dev/null +++ b/manga-py-stable_1.x/helpers/1.js @@ -0,0 +1,14 @@ +function ytc(y) { + var x = "", y = y.split(" "); + for (var i = 0, n = y.length; i < n; i++) x += String.fromCharCode(y[i]); + return x; +} + +function kxatz() { + for (i = ytaw.length - 1; i >= 0; i--) { + ytaw[i] = ytc(ytaw[i]); + var obj = $('#imgs .wrap_img:eq(' + i + ') img'), alt = $('#imgs').attr('data-alt'); + obj.attr('alt', alt + ' - ' + obj.attr('alt')); + obj.attr('data-src', ytaw[i]); + } +} \ No newline at end of file diff --git a/manga-py-stable_1.x/helpers/after_script.sh b/manga-py-stable_1.x/helpers/after_script.sh new file mode 100644 index 0000000..90f57b2 --- /dev/null +++ b/manga-py-stable_1.x/helpers/after_script.sh @@ -0,0 +1,7 @@ +#!/usr/bin/env bash + +if [[ "$allow_deploy" = "true" ]] +then + coverage xml + ./cc-test-reporter after-build -t coverage.py --exit-code $TRAVIS_TEST_RESULT || true +fi diff --git a/manga-py-stable_1.x/helpers/animextremist_com.py b/manga-py-stable_1.x/helpers/animextremist_com.py new file mode 100644 index 0000000..dce361b --- /dev/null +++ b/manga-py-stable_1.x/helpers/animextremist_com.py @@ -0,0 +1,22 @@ +from requests import get +from os import system, path +from lxml.html import document_fromstring + +_path = path.dirname(path.dirname(path.realpath(__file__))) + +all_manga_list = None +n = 0 +base_path = 'http://animextremist.com/mangas-online/' +while n < 10: + try: + all_manga_list = document_fromstring(get(base_path)).cssselect('li > a + a') + break + except Exception: + pass + n += 1 + +for i in all_manga_list: + href = i.get('href') + print('Downloading %s' % href) + _str = 'cd {}; python3 manga.py --cli -i -u {}' + system(_str.format(_path, href)) diff --git a/manga-py-stable_1.x/helpers/before_deploy.sh b/manga-py-stable_1.x/helpers/before_deploy.sh new file mode 100644 index 0000000..0a8c0a5 --- /dev/null +++ b/manga-py-stable_1.x/helpers/before_deploy.sh @@ -0,0 +1,24 @@ +#!/usr/bin/env bash + +if [[ "$allow_deploy" = "true" ]] +then + + if [[ "$TRAVIS_TAG" != "" ]] + then + echo "Skip build bin package" +# echo "Start build bin package" +# cp helpers/.builder.py . +# cp helpers/.providers_updater.py . +# cp helpers/manga.spec . 
+# python .providers_updater.py +# pyinstaller manga.spec --log-level CRITICAL -y -F + else + echo "Make gh-pages" + node-sass helpers/gh_pages_content/style.scss helpers/gh_pages_content/style.css --output-style compressed + html-minifier helpers/gh_pages_content/index.html --output helpers/gh_pages_content/index.html --html5 --remove-comments --remove-tag-whitespace --collapse-inline-tag-whitespace --remove-attribute-quotes --collapse-whitespace + html-minifier helpers/gh_pages_content/improvement.html --output helpers/gh_pages_content/improvement.html --html5 --remove-comments --remove-tag-whitespace --collapse-inline-tag-whitespace --remove-attribute-quotes --collapse-whitespace + git add -A + git commit -a -m upd + fi + +fi diff --git a/manga-py-stable_1.x/helpers/before_script.sh b/manga-py-stable_1.x/helpers/before_script.sh new file mode 100644 index 0000000..4f8cfb8 --- /dev/null +++ b/manga-py-stable_1.x/helpers/before_script.sh @@ -0,0 +1,13 @@ +#!/usr/bin/env bash + +allow_deploy="false" + +py_version=$(python --version) + +if [[ "${py_version:7:-2}" = "3.5" ]] +then +allow_deploy="true" +fi +if [[ "$TRAVIS_TAG" = "" ]] +then wget 'https://dl.google.com/linux/direct/google-chrome-stable_current_amd64.deb' -O /tmp/chrome.deb && sudo dpkg -i /tmp/chrome.deb && sudo apt-get install -y -f --fix-missing +fi diff --git a/manga-py-stable_1.x/helpers/gh_pages.py b/manga-py-stable_1.x/helpers/gh_pages.py new file mode 100644 index 0000000..9a63f03 --- /dev/null +++ b/manga-py-stable_1.x/helpers/gh_pages.py @@ -0,0 +1,118 @@ +from manga_py.providers import providers_list +from manga_py.fs import root_path +from manga_py.meta import __repo_name__ +from json import dumps +from datetime import datetime + +start_items = [ + # [ address, (0 - not worked, 1 - worked, 2 - alias), 'Comment'] + # ['http://bato.to', 0, ' - Batoto will be closing down permanently (Jan 18, 2018)'], + ['http://bulumanga.com', 0, ' - Closed'], + ['http://bwahahacomics.ru', 0, ' - Very little content. Possibly, will be done in the future.'], + ['http://com-x.life', 1, ' - One thread only!!! --no-multi-threads. '], + ['http://comic-walker.com', 0, ' - Maybe...'], + ['http://comico.jp', 1, ' - only public downloading now'], + ['http://comixology.com', 0, ' - Buy only. Not reading.'], + ['http://e-hentai.org', 1, ''], + ['http://eatmanga.me', 1, ''], + ['http://dm5.com', 0, ''], + ['http://gogomanga.co', 1, ''], + ['http://heavenmanga.biz', 2, '- See heavenmanga.site'], + ['http://hentai-chan.me', 1, '- Need fill access file'], + ['http://heymanga.me', 1, ''], + ['http://comic.k-manga.jp', 0, ' - Maybe...'], + ['http://japscan.com', 2, ' - See japscan.to'], + ['http://japscan.cc', 2, ' - See japscan.to'], + ['http://lhscans.com', 1, '- See rawlh.com'], + ['http://luscious.net', 1, ''], + ['http://lezhin.com', 0, ' - Maybe...'], + ['http://manga-zone.org', 0, ' - Will not be implemented'], + ['http://mangaall.com', 2, '- See mangatrue.com'], + ['http://mangaforall.com', 1, ''], + ['http://mangafreak.net', 1, ', site down now'], + ['http://mangahead.me', 1, ', site down now'], + ['http://mangaleader.com', 1, ' site down now'], + ['http://mangamove.com', 1, ', site down now'], + ['http://manganel.com', 1, ', site down now'], + ['http://mangaroot.com', 1, ', site down now, one thread only!!! 
--no-multi-threads'], + ['http://mangatail.com', 2, '- See mangatail.me'], + ['http://mangatrue.com', 1, ' - Site down now'], + ['http://mangaz.com', 0, ' - Maybe...'], + ['http://mg-zip.com', 0, ' - Will not be implemented'], + ['http://raw-zip.com', 0, ' - Will not be implemented'], + ['http://rawdevart.com', 1, ', very little content'], + ['http://s-manga.net', 0, ' - Maybe'], + ['http://sunday-webry.com', 0, ' - Not worked decryption images now. In develop.'], + ['http://tapas.io', 1, ', only public downloading now'], + ['http://tsumino.com', 1, ''], + ['http://zip.raw.im', 0, ' - Will not be implemented'], + ['http://rawlh.com', 1, '- See lhscan.net'], + ['http://8muses.com', 0, '- Need decode page.'], + ['http://mangago.me', 0, '- Need decode page.'], + + ['http://digitalteam1.altervista.org', 0, ' - Site down now'], + ['http://heymanga.me', 0, ' - Site down now'], + ['http://lector.dangolinenofansub.com', 0, ' - See kumanga.com'], + ['http://lector.ytnofan.com', 0, ' - Site down now'], + ['http://leomanga.com', 0, ' - Site down now'], + ['http://mang.as', 0, ' - Site down now'], + ['http://santosfansub.com', 0, ' - Site down now'], +] + + +_start_items = [i[0] for i in start_items] + + +def merge(*providers): + for p in providers: + yield from providers_list[p] + + +def clean(providers): + _list = {} + for i in providers: + _ = i.find('/') + if not ~_: + _ = i.strip('()') + else: + _ = i[:_].strip('()') + _list['http://' + _.replace(r'\.', '.')] = '' + return list(_list.keys()) + + +def aggregate(providers): + _list = [] + for i in providers: + if i not in _start_items: + _list.append([i, 1, '']) + return _list + + +def prepare_html(html): + with open(html, 'r') as r: + content = r.read() + with open(html, 'w') as w: + content = content.replace('__repo_name__', __repo_name__) + today = datetime.today() + content = content.replace('__last_update__', '{}/{:0>2}/{:0>2} {:0>2}-{:0>2}-{:0>2}'.format( + today.year, today.month, today.day, today.hour, today.minute, today.second + )) + w.write(content) + + +def build_providers(): + items = aggregate(clean(merge(*providers_list))) + start_items + items = sorted(items, key=lambda l: l[0]) + return dumps(items) + + +def main(): + path = root_path() + '/helpers/gh_pages_content/' + with open(path + 'providers.json', 'w') as w: + w.write(build_providers()) + prepare_html(path + 'index.html') + prepare_html(path + 'improvement.html') + + +# print(len(build_providers())) + diff --git a/manga-py-stable_1.x/helpers/gh_pages_content/#463018.png b/manga-py-stable_1.x/helpers/gh_pages_content/#463018.png new file mode 100644 index 0000000..2b5dc3f Binary files /dev/null and b/manga-py-stable_1.x/helpers/gh_pages_content/#463018.png differ diff --git a/manga-py-stable_1.x/helpers/gh_pages_content/LICENSE b/manga-py-stable_1.x/helpers/gh_pages_content/LICENSE new file mode 100644 index 0000000..b3c0318 --- /dev/null +++ b/manga-py-stable_1.x/helpers/gh_pages_content/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2017 yuru-yuri + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all 
+copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/manga-py-stable_1.x/helpers/gh_pages_content/README.md b/manga-py-stable_1.x/helpers/gh_pages_content/README.md new file mode 100644 index 0000000..6e82868 --- /dev/null +++ b/manga-py-stable_1.x/helpers/gh_pages_content/README.md @@ -0,0 +1 @@ +Go to [README.md](https://github.com/manga-py/manga-py/blob/master/README_RU.md) diff --git a/manga-py-stable_1.x/helpers/gh_pages_content/browserconfig.xml b/manga-py-stable_1.x/helpers/gh_pages_content/browserconfig.xml new file mode 100644 index 0000000..ee6b92d --- /dev/null +++ b/manga-py-stable_1.x/helpers/gh_pages_content/browserconfig.xml @@ -0,0 +1,2 @@ + +#ffffff diff --git a/manga-py-stable_1.x/helpers/gh_pages_content/download_btns.js b/manga-py-stable_1.x/helpers/gh_pages_content/download_btns.js new file mode 100644 index 0000000..454e0d8 --- /dev/null +++ b/manga-py-stable_1.x/helpers/gh_pages_content/download_btns.js @@ -0,0 +1,58 @@ + +((d) => { + d.addEventListener('DOMContentLoaded', () => { + /** global: repoUrl */ + if(typeof repoUrl == 'undefined') + { + // example: https://api.github.com/repos/manga-py/manga-py/releases/latest + // example: https://api.github.com/repos/yuru-yuri/manga-py/releases/latest + return; + } + fetch(repoUrl) + .then(r => r.json()) + .then((r) => { + const links = d.querySelector('#download-links'); + const tar = links.querySelector('.tar'); + const zip = links.querySelector('.zip'); + + tar.setAttribute('href', r.tarball_url); + tar.setAttribute('active', 'true'); + zip.setAttribute('href', r.zipball_url); + zip.setAttribute('active', 'true'); + }); + const ul = d.querySelector('#supported-list'); + if(!ul) + { + return; + } + fetch('./providers.json') + .then(r => r.json()) + .then((r) => { + let html = '', m = 0, done = 0; + const sites = []; + for(let i in r) { + if (!r.hasOwnProperty(i)) continue; + m+=1; + html += '
  • ' + + '' + + r[i][0] + ' ' + + r[i][2] + '
  • '; + done += r[i][1] ? 1 : 0; + r[i][1] && sites.push(r[i][0]); + } + ul.innerHTML = ('') + html; + + let sitesLen = sites.length; + const buttonElement = document.querySelector('#random-site'); + buttonElement.setAttribute('target', '_blank'); + buttonElement.addEventListener('click', () => { + const idx = parseInt(Math.random() * sitesLen); + buttonElement.setAttribute('href', sites[idx]); + return true; + }); + }); + }); +})(document); diff --git a/manga-py-stable_1.x/helpers/gh_pages_content/favicon.ico b/manga-py-stable_1.x/helpers/gh_pages_content/favicon.ico new file mode 100644 index 0000000..bfa49da Binary files /dev/null and b/manga-py-stable_1.x/helpers/gh_pages_content/favicon.ico differ diff --git a/manga-py-stable_1.x/helpers/gh_pages_content/favicon/android-icon-144x144.png b/manga-py-stable_1.x/helpers/gh_pages_content/favicon/android-icon-144x144.png new file mode 100644 index 0000000..999a641 Binary files /dev/null and b/manga-py-stable_1.x/helpers/gh_pages_content/favicon/android-icon-144x144.png differ diff --git a/manga-py-stable_1.x/helpers/gh_pages_content/favicon/android-icon-192x192.png b/manga-py-stable_1.x/helpers/gh_pages_content/favicon/android-icon-192x192.png new file mode 100644 index 0000000..6e423da Binary files /dev/null and b/manga-py-stable_1.x/helpers/gh_pages_content/favicon/android-icon-192x192.png differ diff --git a/manga-py-stable_1.x/helpers/gh_pages_content/favicon/android-icon-36x36.png b/manga-py-stable_1.x/helpers/gh_pages_content/favicon/android-icon-36x36.png new file mode 100644 index 0000000..e8f4b36 Binary files /dev/null and b/manga-py-stable_1.x/helpers/gh_pages_content/favicon/android-icon-36x36.png differ diff --git a/manga-py-stable_1.x/helpers/gh_pages_content/favicon/android-icon-48x48.png b/manga-py-stable_1.x/helpers/gh_pages_content/favicon/android-icon-48x48.png new file mode 100644 index 0000000..472ed00 Binary files /dev/null and b/manga-py-stable_1.x/helpers/gh_pages_content/favicon/android-icon-48x48.png differ diff --git a/manga-py-stable_1.x/helpers/gh_pages_content/favicon/android-icon-72x72.png b/manga-py-stable_1.x/helpers/gh_pages_content/favicon/android-icon-72x72.png new file mode 100644 index 0000000..d333f0e Binary files /dev/null and b/manga-py-stable_1.x/helpers/gh_pages_content/favicon/android-icon-72x72.png differ diff --git a/manga-py-stable_1.x/helpers/gh_pages_content/favicon/android-icon-96x96.png b/manga-py-stable_1.x/helpers/gh_pages_content/favicon/android-icon-96x96.png new file mode 100644 index 0000000..2b85b22 Binary files /dev/null and b/manga-py-stable_1.x/helpers/gh_pages_content/favicon/android-icon-96x96.png differ diff --git a/manga-py-stable_1.x/helpers/gh_pages_content/favicon/apple-icon-114x114.png b/manga-py-stable_1.x/helpers/gh_pages_content/favicon/apple-icon-114x114.png new file mode 100644 index 0000000..b29ed38 Binary files /dev/null and b/manga-py-stable_1.x/helpers/gh_pages_content/favicon/apple-icon-114x114.png differ diff --git a/manga-py-stable_1.x/helpers/gh_pages_content/favicon/apple-icon-120x120.png b/manga-py-stable_1.x/helpers/gh_pages_content/favicon/apple-icon-120x120.png new file mode 100644 index 0000000..3c26e6e Binary files /dev/null and b/manga-py-stable_1.x/helpers/gh_pages_content/favicon/apple-icon-120x120.png differ diff --git a/manga-py-stable_1.x/helpers/gh_pages_content/favicon/apple-icon-144x144.png b/manga-py-stable_1.x/helpers/gh_pages_content/favicon/apple-icon-144x144.png new file mode 100644 index 0000000..999a641 Binary files /dev/null and 
b/manga-py-stable_1.x/helpers/gh_pages_content/favicon/apple-icon-144x144.png differ diff --git a/manga-py-stable_1.x/helpers/gh_pages_content/favicon/apple-icon-152x152.png b/manga-py-stable_1.x/helpers/gh_pages_content/favicon/apple-icon-152x152.png new file mode 100644 index 0000000..2d9527f Binary files /dev/null and b/manga-py-stable_1.x/helpers/gh_pages_content/favicon/apple-icon-152x152.png differ diff --git a/manga-py-stable_1.x/helpers/gh_pages_content/favicon/apple-icon-180x180.png b/manga-py-stable_1.x/helpers/gh_pages_content/favicon/apple-icon-180x180.png new file mode 100644 index 0000000..b02ee21 Binary files /dev/null and b/manga-py-stable_1.x/helpers/gh_pages_content/favicon/apple-icon-180x180.png differ diff --git a/manga-py-stable_1.x/helpers/gh_pages_content/favicon/apple-icon-57x57.png b/manga-py-stable_1.x/helpers/gh_pages_content/favicon/apple-icon-57x57.png new file mode 100644 index 0000000..a329a13 Binary files /dev/null and b/manga-py-stable_1.x/helpers/gh_pages_content/favicon/apple-icon-57x57.png differ diff --git a/manga-py-stable_1.x/helpers/gh_pages_content/favicon/apple-icon-60x60.png b/manga-py-stable_1.x/helpers/gh_pages_content/favicon/apple-icon-60x60.png new file mode 100644 index 0000000..3bfa967 Binary files /dev/null and b/manga-py-stable_1.x/helpers/gh_pages_content/favicon/apple-icon-60x60.png differ diff --git a/manga-py-stable_1.x/helpers/gh_pages_content/favicon/apple-icon-72x72.png b/manga-py-stable_1.x/helpers/gh_pages_content/favicon/apple-icon-72x72.png new file mode 100644 index 0000000..d333f0e Binary files /dev/null and b/manga-py-stable_1.x/helpers/gh_pages_content/favicon/apple-icon-72x72.png differ diff --git a/manga-py-stable_1.x/helpers/gh_pages_content/favicon/apple-icon-76x76.png b/manga-py-stable_1.x/helpers/gh_pages_content/favicon/apple-icon-76x76.png new file mode 100644 index 0000000..d489c71 Binary files /dev/null and b/manga-py-stable_1.x/helpers/gh_pages_content/favicon/apple-icon-76x76.png differ diff --git a/manga-py-stable_1.x/helpers/gh_pages_content/favicon/apple-icon-precomposed.png b/manga-py-stable_1.x/helpers/gh_pages_content/favicon/apple-icon-precomposed.png new file mode 100644 index 0000000..3a7bb39 Binary files /dev/null and b/manga-py-stable_1.x/helpers/gh_pages_content/favicon/apple-icon-precomposed.png differ diff --git a/manga-py-stable_1.x/helpers/gh_pages_content/favicon/apple-icon.png b/manga-py-stable_1.x/helpers/gh_pages_content/favicon/apple-icon.png new file mode 100644 index 0000000..3a7bb39 Binary files /dev/null and b/manga-py-stable_1.x/helpers/gh_pages_content/favicon/apple-icon.png differ diff --git a/manga-py-stable_1.x/helpers/gh_pages_content/favicon/favicon-16x16.png b/manga-py-stable_1.x/helpers/gh_pages_content/favicon/favicon-16x16.png new file mode 100644 index 0000000..3bc2424 Binary files /dev/null and b/manga-py-stable_1.x/helpers/gh_pages_content/favicon/favicon-16x16.png differ diff --git a/manga-py-stable_1.x/helpers/gh_pages_content/favicon/favicon-32x32.png b/manga-py-stable_1.x/helpers/gh_pages_content/favicon/favicon-32x32.png new file mode 100644 index 0000000..8b3779f Binary files /dev/null and b/manga-py-stable_1.x/helpers/gh_pages_content/favicon/favicon-32x32.png differ diff --git a/manga-py-stable_1.x/helpers/gh_pages_content/favicon/favicon-96x96.png b/manga-py-stable_1.x/helpers/gh_pages_content/favicon/favicon-96x96.png new file mode 100644 index 0000000..2b85b22 Binary files /dev/null and b/manga-py-stable_1.x/helpers/gh_pages_content/favicon/favicon-96x96.png 
differ diff --git a/manga-py-stable_1.x/helpers/gh_pages_content/favicon/ms-icon-144x144.png b/manga-py-stable_1.x/helpers/gh_pages_content/favicon/ms-icon-144x144.png new file mode 100644 index 0000000..999a641 Binary files /dev/null and b/manga-py-stable_1.x/helpers/gh_pages_content/favicon/ms-icon-144x144.png differ diff --git a/manga-py-stable_1.x/helpers/gh_pages_content/favicon/ms-icon-150x150.png b/manga-py-stable_1.x/helpers/gh_pages_content/favicon/ms-icon-150x150.png new file mode 100644 index 0000000..3ce8d3c Binary files /dev/null and b/manga-py-stable_1.x/helpers/gh_pages_content/favicon/ms-icon-150x150.png differ diff --git a/manga-py-stable_1.x/helpers/gh_pages_content/favicon/ms-icon-310x310.png b/manga-py-stable_1.x/helpers/gh_pages_content/favicon/ms-icon-310x310.png new file mode 100644 index 0000000..8732907 Binary files /dev/null and b/manga-py-stable_1.x/helpers/gh_pages_content/favicon/ms-icon-310x310.png differ diff --git a/manga-py-stable_1.x/helpers/gh_pages_content/favicon/ms-icon-70x70.png b/manga-py-stable_1.x/helpers/gh_pages_content/favicon/ms-icon-70x70.png new file mode 100644 index 0000000..c5d552a Binary files /dev/null and b/manga-py-stable_1.x/helpers/gh_pages_content/favicon/ms-icon-70x70.png differ diff --git a/manga-py-stable_1.x/helpers/gh_pages_content/improvement.html b/manga-py-stable_1.x/helpers/gh_pages_content/improvement.html new file mode 100644 index 0000000..b5c150d --- /dev/null +++ b/manga-py-stable_1.x/helpers/gh_pages_content/improvement.html @@ -0,0 +1,252 @@ + + + + + + + + + Universal manga downloader :: Plans for improvement + + + + + + + + + + + + + + + + + + + + + + + +
    +
    +

    Manga downloader

    +

    Plans for improvement

    + +
    + +
    +
    +
    +

    + Implement providers for the following resources (a minimal provider skeleton is sketched after the lists below): 

    +
      + +
    • + http://comicride.jp +
    • +
    • + http://exhentai.org +
    • +
    • + http://lhscans.com +
    • +
    • + https://imgur.com + 
    • +
    • + http://mangahost.cc +
    • +
    • + http://manga.madokami.al + +
    • +
    • + http://3asq.info +
    • +
    • + http://hamtruyen.com +
    • +
    • + http://nettruyen.com +
    • +
    • + http://truyenchon.com +
    • +
    • + http://manga.mexat.com +
    • +
    • + http://hamtruyen.com +
    • +
    • + http://reader.sworddemon-scans.org/directory/ +
    • +
    • + http://sworddemon-scans.org + + +
    • +
    • + http://choutensei.260mb.net + +
    • + + +
    • + http://yaoimangaonline.com +
    • +
    +

    Maybe

    +
      +
    • + http://comic-meteor.jp +
    • +
    • + http://mangaz.com +
    • +
    • + http://comic-polaris.jp +
    • +
    • + http://comic.mag-garden.co.jp +
    • +
    • + http://www.comic-valkyrie.com +
    • +
    • + http://seiga.nicovideo.jp/manga/official/biggangan +
    • +
    • + http://www.alphapolis.co.jp +
    • +
    • + http://urasunday.com/index.html +
    • +
    • + http://comic.naver.com/index.nhn +
    • +
    • + http://comic-walker.com +
    • +
    • + http://lezhin.com +
    • +
    • + http://mangaz.com +
    • +
    • + http://exhentai.org +
    • +
    • + http://gameofscanlation.moe +
    • + +
    • + http://lhtranslation.com +
    • +
    • + http://manga.madokami.al +
    • +
    • + http://corocoro.tv/webmanga/index.html +
    • +
    • + http://s-manga.net +
    • +
    • + http://sunday-webry.com +
    • +
    • + http://lhtranslation.com +
    • +
    • + http://ebookrenta.com +
    • +
    • + http://crunchyroll.com +
    • +
    • + http://buenaisla.net - Poor site structure + +
    • +
    • + http://nude-moon.me/ +
    • +
    + + + + +
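Editor's note on the lists above: "implementing a provider" for one of these sites means filling in the hooks declared by `manga_py.base_classes.abstract.Abstract`, which is added later in this diff. The sketch below is only an illustration of that interface; the class name, URLs, and return values are hypothetical placeholders, not a real provider, and the HTTP/parsing helpers that real providers inherit are omitted to keep it self-contained.

```python
# Hypothetical sketch of the provider interface declared in
# manga_py/base_classes/abstract.py (added later in this diff).
# ExampleSiteProvider and every URL below are placeholders.
from manga_py.base_classes.abstract import Abstract


class ExampleSiteProvider(Abstract):
    def get_main_content(self):  # called once: markup of the manga's title page
        return '<html>placeholder markup</html>'

    def get_manga_name(self) -> str:  # called once: the manga's name/slug
        return 'example-manga'

    def get_chapters(self) -> list:  # called once: one entry per chapter
        return [
            'https://example.invalid/manga/example-manga/chapter-2',
            'https://example.invalid/manga/example-manga/chapter-1',
        ]

    def get_files(self) -> list:  # called for every chapter: image URLs to download
        return ['https://example.invalid/img/page-001.png']

    def get_chapter_index(self) -> str:  # used when chapters are selected manually (CLI)
        return '1'

    def chapter_for_json(self) -> str:  # chapter identifier written to the info JSON
        return 'chapter-1'
```

The coverage settings earlier in this diff (`--omit=manga_py/providers/*.py`, the `manga_py/providers/` exclude in .codeclimate.yml) show where such modules live in the tree: `manga_py/providers/`.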

    + Implement an online resource that allows you to download manga without a PC. + - Questionable +

    + +

    + Perhaps host a project on Heroku to implement the previous item. + - Questionable 

    + +

    + Research more sites that offer online manga reading 

    + +
    +
    Latest update: __last_update__
    +
    +
    + + diff --git a/manga-py-stable_1.x/helpers/gh_pages_content/index.html b/manga-py-stable_1.x/helpers/gh_pages_content/index.html new file mode 100644 index 0000000..8eaf155 --- /dev/null +++ b/manga-py-stable_1.x/helpers/gh_pages_content/index.html @@ -0,0 +1,85 @@ + + + + + + + + + Universal manga downloader + + + + + + + + + + + + + + + + + + + + + + + +
    +
    +

    Manga downloader

    + +
    + +
    +
    + +
      +
      +
      +

      Questions

      +
        +
      • +
        How to offer more resources?
        +
        + Very simple. Write to me at sttv-pc@mail.ru or make a pull-request on   + github.com +
        +
      • +
      • +
        How to help?
        +
        + You can also make a   + pull-request, + or create an   + issue.
        + You can also crawl more + sites that offer online manga reading. + 
        +
      • +
      +
      +
      Latest update: __last_update__
      +
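Editor's note: the page above invites pull requests; for anyone who wants to drive the downloader from their own code rather than the CLI, `embedded.md` earlier in this diff documents the embedding hooks. The sketch below only rearranges what that file already shows into a runnable shape. It is an assumption, not documented behavior, that the CLI accepts the manga URL as a single positional argument to `get_cli_arguments().parse_args()`; the no-op `quest`/`quest_password` callbacks are placeholders.

```python
# Editor's sketch based on embedded.md (earlier in this diff), not project
# documentation. Assumption: parse_args([url]) with one positional URL yields
# a complete argument namespace.
from manga_py.cli.args import get_cli_arguments
from manga_py.info import Info
from manga_py.parser import Parser


def progress(items_count: int, current_item: int, re_init: bool = False):
    # re_init=True means "next chapter", per embedded.md
    print('%d/%d' % (current_item, items_count))


def download(url: str):
    args = get_cli_arguments().parse_args([url])
    parser = Parser(args)
    parser.init_provider(
        progress=progress,
        log=print,                                          # forwarded log messages
        quest=lambda variants, title, select_type=0: '',    # placeholder answer
        quest_password=lambda title: '',                    # placeholder password prompt
        info=Info(args),
    )
    parser.start()


# download('http://manga-url-here/manga-name')  # hypothetical URL, as in the README examples
```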
      + + diff --git a/manga-py-stable_1.x/helpers/gh_pages_content/manifest.json b/manga-py-stable_1.x/helpers/gh_pages_content/manifest.json new file mode 100644 index 0000000..76e2569 --- /dev/null +++ b/manga-py-stable_1.x/helpers/gh_pages_content/manifest.json @@ -0,0 +1,41 @@ +{ + "name": "App", + "icons": [ + { + "src": ".\/favicon\/android-icon-36x36.png", + "sizes": "36x36", + "type": "image\/png", + "density": "0.75" + }, + { + "src": ".\/favicon\/android-icon-48x48.png", + "sizes": "48x48", + "type": "image\/png", + "density": "1.0" + }, + { + "src": ".\/favicon\/android-icon-72x72.png", + "sizes": "72x72", + "type": "image\/png", + "density": "1.5" + }, + { + "src": ".\/favicon\/android-icon-96x96.png", + "sizes": "96x96", + "type": "image\/png", + "density": "2.0" + }, + { + "src": ".\/favicon\/android-icon-144x144.png", + "sizes": "144x144", + "type": "image\/png", + "density": "3.0" + }, + { + "src": ".\/favicon\/android-icon-192x192.png", + "sizes": "192x192", + "type": "image\/png", + "density": "4.0" + } + ] +} \ No newline at end of file diff --git a/manga-py-stable_1.x/helpers/gh_pages_content/ok_flag.svg b/manga-py-stable_1.x/helpers/gh_pages_content/ok_flag.svg new file mode 100644 index 0000000..901415f --- /dev/null +++ b/manga-py-stable_1.x/helpers/gh_pages_content/ok_flag.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/manga-py-stable_1.x/helpers/gh_pages_content/sprite_download.png b/manga-py-stable_1.x/helpers/gh_pages_content/sprite_download.png new file mode 100644 index 0000000..901068c Binary files /dev/null and b/manga-py-stable_1.x/helpers/gh_pages_content/sprite_download.png differ diff --git a/manga-py-stable_1.x/helpers/gh_pages_content/style.scss b/manga-py-stable_1.x/helpers/gh_pages_content/style.scss new file mode 100644 index 0000000..db447aa --- /dev/null +++ b/manga-py-stable_1.x/helpers/gh_pages_content/style.scss @@ -0,0 +1,355 @@ +/* http://meyerweb.com/eric/tools/css/reset/ + v2.0 | 20110126 + License: none (public domain) +*/ + +html, body, div, span, applet, object, iframe, +h1, h2, h3, h4, h5, h6, p, blockquote, pre, +a, abbr, acronym, address, big, cite, code, +del, dfn, em, img, ins, kbd, q, s, samp, +small, strike, strong, sub, sup, tt, var, +b, u, i, center, +dl, dt, dd, ol, ul, li, +fieldset, form, label, legend, +table, caption, tbody, tfoot, thead, tr, th, td, +article, aside, canvas, details, embed, +figure, figcaption, footer, header, hgroup, +menu, nav, output, ruby, section, summary, +time, mark, audio, video { + margin: 0; + padding: 0; + border: 0; + font-size: 100%; + font: inherit; + vertical-align: baseline; +} + +/* HTML5 display-role reset for older browsers */ +article, aside, details, figcaption, figure, +footer, header, hgroup, menu, nav, section { + display: block; +} + +body { + line-height: 1; +} + +ol, ul { + list-style: none; +} + +blockquote, q { + quotes: none; +} + +blockquote:before, blockquote:after, +q:before, q:after { + content: ''; + content: none; +} + +table { + border-collapse: collapse; + border-spacing: 0; +} + +body { + background: #fff; + font-size: 16px; + font-family: 'Roboto', 'Noto', sans-serif; +} + +h1, h2, h3, h4, h5, h6 { + display: block; + &.panel { + border-bottom: 1px solid #eaecef; + padding: .5rem .6rem; + max-width: 31.875rem; + } +} +.header h2.h2-panel { + font-size: 1.9rem; + max-width: 100%; + padding: .5rem .6rem; +} + +h1 { + font-weight: bold; + font-size: 2.6rem; + margin-bottom: .2rem; +} + +h2 { + font-weight: bold; + font-size: 2.2rem; +} + +h3 { + 
font-weight: bold; + font-size: 1.8rem; +} + +h4 { + font-weight: bold; + font-size: 1.4rem; +} + +h5 { + font-weight: bold; + font-size: 1.2rem; +} + +h6 { + font-size: 1.2rem; + color: #776; +} + +a[href] { + color: #7bc7ff; + font-size: 1.1rem; + text-decoration: underline; + &:hover { + text-decoration: none; + } +} + +ul.links { + & > li { + line-height: 3rem; + display: inline-flex; + margin-bottom: .5rem; + margin-right: .5rem; + } + a { + line-height: 1.25; + text-align: center; + white-space: nowrap; + vertical-align: middle; + user-select: none; + border: 1px solid #ccc; + padding: .5rem 1rem; + font-size: 1rem; + border-radius: .25rem; + transition: all .2s ease-in-out; + color: #292b2c; + background-color: #fff; + } +} + +ul, ul > li { + list-style: none; +} + +ul { + padding: 1rem 1rem; + background: #f6f8fa; +} + +hr { + padding-bottom: 0.3em; + font-size: 1.5em; + border: 0; + border-bottom: 1px solid #eaecef; + margin: 0; +} + +.questions { + font-size: 1.4rem; + li { + padding-left: 1rem; + & + li { + padding-top: 1rem; + } + } + .quest { + font-weight: bold; + padding-bottom: .3rem; + } +} + +ul.list li { + & + li { + padding-top: .5rem; + } + a { + color: #63a7d2; + line-height: 1.7rem; + } + $blue: rgba(66, 145, 219, 0.53); + $active-blue: rgba(186, 214, 241, 0.53); + label { + position: relative; + cursor: pointer; + padding-left: 1em; + } + input[type=checkbox] { + display: none; + //position: absolute; + //width: 1px; + //height: 1px; + //border: 0 none; + //background: transparent; + //margin: 0; + //margin-top: 5px; + & + label { + background: #fff no-repeat center left 2px; + display: inline-block; + width: 1rem; + height: 1.25rem; + top: -.01rem; + position: relative; + border-radius: 3px; + border: 2px solid $active-blue; + //transition: border .3s ease; + font-size: 1rem; + box-sizing: border-box; + margin-right: .4rem; + } + &:checked { + & + label { + background-image: url('ok_flag.svg'); + background-size: 75%; + border-color: $blue; + } + } + } +} + +.header { + position: relative; +} + +.download-links { + position: absolute; + top: .5rem; + right: 1rem; + a { + background: url('sprite_download.png'); + background-position-y: bottom; + display: inline-block; + width: 90px; + height: 70px; + text-indent: -5000px; + overflow: hidden; + opacity: .9; + & + a { + background-position-x: right; + } + &[active] { + opacity: 1; + } + } +} + +.container { + width: 100%; + margin: auto; + background: #f5f7fa; + min-height: 100vh; +} + +// Small devices (landscape phones, 576px and up) +@media (max-width: 767px) { + .header h1.panel, + .header h2 { + max-width: 20rem; + max-width: calc(100% - 185px); + font-size: 2.2rem; + } +} + +@media (max-width: 360px) { + .header h1.panel { + padding-top: 3rem; + } +} + +@media (max-width: 575px) { + .header h1.panel { + max-width: 16rem; + max-width: calc(100% - 165px); + } + + .download-links { + right: .1rem; + transform: scale(.9); + } +} + +@media (min-width: 576px) { + .container { + width: 550px; + } +} + +// Medium devices (tablets, 768px and up) +@media (min-width: 768px) { + .container { + width: 750px; + } +} + +// Large devices (desktops, 992px and up) +@media (min-width: 992px) { + .container { + width: 750px; + } +} + +// Extra large devices (large desktops, 1200px and up) +@media (min-width: 1200px) { + .container { + width: 750px; + } +} + +// touch devices +@media (pointer: coarse) { + ul.list li { + a { + font-size: 1.4rem; + } + & + li { + padding-top: 1rem; + } + } +} + +h4[data-symbol] { + margin-left: 
1.3rem; + position: relative; + &:after { + font-weight: 500; + font-size: 1.2rem; + opacity: .9; + font-style: normal; + content: attr(data-symbol) ")"; + position: absolute; + left: -1rem; + top: 1.05rem; + } +} + +.h-ul { + padding: 1rem 1rem 0; + &.h-ul-two { + padding: 1rem 1rem 0 2rem; + } +} + +.content-inner-wrapper { + padding-left: .4rem; +} + +span.subcontent { + font-size: 1rem; + font-weight: normal; + color: #565656; +} + +.last-update { + text-align: center; + font-size: .7rem; + color: #ccc; + padding-bottom: 1rem; +} diff --git a/manga-py-stable_1.x/helpers/inmanga_com.py b/manga-py-stable_1.x/helpers/inmanga_com.py new file mode 100644 index 0000000..2749190 --- /dev/null +++ b/manga-py-stable_1.x/helpers/inmanga_com.py @@ -0,0 +1,22 @@ +from json import loads +from requests import get +from os import system, path + +_path = path.dirname(path.dirname(path.realpath(__file__))) + +all_manga_list = None +n = 0 +while n < 10: + try: + all_manga_list = loads(get('http://inmanga.com/OnMangaQuickSearch/Source/QSMangaList.json').text) + break + except Exception: + pass + n += 1 +if not all_manga_list: + print('Error! QSMangaList is not correct json?') + +for i in all_manga_list: + print('Downloading %s' % i['Name']) + _str = 'cd {}; python3 manga.py --cli -i -u http://inmanga.com/ver/manga/{}/{}' + system(_str.format(_path, i['Name'], i['Name'], i['Identification'])) diff --git a/manga-py-stable_1.x/helpers/python/Dockerfile b/manga-py-stable_1.x/helpers/python/Dockerfile new file mode 100644 index 0000000..e5279aa --- /dev/null +++ b/manga-py-stable_1.x/helpers/python/Dockerfile @@ -0,0 +1,161 @@ +FROM debian:jessie-slim + +# ensure local python is preferred over distribution python +ENV PATH /usr/local/bin:$PATH + +# http://bugs.python.org/issue19846 +# > At the moment, setting "LANG=C" on a Linux system *fundamentally breaks Python 3*, and that's not OK. 
+ENV LANG C.UTF-8 + +# runtime dependencies +RUN apt-get update && apt-get install -y --no-install-recommends \ + ca-certificates \ + netbase \ + && rm -rf /var/lib/apt/lists/* + +ENV GPG_KEY 0D96DF4D4110E5C43FBFB17F2D347EA6AA65421D +ENV PYTHON_VERSION 3.6.8 + +# if this is called "PIP_VERSION", pip explodes with "ValueError: invalid truth value ''" +ENV PYTHON_PIP_VERSION 19.1.1 + +ARG HOST_UID=1000 +ARG HOST_GID=1000 +ARG HOST_USER=manga +ARG HOST_GROUP=manga +ARG HOME='/home/manga' + +RUN groupadd -g $HOST_GID $HOST_GROUP \ + && groupadd sudonopswd \ + && useradd -m -l -g $HOST_GROUP -u $HOST_UID $HOST_USER + +RUN mkdir $HOME -p; \ + chown $HOST_USER:$HOST_GROUP $HOME + +RUN touch $HOME/.bashrc; \ + mkdir $HOME/Manga; \ + chown $HOST_USER:$HOST_GROUP $HOME/.bashrc; \ + chown $HOST_USER:$HOST_GROUP $HOME/Manga + +RUN set -ex \ + \ + && savedAptMark="$(apt-mark showmanual)" \ + && apt-get update && apt-get install -y --no-install-recommends \ + dpkg-dev \ + gcc \ + libbz2-dev \ + libc6-dev \ + libexpat1-dev \ + libffi-dev \ + libgdbm-dev \ + liblzma-dev \ + libncursesw5-dev \ + libreadline-dev \ + libsqlite3-dev \ + libssl-dev \ + make \ + tk-dev \ + build-essential \ + wget \ + xz-utils \ + zlib1g-dev \ + curl \ +# python autocomplete utilite + python3-argcomplete \ +# as of Stretch, "gpg" is no longer included by default + $(command -v gpg > /dev/null || echo 'gnupg dirmngr') \ + \ + && wget -O python.tar.xz "https://www.python.org/ftp/python/${PYTHON_VERSION%%[a-z]*}/Python-$PYTHON_VERSION.tar.xz" \ + && wget -O python.tar.xz.asc "https://www.python.org/ftp/python/${PYTHON_VERSION%%[a-z]*}/Python-$PYTHON_VERSION.tar.xz.asc" \ + && export GNUPGHOME="$(mktemp -d)" \ + && gpg --batch --keyserver ha.pool.sks-keyservers.net --recv-keys "$GPG_KEY" \ + && gpg --batch --verify python.tar.xz.asc python.tar.xz \ + && { command -v gpgconf > /dev/null && gpgconf --kill all || :; } \ + && rm -rf "$GNUPGHOME" python.tar.xz.asc \ + && mkdir -p /usr/src/python \ + && tar -xJC /usr/src/python --strip-components=1 -f python.tar.xz \ + && rm python.tar.xz \ + \ + && cd /usr/src/python \ + && gnuArch="$(dpkg-architecture --query DEB_BUILD_GNU_TYPE)" \ + && ./configure \ + --build="$gnuArch" \ + --enable-loadable-sqlite-extensions \ + --enable-shared \ + --with-system-expat \ + --with-system-ffi \ + --without-ensurepip \ + && make -j "$(nproc)" \ + && make install \ + && ldconfig \ + \ + && apt-mark auto '.*' > /dev/null \ + && apt-mark manual $savedAptMark \ + && find /usr/local -type f -executable -not \( -name '*tkinter*' \) -exec ldd '{}' ';' \ + | awk '/=>/ { print $(NF-1) }' \ + | sort -u \ + | xargs -r dpkg-query --search \ + | cut -d: -f1 \ + | sort -u \ + | xargs -r apt-mark manual; \ + \ + wget -O get-pip.py 'https://bootstrap.pypa.io/get-pip.py'; \ + \ + python3 get-pip.py \ + --disable-pip-version-check \ + --no-cache-dir \ + "pip==$PYTHON_PIP_VERSION" \ + ; \ + python3 -mpip --version \ + \ + && python3 --version \ +# Because it requires gcc + && python3 -mpip install manga-py -U \ + && curl -sL https://deb.nodesource.com/setup_12.x | bash - \ + && apt-get install -y nodejs \ + && apt-get purge -y --auto-remove -o APT::AutoRemove::RecommendsImportant=false \ + && rm -rf /var/lib/apt/lists/* \ + \ + && find /usr/local -depth \ + \( \ + \( -type d -a \( -name test -o -name tests \) \) \ + -o \ + \( -type f -a \( -name '*.pyc' -o -name '*.pyo' \) \) \ + \) -exec rm -rf '{}' + \ + && rm -rf /usr/src/python + +# make some useful symlinks that are expected to exist +RUN cd /usr/local/bin \ + && ln 
-s idle3 idle \ + && ln -s pydoc3 pydoc \ + && ln -s python3 python \ + && ln -s python3-config python-config + +RUN set -ex; \ + \ + savedAptMark="$(apt-mark showmanual)"; \ + \ + apt-mark auto '.*' > /dev/null; \ + [ -z "$savedAptMark" ] || apt-mark manual $savedAptMark; \ + rm -rf /var/lib/apt/lists/*; \ + apt-get purge -y --auto-remove -o APT::AutoRemove::RecommendsImportant=false; \ + \ + find /usr/local -depth \ + \( \ + \( -type d -a \( -name test -o -name tests \) \) \ + -o \ + \( -type f -a \( -name '*.pyc' -o -name '*.pyo' \) \) \ + \) -exec rm -rf '{}' +; \ + rm -f get-pip.py + +USER $HOST_USER +WORKDIR $HOME + +RUN echo 'Manga-py version: '; \ + manga-py --version; \ + rm -rf /tmp/.P* + +# docker run -it -v /tmp/destination:/home/manga mangadl/manga-py + +CMD ["bash"] + diff --git a/manga-py-stable_1.x/manga.py b/manga-py-stable_1.x/manga.py new file mode 100644 index 0000000..445527f --- /dev/null +++ b/manga-py-stable_1.x/manga.py @@ -0,0 +1,8 @@ +#!/usr/bin/python3 +# -*- coding: utf-8 -*- + +from manga_py import main + + +if __name__ == '__main__': + main() diff --git a/manga-py-stable_1.x/manga_py/__init__.py b/manga-py-stable_1.x/manga_py/__init__.py new file mode 100644 index 0000000..677dae8 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/__init__.py @@ -0,0 +1,103 @@ +#!/usr/bin/python3 +# -*- coding: utf-8 -*- + +import traceback +from atexit import register as atexit_register +from json import dumps +from os import makedirs, path +from shutil import rmtree +from sys import exit, stderr + +try: + from loguru import logger + catch = logger.catch +except ImportError: + def catch(x): + print('Setup in progress?') + +try: + from .cli import Cli + from .cli.args import get_cli_arguments + from .fs import get_temp_path, get_info + from .info import Info + from .meta import __version__ +except ImportError: + print('Setup in progress?', file=stderr) + +__author__ = 'Sergey Zharkov' +__license__ = 'MIT' +__email__ = 'sttv-pc@mail.ru' + + +@atexit_register +def before_shutdown(): + temp_dir = get_temp_path() + path.isdir(temp_dir) and rmtree(temp_dir) + + +def _init_cli(args, _info): + error_lvl = -5 + try: + _info.start() + cli_mode = Cli(args, _info) + cli_mode.start() + code = 0 + except Exception as e: + traceback.print_tb(e.__traceback__, error_lvl, file=stderr) + code = 1 + _info.set_error(e) + return code + + +def _run_util(args) -> tuple: + parse_args = args.parse_args() + _info = Info(parse_args) + code = _init_cli(args, _info) + + if parse_args.print_json: + _info = dumps( + _info.get(), + indent=2, + separators=(',', ': '), + sort_keys=True, + ) + else: + _info = [] + + return code, _info + + +def _update_all(args): + parse_args = args.parse_args() + parse_args.quiet or print('Update all', file=stderr) + multi_info = {} + + dst = parse_args.destination + json_info = get_info(dst) + + for i in json_info: + parse_args.manga_name = i['manga_name'] + parse_args.url = i['url'] + code, _info = _run_util(args) + multi_info[i['directory']] = _info + parse_args.quiet or (parse_args.print_json and print(multi_info)) + + +@catch +def main(): + # if ~__version__.find('alpha'): + # print('Alpha release! 
There may be errors!', file=stderr) + temp_path = get_temp_path() + path.isdir(temp_path) or makedirs(temp_path) + + args = get_cli_arguments() + parse_args = args.parse_args() + + code, _info = _run_util(args) + parse_args.quiet or (parse_args.print_json and print(_info)) + + exit(code) + + +if __name__ == '__main__': + main() diff --git a/manga-py-stable_1.x/manga_py/base_classes/__init__.py b/manga-py-stable_1.x/manga_py/base_classes/__init__.py new file mode 100644 index 0000000..3b6f31f --- /dev/null +++ b/manga-py-stable_1.x/manga_py/base_classes/__init__.py @@ -0,0 +1,8 @@ +from .abstract import Abstract +from .archive import Archive +from .base import Base +from .callbacks import Callbacks +from .cf_protect import CloudFlareProtect +from .chapter_helper import ChapterHelper +from .static import Static +from .web_driver import WebDriver diff --git a/manga-py-stable_1.x/manga_py/base_classes/abstract.py b/manga-py-stable_1.x/manga_py/base_classes/abstract.py new file mode 100644 index 0000000..0776a5f --- /dev/null +++ b/manga-py-stable_1.x/manga_py/base_classes/abstract.py @@ -0,0 +1,51 @@ +from abc import abstractmethod + + +class Abstract: + + @abstractmethod + def get_main_content(self): # call once + pass + + @abstractmethod + def get_manga_name(self) -> str: # call once + return '' + + @abstractmethod + def get_chapters(self) -> list: # call once + return [] + + def prepare_cookies(self): # if site with cookie protect + pass + + @abstractmethod + def get_files(self) -> list: # call ever volume loop + return [] + + # @abstractmethod + # def get_archive_name(self) -> str: + # pass + + # for chapters selected by manual (cli) + @abstractmethod + def get_chapter_index(self) -> str: + pass + + def book_meta(self) -> dict: + pass + + def before_download_chapter(self): + pass + + def get_cover(self): + pass + + def before_file_save(self, url, idx) -> str: # return url ! 
+ return url + + def after_file_save(self, _path: str, idx: int): + pass + + @abstractmethod + def chapter_for_json(self) -> str: + pass diff --git a/manga-py-stable_1.x/manga_py/base_classes/archive.py b/manga-py-stable_1.x/manga_py/base_classes/archive.py new file mode 100644 index 0000000..d61e8b4 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/base_classes/archive.py @@ -0,0 +1,82 @@ +from os import path +from zipfile import ZipFile, ZIP_DEFLATED + +from manga_py.fs import is_file, make_dirs, basename, dirname, unlink, get_temp_path +# from PIL import Image as PilImage +from manga_py.image import Image + + +class Archive: + _archive = None + _writes = None + files = None + not_change_files_extension = False + no_webp = False + has_error = False + + def __init__(self): + self.files = [] + self._writes = {} + + def write_file(self, data, in_arc_name): + self._writes[in_arc_name] = data + + def add_file(self, file, in_arc_name=None): + if in_arc_name is None: + in_arc_name = basename(file) + self.files.append((file, in_arc_name)) + + def set_files_list(self, files): + self.files = files + + def add_book_info(self, data): + self.write_file('comicbook.xml', data) + + def __add_files(self): + for file in self.files: + if is_file(file[0]): + ext = self.__update_image_extension(file[0]) + if self.no_webp and ext[ext.rfind('.'):] == '.webp': + jpeg = ext[:ext.rfind('.')] + '.jpeg' + jpeg_path = path.join(dirname(file[0]), jpeg) + Image(file[0]).convert(jpeg_path) + file = jpeg_path, jpeg + elif ext: + file = file[0], ext + self._archive.write(*file) + + def __add_writes(self): + for file in self._writes: + self._archive.writestr(file, self._writes[file]) + + def add_info(self, data): + self.write_file(data, 'info.txt') + + def make(self, dst): + if not len(self.files) and not len(self._writes): + return + + make_dirs(dirname(dst)) + + self._archive = ZipFile(dst, 'w', ZIP_DEFLATED) + try: + self.__add_files() + self.__add_writes() + self._archive.close() + except OSError as e: + self._archive.close() + raise e + self._archive.close() + self._maked() + + def _maked(self): + for file in self.files: + unlink(file[0]) + + def __update_image_extension(self, filename) -> str: + fn, extension = path.splitext(filename) + if not self.not_change_files_extension: + ext = Image.real_extension(get_temp_path(filename)) + if ext: + extension = ext + return basename(fn + extension) diff --git a/manga-py-stable_1.x/manga_py/base_classes/base.py b/manga-py-stable_1.x/manga_py/base_classes/base.py new file mode 100644 index 0000000..150ffad --- /dev/null +++ b/manga-py-stable_1.x/manga_py/base_classes/base.py @@ -0,0 +1,157 @@ +import re +from os import path +from sys import stderr + +from loguru import logger +from lxml.html import HtmlElement + +from manga_py.http import Http +from manga_py.image import Image + + +class Base: + _storage = None + _params = None + _image_params = None + _http_kwargs = None + __http = None + + def __init__(self): + + self._storage = { + 'cookies': {}, + 'main_content': None, + 'chapters': [], + 'current_chapter': 0, + 'current_file': 0, + 'proxies': {}, + 'domain_uri': None, + } + self._params = { + 'destination': 'Manga', + 'cf-protect': False, + } + self._image_params = { + 'crop': (0, 0, 0, 0), + # 'crop': (left, upper, right, lower) + 'auto_crop': False, + # 'auto_crop': True, + } + self._http_kwargs = {} + + def _archive_type(self): + arc_type = 'zip' + if self._params['cbz']: + arc_type = 'cbz' + return arc_type + + def get_url(self): + return self._params['url'] + + 
@property + def domain(self) -> str: + try: + if not self._storage.get('domain_uri', None): + self._storage['domain_uri'] = re.search('(https?://[^/]+)', self._params['url']).group(1) + return self._storage.get('domain_uri', '') + except Exception: + print('url is broken!', file=stderr) + exit() + + @staticmethod + def image_auto_crop(src_path, dest_path=None): + image = Image(src_path=src_path) + image.crop_auto(dest_path=dest_path) + image.close() + + def image_manual_crop(self, src_path, dest_path=None): # sizes: (left, top, right, bottom) + if isinstance(self._image_params['crop'], tuple) != (0, 0, 0, 0): + image = Image(src_path=src_path) + image.crop_manual_with_offsets(offsets=self._image_params['crop'], dest_path=dest_path) + image.close() + + def _build_http_params(self, params): + if params is None: + params = {} + params.setdefault('allow_webp', not self._params.get('disallow_webp', None)) + params.setdefault('referer', self._storage.get('referer', self.domain)) + params.setdefault('user_agent', self._get_user_agent()) + params.setdefault('proxies', self._storage.get('proxies', None)) + params.setdefault('cookies', self._storage.get('cookies', None)) + params.setdefault('kwargs', self._http_kwargs) + return params + + def http(self, new=False, params=None) -> Http: + http_params = self._build_http_params(params) + if new: + http = Http(**http_params) + return http + elif not self.__http: + self.__http = Http(**http_params) + return self.__http + + def http_get(self, url: str, headers: dict = None, cookies: dict = None): + return self.http().get(url=url, headers=headers, cookies=cookies) + + def http_post(self, url: str, headers: dict = None, cookies: dict = None, data=()): + return self.http().post(url=url, headers=headers, cookies=cookies, data=data) + + def _get_user_agent(self): + ua_storage = self._storage.get('user_agent', None) + ua_params = self._params.get('user_agent', None) + if self._params.get('cf_protect', False): + return ua_storage + return ua_params + + @property + def chapter_id(self): + return self._storage.get('current_chapter', 0) + + @chapter_id.setter + def chapter_id(self, idx): + self._storage['current_chapter'] = idx + + @classmethod + def __normalize_chapters(cls, n, element): + if isinstance(element, HtmlElement): + return n(element.get('href')) + if isinstance(element, str): + return n(element) + return element + + def _prepare_chapters(self, chapters): + n = self.http().normalize_uri + items = [] + if chapters and len(chapters): + for i in chapters: + url = self.__normalize_chapters(n, i) + items.append(url) + else: + logger.warning('Chapters list empty. 
Check %s' % self.get_url()) + return items + + @property + def chapter(self): + return self._storage['chapters'][self.chapter_id] + + def get_current_file(self): + return self._storage['files'][self._storage['current_file']] + + def book_meta(self) -> dict: + return {} + + def _image_name(self, idx, filename): + if idx is None: + idx = self._storage['current_file'] + fn, extension = path.splitext(filename) + _path = '{:0>3}_{}'.format(idx, fn) + if self._params['rename_pages']: + _path = '{:0>3}'.format(idx) + return _path + extension + + def chapter_for_json(self) -> str: + return self.chapter + + def put_info_json(self, meta): + # manga_name, url, directory + pass diff --git a/manga-py-stable_1.x/manga_py/base_classes/callbacks.py b/manga-py-stable_1.x/manga_py/base_classes/callbacks.py new file mode 100644 index 0000000..f670909 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/base_classes/callbacks.py @@ -0,0 +1,35 @@ +from typing import Callable + + +class Callbacks: + def _call_files_progress_callback(self): + if callable(self.progress): + _max, _current = len(self._storage['files']), self._storage['current_file'] + self.progress(_max, _current, _current < 1) + + def set_quest_callback(self, callback: Callable): # Required call from initiator (CLI, GUI) + setattr(self, 'quest', callback) + + def set_progress_callback(self, callback: Callable): # Required call from initiator (CLI, GUI) + setattr(self, 'progress', callback) + + def set_log_callback(self, callback: Callable): # Required call from initiator (CLI, GUI) + setattr(self, 'log', callback) + + def set_quest_password_callback(self, callback: Callable): # Required call from iterator (CLI, GUI) + setattr(self, 'quest_password', callback) + + def quest(self, *args, **kwargs): + pass + + def quest_password(self, *args, **kwargs): + pass + + def progress(self, *args, **kwargs): + pass + + def log(self, *args, **kwargs): + pass + + def book_meta(self) -> dict: + return {} diff --git a/manga-py-stable_1.x/manga_py/base_classes/cf_protect.py b/manga-py-stable_1.x/manga_py/base_classes/cf_protect.py new file mode 100644 index 0000000..b50a36d --- /dev/null +++ b/manga-py-stable_1.x/manga_py/base_classes/cf_protect.py @@ -0,0 +1,18 @@ +from sys import stderr + +import cloudscraper + + +class CloudFlareProtect: + protector = [] + + def run(self, url): # pragma: no cover + + if not self.protector: + scraper = cloudscraper.create_scraper() + try: + self.protector = scraper.get_tokens(url) + except Exception as e: + print('CF error! 
%s' % e, file=stderr) + + return self.protector diff --git a/manga-py-stable_1.x/manga_py/base_classes/chapter_helper.py b/manga-py-stable_1.x/manga_py/base_classes/chapter_helper.py new file mode 100644 index 0000000..58a37dd --- /dev/null +++ b/manga-py-stable_1.x/manga_py/base_classes/chapter_helper.py @@ -0,0 +1,15 @@ +# cli chapters parser +class ChapterHelper: + chapters = '' + + def __init__(self, chapters: str): + self.chapters = chapters + if isinstance(self.chapters, str): + self.chapters = self.chapters.split(' ') + + def get_chapters(self, urls): + chapters = [] + for i, url in enumerate(urls): + if i in self.chapters: + chapters.append(urls) + return chapters diff --git a/manga-py-stable_1.x/manga_py/base_classes/static.py b/manga-py-stable_1.x/manga_py/base_classes/static.py new file mode 100644 index 0000000..a88c8ca --- /dev/null +++ b/manga-py-stable_1.x/manga_py/base_classes/static.py @@ -0,0 +1,41 @@ +from lxml.html import document_fromstring +from purifier.purifier import HTMLPurifier + + +class Static: + + @staticmethod + def _clear_html(body): + purifier = HTMLPurifier({ + 'div': ['*'], 'span': ['*'], + 'img': ['*'], 'a': ['*'], + 'h1': ['*'], 'h2': ['*'], + 'h3': ['*'], 'h4': ['*'], + 'h5': ['*'], 'h6': ['*'], + }) + return purifier.feed(body) + + @staticmethod + def document_fromstring(body, selector: str = None, idx: int = None): # pragma: no cover + result = document_fromstring(body) # todo + if isinstance(selector, str): + result = result.cssselect(selector) + if isinstance(idx, int): + result = result[idx] + return result + + @staticmethod + def _set_if_not_none(var, key, value): # pragma: no cover + if value is not None: + var[key] = value + + @staticmethod + def __test_ascii(i): + o = ord(i) + _ = 39 < o < 127 + _ = _ and o not in [42, 47, 92, 94] + return _ or o > 161 + + @staticmethod + def remove_not_ascii(value): + return "".join(i for i in value if i == '_' or Static.__test_ascii(i)) diff --git a/manga-py-stable_1.x/manga_py/base_classes/web_driver.py b/manga-py-stable_1.x/manga_py/base_classes/web_driver.py new file mode 100644 index 0000000..07de72e --- /dev/null +++ b/manga-py-stable_1.x/manga_py/base_classes/web_driver.py @@ -0,0 +1,48 @@ +from os import chmod +from sys import platform +from zipfile import ZipFile + +from requests import get + +from manga_py.fs import is_file, dirname, path_join, get_util_home_path + + +class WebDriver: + driver_version = '2.40' + + @staticmethod + def is_win(): + return ~platform.find('win32') + + def download_drivder(self): + url_prefix = 'https://chromedriver.storage.googleapis.com/' + url = '/chromedriver_linux64.zip' + if ~platform.find('darwin'): + url = '/chromedriver_mac64.zip' + if self.is_win(): + url = '/chromedriver_win32.zip' + + path = path_join(get_util_home_path(), 'driver.zip') + + with open(path, 'wb') as driver: + driver.write(get(url_prefix + self.driver_version + url).content) + driver.close() + with ZipFile(path) as file: + file.extractall(dirname(self._driver_path())) + + def _driver_path(self): + if self.is_win(): + driver = 'chromedriver.exe' + else: + driver = 'chromedriver' + return path_join(get_util_home_path(), driver) + + def get_driver(self): + from selenium import webdriver # need, if captcha detected + driver_path = self._driver_path() + if not is_file(driver_path): + self.download_drivder() + self.is_win() or chmod(driver_path, 0o755) + driver = webdriver.Chrome(executable_path=driver_path) + driver.set_window_size(500, 600) + return driver diff --git 
a/manga-py-stable_1.x/manga_py/cli/__init__.py b/manga-py-stable_1.x/manga_py/cli/__init__.py new file mode 100644 index 0000000..bc69a14 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/cli/__init__.py @@ -0,0 +1,95 @@ +import sys +from argparse import ArgumentParser +from getpass import getpass +from os import name as os_name + +from progressbar import ProgressBar + +from manga_py.fs import check_free_space, get_temp_path +from manga_py.parser import Parser + + +class Cli: # pragma: no cover + args = None + parser = None + _info = None + __progress_bar = None + + def __init__(self, args: ArgumentParser, info=None): + self.args = args.parse_args() + self.parser = Parser(args) + self._info = info + + space = self.args.min_free_space + if not check_free_space(get_temp_path(), space) or not check_free_space(self.args.destination, space): + raise OSError('No space left on device') + + def start(self): + try: + self.parser.init_provider( + progress=self.progress, + log=self.print, + quest=self.quest, + quest_password=self.quest_password, + info=self._info, + ) + except AttributeError as e: + print(e, file=sys.stderr) + print('Please check if your inputed domain is supported by manga-py: ', file=sys.stderr) + print('- https://manga-py.com/manga-py/#resources-list', file=sys.stderr) + print('- https://manga-py.github.io/manga-py/#resources-list (alternative)', file=sys.stderr) + print('- https://yuru-yuri.github.io/manga-py/ (deprecated)', file=sys.stderr) + print('Make sure that your inputed URL is correct\n\nTrace:', file=sys.stderr) + raise e + self.parser.start() + self.__progress_bar and self.__progress_bar.value > 0 and self.__progress_bar.finish() + self.args.quiet or self.print(' ') + + def __init_progress(self, items_count: int, re_init: bool): + if re_init or not self.__progress_bar: + if re_init: + self.__progress_bar.finish() + bar = ProgressBar() + self.__progress_bar = bar(range(items_count)) + self.__progress_bar.init() + + def progress(self, items_count: int, current_item: int, re_init: bool = False): + if not items_count: + return + if not self.args.no_progress and not self.args.print_json: + current_val = 0 + if self.__progress_bar: + current_val = self.__progress_bar.value + self.__init_progress(items_count, re_init and current_val > 0) + self.__progress_bar.update(current_item) + + def print(self, text, **kwargs): + if os_name == 'nt': + text = str(text).encode().decode(sys.stdout.encoding, 'ignore') + self.args.quiet or print(text, **kwargs) + + def _single_quest(self, variants, title): + self.print(title) + for v in variants: + self.print(v) + return input() + + def _multiple_quest(self, variants, title): + self.print('Accept - blank line + enter') + self.print(title) + for v in variants: + self.print(v) + result = [] + while True: + _ = input().strip() + if not len(_): + return result + result.append(_) + + def quest(self, variants: enumerate, title: str, select_type=0): # 0 = single, 1 = multiple + if select_type: + return self._multiple_quest(variants, title) + return self._single_quest(variants, title) + + def quest_password(self, title): + return getpass(title) diff --git a/manga-py-stable_1.x/manga_py/cli/args.py b/manga-py-stable_1.x/manga_py/cli/args.py new file mode 100644 index 0000000..d2fb1f2 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/cli/args.py @@ -0,0 +1,338 @@ +''' +manga-py module for CLI and its options. 
+''' + +from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter, RawDescriptionHelpFormatter + +from manga_py.meta import __version__ + + +class DescriptionDefaultsHelpFormatter(ArgumentDefaultsHelpFormatter, + RawDescriptionHelpFormatter): + ''' + Class to format --help cli option with 2 features to output: + programm's description in a raw mode, + options default values. + ''' + + +def _image_args(args_parser): # pragma: no cover + args = args_parser.add_argument_group('Image options') + + args.add_argument( + '-E', + '--not-change-files-extension', + action='store_true', + help=( + 'Save downloaded files to archive "as is".' + ) + ) + + args.add_argument( + '-W', + '--no-webp', + action='store_true', + help=( + 'Convert `*.webp` images to `*.jpg` format.' + ) + ) + + +def _debug_args(args_parser): # pragma: no cover + args = args_parser.add_argument_group('Debug / Simulation options') + + args.add_argument( + '-h', + '--help', + action='help', + help=( + 'Show this help and exit.' + ) + ) + + args.add_argument( + '-j', + '--print-json', + action='store_true', + help=( + 'Print information about the results in the JSON format (after completion).' + ) + ) + + args.add_argument( + '-l', + '--simulate', + action='store_true', + help=( + 'Simulate running %(prog)s, where: ' + '1) do not download files and, ' + '2) do not write anything on disk.' + ) + ) + + args.add_argument( + '-i', + '--show-current-chapter-info', + action='store_true', + help=( + 'Show current processing chapter info.' + ) + ) + + args.add_argument( + '-b', + '--debug', + action='store_true', + help=( + 'Debug %(prog)s.' + ) + ) + + args.add_argument( + '-q', + '--quiet', + action='store_true', + help=( + 'Dont show any messages.' + ) + ) + + +def _downloading_args(args_parser): # pragma: no cover + args = args_parser.add_argument_group('Downloading options') + + args.add_argument( + '-s', + '--skip-volumes', + metavar='COUNT', + type=int, + help=( + 'Skip a total number, i.e. %(metavar)s, of volumes.' + ), + default=0 + ) + + args.add_argument( + '-m', + '--max-volumes', + metavar='COUNT', + type=int, + default=0, + help=( + 'Download a maximum number, i.e. %(metavar)s, of volumes. ' + 'E.g.: `--max-volumes 2` will download at most 2 volumes. ' + 'If %(metavar)s is `0` (zero) then it will download all available volumes.' + ) + ) + + args.add_argument( + '-a', + '--user-agent', + type=str, + help=( + 'Set an user-agent. ' + 'Don\'t work from protected sites.' + ) + ) + + args.add_argument( + '-x', + '--proxy', + type=str, + help=( + 'Set a http proxy.' + ) + ) + + args.add_argument( + '-e', + '--reverse-downloading', + action='store_true', + help=( + 'Download manga volumes in a reverse order. ' + 'By default, manga is downloaded in ascendent order ' + '(i.e. volume 00, volume 01, volume 02...). ' + 'If `--reverse-downloading` is actived, then manga is downloaded in descendent order ' + '(i.e. volume 99, volume 98, volume 97...).' + ) + ) + + args.add_argument( + '-w', + '--rewrite-exists-archives', + action='store_true', + help=( + '(Re)Download manga volume if it already exists locally in the directory destination. ' + 'Your manga files can be overwrited, so be careful.' + ) + ) + + args.add_argument( + '-t', + '--max-threads', + type=int, + default=None, + help=( + 'Set the maximum number of threads, i.e. MAX_THREADS, to be avaliable to manga-py. ' + 'Threads run in pseudo-parallel when execute the process to download the manga images.' 
+ ) + ) + + args.add_argument( + '-f', + '--zero-fill', + action='store_true', + help=( + 'Pad a `-0` (dash-and-zero) at right for all downloaded manga volume filenames. ' + 'E.g. from `vol_001.zip` to `vol_001-0.zip`. ' + 'It is useful to standardize the filenames between: ' + '1) normal manga volumes (e.g. vol_006.zip) and, ' + '2) abnormal manga volumes (e.g. vol_006-5.zip). ' + 'An abnormal manga volume is a released volume like: ' + 'extra chapters, ' + 'bonuses, ' + 'updated, ' + 'typos corrected, ' + 'spelling errors corrected; ' + 'and so on.' + ) + ) + + args.add_argument( + '-g', + '--with-manga-name', + action='store_true', + help=( + 'Pad the manga name at left for all downloaded manga volumes filenames. ' + 'E.g. from `vol_001.zip` to `manga_name-vol_001.zip`.' + ) + ) + + args.add_argument( + '-o', + '--override-archive-name', + metavar='ARCHIVE_NAME', + type=str, + default='', + dest='override_archive_name', + help=( + 'Pad %(metavar)s at left for all downloaded manga volumes filename. ' + 'E.g from `vol_001.zip` to `%(metavar)s-vol_001.zip`.' + ) + ) + + args.add_argument( + '-c', + '--min-free-space', + metavar='MB', + type=int, + default=100, + help=( + 'Alert when the minimum free disc space, i.e. MB, is reached. ' + 'Insert it in order of megabytes (Mb).' + ) + ) + + +def _reader_args(args_parser): # pragma: no cover + args = args_parser.add_argument_group('Archive options') + + args.add_argument( + '-z', + '--cbz', + action='store_true', + help=( + 'Make `*.cbz` archives (for reader).' + ) + ) + + args.add_argument( + '-r', + '--rename-pages', + action='store_true', + help=( + 'Normalize image filenames. ' + 'E.g. from `0_page_1.jpg` to `0001.jpg`.' + ) + ) + + +def get_cli_arguments() -> ArgumentParser: # pragma: no cover + ''' + Method to generate manga-py CLI with its options. + ''' + args_parser = ArgumentParser( + add_help=False, + formatter_class=DescriptionDefaultsHelpFormatter, + prog="manga-py", + description=( + '%(prog)s is the universal manga downloader (for your offline reading).\n ' + 'Site: https://manga-py.com/manga-py/\n ' + 'Source-code: https://github.com/manga-py/manga-py\n ' + 'Version: ' + __version__ + ), + epilog=( + 'So, that is how %(prog)s can be executed to download yours favourite mangas.\n' + 'Enjoy! 😉' + ) + ) + + args = args_parser.add_argument_group('General options') + + args.add_argument( + 'url', + metavar='URL', + type=str, + help=( + '%(metavar)s, i.e. link from manga, to be downloaded.' + ) + ) + + args.add_argument( + '-v', + '--version', + action='version', + version=__version__, + help=( + 'Show %(prog)s\'s version number and exit.' + ) + ) + + args.add_argument( + '-n', + '--name', + metavar='NAME', + type=str, + default='', + help=( + 'Rename manga, i.e. by %(metavar)s, and its folder to where it will be saved locally.' + ) + ) + + args.add_argument( + '-d', + '--destination', + metavar='PATH', + type=str, + default='Manga', + help=( + 'Destination folder to where the manga will be saved locally. ' + 'The path will be `./%(metavar)s/manga_name/`.' + ) + ) + + args.add_argument( + '-P', + '--no-progress', + action='store_true', + help=( + 'Don\'t show progress bar.' 
+ ) + ) + + _image_args(args_parser) + _reader_args(args_parser) + _downloading_args(args_parser) + _debug_args(args_parser) + + return args_parser diff --git a/manga-py-stable_1.x/manga_py/crypt/__init__.py b/manga-py-stable_1.x/manga_py/crypt/__init__.py new file mode 100644 index 0000000..6832935 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/crypt/__init__.py @@ -0,0 +1,8 @@ +from .ac_qq_com import AcQqComCrypt +from .base_lib import BaseLib +from .kissmanga_com import KissMangaComCrypt +from .mangago_me import MangaGoMe +from .mangarock_com import MangaRockComCrypt +from .manhuagui_com import ManhuaGuiComCrypt +from .puzzle import Puzzle +from .sunday_webry_com import SundayWebryCom diff --git a/manga-py-stable_1.x/manga_py/crypt/ac_qq_com.py b/manga-py-stable_1.x/manga_py/crypt/ac_qq_com.py new file mode 100644 index 0000000..29aa250 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/crypt/ac_qq_com.py @@ -0,0 +1,38 @@ +class AcQqComCrypt: + _provider = None + _site_key = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/=" + + def __init__(self, provider): + self._provider = provider + + def decode(self, data): + data = self._provider.re.sub('[^A-Za-z0-9%+/=]', '', data) + a = '' + e = 0 + while e < len(data) - 4: + e += 1 + b = self._site_key.find(data[e]) + e += 1 + d = self._site_key.find(data[e]) + e += 1 + f = self._site_key.find(data[e]) + e += 1 + g = self._site_key.find(data[e]) + + b = b << 2 | d >> 4 + d = (d & 15) << 4 | f >> 2 + h = (f & 3) << 6 | g + a += chr(b) + + if f != 64: + a += chr(d) + if g != 64: + a += chr(h) + return self._protect(a) + + def _protect(self, data): + try: + data = self._provider.re.search('({.+}})', data).group(1) + return self._provider.json.loads(data) + except Exception: + return {} diff --git a/manga-py-stable_1.x/manga_py/crypt/aes.js b/manga-py-stable_1.x/manga_py/crypt/aes.js new file mode 100644 index 0000000..827503c --- /dev/null +++ b/manga-py-stable_1.x/manga_py/crypt/aes.js @@ -0,0 +1,35 @@ +/* +CryptoJS v3.1.2 +code.google.com/p/crypto-js +(c) 2009-2013 by Jeff Mott. All rights reserved. 
+code.google.com/p/crypto-js/wiki/License +*/ +var CryptoJS=CryptoJS||function(u,p){var d={},l=d.lib={},s=function(){},t=l.Base={extend:function(a){s.prototype=this;var c=new s;a&&c.mixIn(a);c.hasOwnProperty("init")||(c.init=function(){c.$super.init.apply(this,arguments)});c.init.prototype=c;c.$super=this;return c},create:function(){var a=this.extend();a.init.apply(a,arguments);return a},init:function(){},mixIn:function(a){for(var c in a)a.hasOwnProperty(c)&&(this[c]=a[c]);a.hasOwnProperty("toString")&&(this.toString=a.toString)},clone:function(){return this.init.prototype.extend(this)}}, +r=l.WordArray=t.extend({init:function(a,c){a=this.words=a||[];this.sigBytes=c!=p?c:4*a.length},toString:function(a){return(a||v).stringify(this)},concat:function(a){var c=this.words,e=a.words,j=this.sigBytes;a=a.sigBytes;this.clamp();if(j%4)for(var k=0;k>>2]|=(e[k>>>2]>>>24-8*(k%4)&255)<<24-8*((j+k)%4);else if(65535>>2]=e[k>>>2];else c.push.apply(c,e);this.sigBytes+=a;return this},clamp:function(){var a=this.words,c=this.sigBytes;a[c>>>2]&=4294967295<< +32-8*(c%4);a.length=u.ceil(c/4)},clone:function(){var a=t.clone.call(this);a.words=this.words.slice(0);return a},random:function(a){for(var c=[],e=0;e>>2]>>>24-8*(j%4)&255;e.push((k>>>4).toString(16));e.push((k&15).toString(16))}return e.join("")},parse:function(a){for(var c=a.length,e=[],j=0;j>>3]|=parseInt(a.substr(j, +2),16)<<24-4*(j%8);return new r.init(e,c/2)}},b=w.Latin1={stringify:function(a){var c=a.words;a=a.sigBytes;for(var e=[],j=0;j>>2]>>>24-8*(j%4)&255));return e.join("")},parse:function(a){for(var c=a.length,e=[],j=0;j>>2]|=(a.charCodeAt(j)&255)<<24-8*(j%4);return new r.init(e,c)}},x=w.Utf8={stringify:function(a){try{return decodeURIComponent(escape(b.stringify(a)))}catch(c){throw Error("Malformed UTF-8 data");}},parse:function(a){return b.parse(unescape(encodeURIComponent(a)))}}, +q=l.BufferedBlockAlgorithm=t.extend({reset:function(){this._data=new r.init;this._nDataBytes=0},_append:function(a){"string"==typeof a&&(a=x.parse(a));this._data.concat(a);this._nDataBytes+=a.sigBytes},_process:function(a){var c=this._data,e=c.words,j=c.sigBytes,k=this.blockSize,b=j/(4*k),b=a?u.ceil(b):u.max((b|0)-this._minBufferSize,0);a=b*k;j=u.min(4*a,j);if(a){for(var q=0;q>>2]>>>24-8*(r%4)&255)<<16|(l[r+1>>>2]>>>24-8*((r+1)%4)&255)<<8|l[r+2>>>2]>>>24-8*((r+2)%4)&255,v=0;4>v&&r+0.75*v>>6*(3-v)&63));if(l=t.charAt(64))for(;d.length%4;)d.push(l);return d.join("")},parse:function(d){var l=d.length,s=this._map,t=s.charAt(64);t&&(t=d.indexOf(t),-1!=t&&(l=t));for(var t=[],r=0,w=0;w< +l;w++)if(w%4){var v=s.indexOf(d.charAt(w-1))<<2*(w%4),b=s.indexOf(d.charAt(w))>>>6-2*(w%4);t[r>>>2]|=(v|b)<<24-8*(r%4);r++}return p.create(t,r)},_map:"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/="}})(); +(function(u){function p(b,n,a,c,e,j,k){b=b+(n&a|~n&c)+e+k;return(b<>>32-j)+n}function d(b,n,a,c,e,j,k){b=b+(n&c|a&~c)+e+k;return(b<>>32-j)+n}function l(b,n,a,c,e,j,k){b=b+(n^a^c)+e+k;return(b<>>32-j)+n}function s(b,n,a,c,e,j,k){b=b+(a^(n|~c))+e+k;return(b<>>32-j)+n}for(var t=CryptoJS,r=t.lib,w=r.WordArray,v=r.Hasher,r=t.algo,b=[],x=0;64>x;x++)b[x]=4294967296*u.abs(u.sin(x+1))|0;r=r.MD5=v.extend({_doReset:function(){this._hash=new w.init([1732584193,4023233417,2562383102,271733878])}, +_doProcessBlock:function(q,n){for(var a=0;16>a;a++){var c=n+a,e=q[c];q[c]=(e<<8|e>>>24)&16711935|(e<<24|e>>>8)&4278255360}var 
a=this._hash.words,c=q[n+0],e=q[n+1],j=q[n+2],k=q[n+3],z=q[n+4],r=q[n+5],t=q[n+6],w=q[n+7],v=q[n+8],A=q[n+9],B=q[n+10],C=q[n+11],u=q[n+12],D=q[n+13],E=q[n+14],x=q[n+15],f=a[0],m=a[1],g=a[2],h=a[3],f=p(f,m,g,h,c,7,b[0]),h=p(h,f,m,g,e,12,b[1]),g=p(g,h,f,m,j,17,b[2]),m=p(m,g,h,f,k,22,b[3]),f=p(f,m,g,h,z,7,b[4]),h=p(h,f,m,g,r,12,b[5]),g=p(g,h,f,m,t,17,b[6]),m=p(m,g,h,f,w,22,b[7]), +f=p(f,m,g,h,v,7,b[8]),h=p(h,f,m,g,A,12,b[9]),g=p(g,h,f,m,B,17,b[10]),m=p(m,g,h,f,C,22,b[11]),f=p(f,m,g,h,u,7,b[12]),h=p(h,f,m,g,D,12,b[13]),g=p(g,h,f,m,E,17,b[14]),m=p(m,g,h,f,x,22,b[15]),f=d(f,m,g,h,e,5,b[16]),h=d(h,f,m,g,t,9,b[17]),g=d(g,h,f,m,C,14,b[18]),m=d(m,g,h,f,c,20,b[19]),f=d(f,m,g,h,r,5,b[20]),h=d(h,f,m,g,B,9,b[21]),g=d(g,h,f,m,x,14,b[22]),m=d(m,g,h,f,z,20,b[23]),f=d(f,m,g,h,A,5,b[24]),h=d(h,f,m,g,E,9,b[25]),g=d(g,h,f,m,k,14,b[26]),m=d(m,g,h,f,v,20,b[27]),f=d(f,m,g,h,D,5,b[28]),h=d(h,f, +m,g,j,9,b[29]),g=d(g,h,f,m,w,14,b[30]),m=d(m,g,h,f,u,20,b[31]),f=l(f,m,g,h,r,4,b[32]),h=l(h,f,m,g,v,11,b[33]),g=l(g,h,f,m,C,16,b[34]),m=l(m,g,h,f,E,23,b[35]),f=l(f,m,g,h,e,4,b[36]),h=l(h,f,m,g,z,11,b[37]),g=l(g,h,f,m,w,16,b[38]),m=l(m,g,h,f,B,23,b[39]),f=l(f,m,g,h,D,4,b[40]),h=l(h,f,m,g,c,11,b[41]),g=l(g,h,f,m,k,16,b[42]),m=l(m,g,h,f,t,23,b[43]),f=l(f,m,g,h,A,4,b[44]),h=l(h,f,m,g,u,11,b[45]),g=l(g,h,f,m,x,16,b[46]),m=l(m,g,h,f,j,23,b[47]),f=s(f,m,g,h,c,6,b[48]),h=s(h,f,m,g,w,10,b[49]),g=s(g,h,f,m, +E,15,b[50]),m=s(m,g,h,f,r,21,b[51]),f=s(f,m,g,h,u,6,b[52]),h=s(h,f,m,g,k,10,b[53]),g=s(g,h,f,m,B,15,b[54]),m=s(m,g,h,f,e,21,b[55]),f=s(f,m,g,h,v,6,b[56]),h=s(h,f,m,g,x,10,b[57]),g=s(g,h,f,m,t,15,b[58]),m=s(m,g,h,f,D,21,b[59]),f=s(f,m,g,h,z,6,b[60]),h=s(h,f,m,g,C,10,b[61]),g=s(g,h,f,m,j,15,b[62]),m=s(m,g,h,f,A,21,b[63]);a[0]=a[0]+f|0;a[1]=a[1]+m|0;a[2]=a[2]+g|0;a[3]=a[3]+h|0},_doFinalize:function(){var b=this._data,n=b.words,a=8*this._nDataBytes,c=8*b.sigBytes;n[c>>>5]|=128<<24-c%32;var e=u.floor(a/ +4294967296);n[(c+64>>>9<<4)+15]=(e<<8|e>>>24)&16711935|(e<<24|e>>>8)&4278255360;n[(c+64>>>9<<4)+14]=(a<<8|a>>>24)&16711935|(a<<24|a>>>8)&4278255360;b.sigBytes=4*(n.length+1);this._process();b=this._hash;n=b.words;for(a=0;4>a;a++)c=n[a],n[a]=(c<<8|c>>>24)&16711935|(c<<24|c>>>8)&4278255360;return b},clone:function(){var b=v.clone.call(this);b._hash=this._hash.clone();return b}});t.MD5=v._createHelper(r);t.HmacMD5=v._createHmacHelper(r)})(Math); +(function(){var u=CryptoJS,p=u.lib,d=p.Base,l=p.WordArray,p=u.algo,s=p.EvpKDF=d.extend({cfg:d.extend({keySize:4,hasher:p.MD5,iterations:1}),init:function(d){this.cfg=this.cfg.extend(d)},compute:function(d,r){for(var p=this.cfg,s=p.hasher.create(),b=l.create(),u=b.words,q=p.keySize,p=p.iterations;u.length>>2]&255}};d.BlockCipher=v.extend({cfg:v.cfg.extend({mode:b,padding:q}),reset:function(){v.reset.call(this);var a=this.cfg,b=a.iv,a=a.mode;if(this._xformMode==this._ENC_XFORM_MODE)var c=a.createEncryptor;else c=a.createDecryptor,this._minBufferSize=1;this._mode=c.call(a, +this,b&&b.words)},_doProcessBlock:function(a,b){this._mode.processBlock(a,b)},_doFinalize:function(){var a=this.cfg.padding;if(this._xformMode==this._ENC_XFORM_MODE){a.pad(this._data,this.blockSize);var b=this._process(!0)}else b=this._process(!0),a.unpad(b);return b},blockSize:4});var n=d.CipherParams=l.extend({init:function(a){this.mixIn(a)},toString:function(a){return(a||this.formatter).stringify(this)}}),b=(p.format={}).OpenSSL={stringify:function(a){var b=a.ciphertext;a=a.salt;return(a?s.create([1398893684, +1701076831]).concat(a).concat(b):b).toString(r)},parse:function(a){a=r.parse(a);var 
b=a.words;if(1398893684==b[0]&&1701076831==b[1]){var c=s.create(b.slice(2,4));b.splice(0,4);a.sigBytes-=16}return n.create({ciphertext:a,salt:c})}},a=d.SerializableCipher=l.extend({cfg:l.extend({format:b}),encrypt:function(a,b,c,d){d=this.cfg.extend(d);var l=a.createEncryptor(c,d);b=l.finalize(b);l=l.cfg;return n.create({ciphertext:b,key:c,iv:l.iv,algorithm:a,mode:l.mode,padding:l.padding,blockSize:a.blockSize,formatter:d.format})}, +decrypt:function(a,b,c,d){d=this.cfg.extend(d);b=this._parse(b,d.format);return a.createDecryptor(c,d).finalize(b.ciphertext)},_parse:function(a,b){return"string"==typeof a?b.parse(a,this):a}}),p=(p.kdf={}).OpenSSL={execute:function(a,b,c,d){d||(d=s.random(8));a=w.create({keySize:b+c}).compute(a,d);c=s.create(a.words.slice(b),4*c);a.sigBytes=4*b;return n.create({key:a,iv:c,salt:d})}},c=d.PasswordBasedCipher=a.extend({cfg:a.cfg.extend({kdf:p}),encrypt:function(b,c,d,l){l=this.cfg.extend(l);d=l.kdf.execute(d, +b.keySize,b.ivSize);l.iv=d.iv;b=a.encrypt.call(this,b,c,d.key,l);b.mixIn(d);return b},decrypt:function(b,c,d,l){l=this.cfg.extend(l);c=this._parse(c,l.format);d=l.kdf.execute(d,b.keySize,b.ivSize,c.salt);l.iv=d.iv;return a.decrypt.call(this,b,c,d.key,l)}})}(); +(function(){for(var u=CryptoJS,p=u.lib.BlockCipher,d=u.algo,l=[],s=[],t=[],r=[],w=[],v=[],b=[],x=[],q=[],n=[],a=[],c=0;256>c;c++)a[c]=128>c?c<<1:c<<1^283;for(var e=0,j=0,c=0;256>c;c++){var k=j^j<<1^j<<2^j<<3^j<<4,k=k>>>8^k&255^99;l[e]=k;s[k]=e;var z=a[e],F=a[z],G=a[F],y=257*a[k]^16843008*k;t[e]=y<<24|y>>>8;r[e]=y<<16|y>>>16;w[e]=y<<8|y>>>24;v[e]=y;y=16843009*G^65537*F^257*z^16843008*e;b[k]=y<<24|y>>>8;x[k]=y<<16|y>>>16;q[k]=y<<8|y>>>24;n[k]=y;e?(e=z^a[a[a[G^z]]],j^=a[a[j]]):e=j=1}var H=[0,1,2,4,8, +16,32,64,128,27,54],d=d.AES=p.extend({_doReset:function(){for(var a=this._key,c=a.words,d=a.sigBytes/4,a=4*((this._nRounds=d+6)+1),e=this._keySchedule=[],j=0;j>>24]<<24|l[k>>>16&255]<<16|l[k>>>8&255]<<8|l[k&255]):(k=k<<8|k>>>24,k=l[k>>>24]<<24|l[k>>>16&255]<<16|l[k>>>8&255]<<8|l[k&255],k^=H[j/d|0]<<24);e[j]=e[j-d]^k}c=this._invKeySchedule=[];for(d=0;dd||4>=j?k:b[l[k>>>24]]^x[l[k>>>16&255]]^q[l[k>>> +8&255]]^n[l[k&255]]},encryptBlock:function(a,b){this._doCryptBlock(a,b,this._keySchedule,t,r,w,v,l)},decryptBlock:function(a,c){var d=a[c+1];a[c+1]=a[c+3];a[c+3]=d;this._doCryptBlock(a,c,this._invKeySchedule,b,x,q,n,s);d=a[c+1];a[c+1]=a[c+3];a[c+3]=d},_doCryptBlock:function(a,b,c,d,e,j,l,f){for(var m=this._nRounds,g=a[b]^c[0],h=a[b+1]^c[1],k=a[b+2]^c[2],n=a[b+3]^c[3],p=4,r=1;r>>24]^e[h>>>16&255]^j[k>>>8&255]^l[n&255]^c[p++],s=d[h>>>24]^e[k>>>16&255]^j[n>>>8&255]^l[g&255]^c[p++],t= +d[k>>>24]^e[n>>>16&255]^j[g>>>8&255]^l[h&255]^c[p++],n=d[n>>>24]^e[g>>>16&255]^j[h>>>8&255]^l[k&255]^c[p++],g=q,h=s,k=t;q=(f[g>>>24]<<24|f[h>>>16&255]<<16|f[k>>>8&255]<<8|f[n&255])^c[p++];s=(f[h>>>24]<<24|f[k>>>16&255]<<16|f[n>>>8&255]<<8|f[g&255])^c[p++];t=(f[k>>>24]<<24|f[n>>>16&255]<<16|f[g>>>8&255]<<8|f[h&255])^c[p++];n=(f[n>>>24]<<24|f[g>>>16&255]<<16|f[h>>>8&255]<<8|f[k&255])^c[p++];a[b]=q;a[b+1]=s;a[b+2]=t;a[b+3]=n},keySize:8});u.AES=p._createHelper(d)})(); diff --git a/manga-py-stable_1.x/manga_py/crypt/aes_zp.js b/manga-py-stable_1.x/manga_py/crypt/aes_zp.js new file mode 100644 index 0000000..18f43ef --- /dev/null +++ b/manga-py-stable_1.x/manga_py/crypt/aes_zp.js @@ -0,0 +1,7 @@ +/* +CryptoJS v3.1.2 +code.google.com/p/crypto-js +(c) 2009-2013 by Jeff Mott. All rights reserved. 
+code.google.com/p/crypto-js/wiki/License +*/ +CryptoJS.pad.ZeroPadding={pad:function(a,c){var b=4*c;a.clamp();a.sigBytes+=b-(a.sigBytes%b||b)},unpad:function(a){for(var c=a.words,b=a.sigBytes-1;!(c[b>>>2]>>>24-8*(b%4)&255);)b--;a.sigBytes=b+1}}; diff --git a/manga-py-stable_1.x/manga_py/crypt/base_lib.py b/manga-py-stable_1.x/manga_py/crypt/base_lib.py new file mode 100644 index 0000000..55cd521 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/crypt/base_lib.py @@ -0,0 +1,126 @@ +import base64 +import codecs +import gzip +import zlib +from binascii import unhexlify +from struct import pack, unpack + +from Crypto.Cipher import AES +from Crypto.Hash import SHA256, MD5 +from execjs import compile + + +class BaseLib: + + @staticmethod + def decode_escape(data): # pragma: no cover + + if isinstance(data, str): + data = data.encode() + try: + data = codecs.escape_decode(data) + return data[0] + except Exception: + return '' + + @staticmethod + def encode_hex(data): # pragma: no cover + return codecs.decode(data, 'hex') + + @staticmethod + def to_sha_256(data): # pragma: no cover + if isinstance(data, str): + data = data.encode() + sha = SHA256.new() + sha.update(data) + return sha.digest() + + @staticmethod + def decrypt_aes(iv, key, data, mode: int = AES.MODE_CBC): # pragma: no cover + aes = AES.new(key, mode, iv) + return aes.decrypt(data) + + @staticmethod + def base64decode(data, altchars=None, validate=False): # pragma: no cover + return base64.b64decode(data, altchars, validate) + + @staticmethod + def base64encode(data, altchars=None): # pragma: no cover + return base64.b64encode(data, altchars) + + @staticmethod + def exec_js(source, js): # pragma: no cover + return compile(source).eval(js) + + @staticmethod + def gunzip(data): # pragma: no cover + return gzip.decompress(data) + + @staticmethod + def gzip(data, lvl: int = 9): # pragma: no cover + return gzip.compress(data, lvl) + + @staticmethod + def zlib_d(data, **kwargs): # pragma: no cover + return zlib.decompress(data, **kwargs) + + @staticmethod + def zlib_c(data, **kwargs): # pragma: no cover + return zlib.compress(data, **kwargs) + + @staticmethod + def md5(string): # pragma: no cover + if isinstance(string, str): + string = string.encode() + _ = MD5.new() + _.update(string) + return _ + + @staticmethod + def pack(fmt, *args): # pragma: no cover + return pack(fmt, *args) + + @staticmethod + def unpack(fmt, string): # pragma: no cover + return unpack(fmt, string) + + @staticmethod + def pack_auto(int_list) -> bytes: + """ + :param int_list: list + :return: str + """ + base_frm = '{}B'.format(len(int_list)) + return pack(base_frm, *int_list) + + @staticmethod + def unpack_auto(string) -> list: + """ + :param string: str + :return: tuple + """ + if isinstance(string, str): + string = string.encode() + + return list(string) + + @staticmethod + def str2hex(string): + hex_str = '' + if isinstance(string, bytes): + string = string.decode() + for char in string: + int_char = ord(char) + hex_num = hex(int_char).lstrip("0x") + hex_str += hex_num + return hex_str + + @staticmethod + def hex2str(string): + clear_str = '' + if isinstance(string, bytes): + string = string.decode() + for counter in range(0, len(string), 2): + hex_char = string[counter] + string[counter + 1] + clear_str += unhexlify(hex_char) + return clear_str diff --git a/manga-py-stable_1.x/manga_py/crypt/kissmanga_com.py b/manga-py-stable_1.x/manga_py/crypt/kissmanga_com.py new file mode 100644 index 0000000..f330bb3 --- /dev/null +++ 
b/manga-py-stable_1.x/manga_py/crypt/kissmanga_com.py @@ -0,0 +1,16 @@ +from .base_lib import BaseLib +from sys import stderr + + +class KissMangaComCrypt(BaseLib): + + def decrypt(self, iv, key, data): + iv = self.encode_hex(iv) + key = self.to_sha_256(key) + data = self.base64decode(data) + + try: + return self.decrypt_aes(iv, key, data) + except Exception as e: + print(e, file=stderr) + return False diff --git a/manga-py-stable_1.x/manga_py/crypt/mangago_me.py b/manga-py-stable_1.x/manga_py/crypt/mangago_me.py new file mode 100644 index 0000000..c011f3a --- /dev/null +++ b/manga-py-stable_1.x/manga_py/crypt/mangago_me.py @@ -0,0 +1,68 @@ +from pathlib import Path + +from .base_lib import BaseLib +from .puzzle import Puzzle + + +class MangaGoMe(BaseLib): + _key = 'e10adc3949ba59abbe56e057f20f883e' + _iv = '1234567890abcdef1234567890abcdef' + + # https://codepen.io/1271/pen/mKYLrG + def decrypt(self, data): + scripts = [ + 'aes.js', + 'aes_zp.js', + ] + script = '' + path = Path(__file__).resolve().parent + for i in scripts: + with open(str(path.joinpath(i)), 'r') as f: + script += f.read() + decrypted = self.exec_js(script, + 'CryptoJS.AES.decrypt("%s",CryptoJS.enc.Hex.parse("%s"),{iv:CryptoJS.enc.Hex.parse("%s"),padding:CryptoJS.pad.ZeroPadding}).toString(CryptoJS.enc.Utf8)' % ( + data, self._key, self._iv)) + order_js = """function replacePos(e,r,i){return e.substr(0,r)+i+e.substring(r+1,e.length)}function dorder(e,r){for(j=r.length-1;j>=0;j--)for(i=e.length-1;i-r[j]>=0;i--)i%2!=0&&(temp=e[i-r[j]],e=replacePos(e=replacePos(e,i-r[j],e[i]),i,temp));return e}""" + code = decrypted[19] + decrypted[23] + decrypted[31] + decrypted[39] + decrypted = decrypted[:19] + decrypted[20:23] + decrypted[24:31] + decrypted[32:39] + decrypted[40:] + return self.exec_js(order_js, 'dorder("%s","%s")' % (decrypted, code)) + + @staticmethod + def puzzle(_path, _dst, url): + values = { + '60a2b0ed56cd458c4633d04b1b76b7e9': '18a72a69a64a13a1a43a3aa42a23a66a26a19a51a54a78a34a17a31a35a15a58a29a61a48a73a74a44a52a60a24a63a20a32a7a45a53a75a55a62a59a41a76a68a2a36a21a10a38a33a71a40a67a22a4a50a80a65a27a37a47a70a14a28a16a6a56a30a57a5a11a79a9a77a46a39a25a49a8a12', + '400df5e8817565e28b2e141c533ed7db': '61a74a10a45a3a37a72a22a57a39a25a56a52a29a70a60a67a41a63a55a27a28a43a18a5a9a8a40a17a48a44a79a38a47a32a73a4a6a13a34a33a49a2a42a50a76a54a36a35a14a58a7a69a46a16a30a21a11aa51a53a77a26a31a1a19a20a80a24a62a68a59a66a75a12a64a78a71a15a65a23', + '84ba0d8098f405b14f4dbbcc04c93bac': '61a26a35a16a55a10a72a37a2a60a66a65a33a44a7a28a70a62a32a56a30a40a58a15a74a47aa36a78a75a11a6a77a67a39a23a9a31a64a59a13a24a80a14a38a45a21a63a19a51a17a34a50a46a5a29a73a8a57a69a48a68a49a71a41a12a52a18a79a76a54a42a22a4a1a3a53a20a25a43a27', + '56665708741979f716e5bd64bf733c33': '23a7a41a48a57a27a69a36a76a62a40a75a26a2a51a6a10a65a43a24a1aa20a71a28a30a13a38a79a78a72a14a49a55a56a58a25a70a12a80a3a66a11a39a42a17a15a54a45a34a74a31a8a61a46a73a63a22a64a19a77a50a9a59a37a68a52a18a32a16a33a60a67a21a44a53a5a35a4a29a47', + '37abcb7424ce8df47ccb1d2dd9144b49': '67a45a39a72a35a38a61a11a51a60a13a22a31a25a75a30a74a43a69a50a6a26a16a49a77a68a59a64a17a56a18a1a10a54a44a62a53a80a5a23a48a32a29a79a24a70a28a58a71a3a52a42a55a9a14a36a73a34a2a27a57a0a21a41a33a37a76a8a40a65a7a20a12a19a47a4a78a15a63a66a46', + '874b83ba76a7e783d13abc2dabc08d76': '26a59a42a43a4a20a61a28a12a64a37a52a2a77a34a13a46a74a70a0a44a29a73a66a55a38a69a67a62a9a63a6a54a79a21a33a8a58a40a47a71a49a22a50a57a78a56a25a17a15a36a16a48a32a5a10a14a80a24a72a76a45a3a53a23a41a60a11a65a19a27a51a68a35a31a1a75a39a30a7a18', + 
'930b87ad89c2e2501f90d0f0e92a6b97': '9a29a49a67a62a40a28a50a64a77a46a31a16a73a14a45a51a44a7a76a22a78a68a37a74a69a25a65a41a11a52aa18a36a10a38a12a15a2a58a48a8a27a75a20a4a80a61a55a42a13a43a47a39a35a60a26a30a63a66a57a33a72a24a71a34a23a3a70a54a56a32a79a5a21a6a59a53a17a1a19', + '1269606c6c3d8bb6508426468216d6b1': '49a15a0a60a14a26a34a69a61a24a35a4a77a80a70a40a39a6a68a17a41a56a28a46a79a16a21a1a37a42a44a58a78a18a52a73a32a9a12a50a8a13a20a19a67a36a45a75a48a10a65a7a38a66a3a2a43a27a29a31a72a74a55a23a54a22a59a57a11a62a47a53a30a5a64a25a76a71a51a33a63', + '33a3b21bb2d14a09d15f995224ae4284': '30a59a35a34a42a8a10a56a70a64a48a69a26a18a6a16a54a24a73a79a68a33a32a2a63a53a31a14a17a57a41a80a76a40a60a12a43a29a39a4a77a58a66a36a38a52a13a19a0a75a28a55a25a61a71a11a67a49a23a45a5a15a1a50a51a9a44a47a65a74a72a27a7a37a46a20a22a62a78a21a3', + '9ae6640761b947e61624671ef841ee78': '62a25a21a75a42a61a73a59a23a19a66a38a71a70a6a55a3a16a43a32a53a37a41a28a49a63a47a17a7a30a78a46a20a67a56a79a65a14a69a60a8a52a22a9a24a2a4a13a36a27a0a18a33a12a44a5a76a26a29a40a1a11a64a48a39a51a80a72a68a10a58a35a77a54a34a74a57a31a50a45a15', + 'a67e15ed870fe4aab0a502478a5c720f': '8a12a59a52a24a13a37a21a55a56a41a71a65a43a40a66a11a79a67a44a33a20a72a2a31a42a29a34a58a60a27a48a28a15a35a51a76a80a0a63a69a53a39a46a64a50a75a1a57a9a62a74a18a16a73a14a17a6a19a61a23a38a10a3a32a26a36a54a4a30a45a47a70a22a7a68a49a77a5a25a78', + 'b6a2f75185754b691e4dfe50f84db57c': '47a63a76a58a37a4a56a21a1a48a62a2a36a44a34a42a23a9a60a72a11a74a70a20a77a16a15a35a69a0a55a46a24a6a32a75a68a43a41a78a31a71a52a33a67a25a80a30a5a28a40a65a39a14a29a64a3a53a49a59a12a66a38a27a79a45a18a22a8a61a50a17a51a10a26a13a57a19a7a54a73', + 'db99689c5a26a09d126c7089aedc0d86': '57a31a46a61a55a41a26a2a39a24a75a4a45a13a23a51a15a8a64a37a72a34a12a3a79a42a80a17a62a49a19a77a48a68a78a65a14a10a29a16a20a76a38a36a54a30a53a40a33a21a44a22a32a5a1a7a70a67a58a0a71a74a43a66a6a63a35a56a73a9a27a25a59a47a52a11a50a18a28a60a69', + 'd320d2647d70c068b89853e1a269c609': '77a38a53a40a16a3a20a18a63a9a24a64a50a61a45a59a27a37a8a34a11a55a79a13a47a68a12a22a46a33a1a69a52a54a31a23a62a43a0a2a35a28a57a36a51a78a70a5a32a75a41a30a4a80a19a21a42a71a49a10a56a74a17a7a25a6a14a73a29a44a48a39a60a58a15a66a67a72a65a76a26', + 'c587e77362502aaedad5b7cddfbe3a0d': '50aa59a70a68a30a56a10a49a43a45a29a23a28a61a15a40a71a14a44a32a34a17a26a63a76a75a33a74a12a11a21a67a31a19a80a7a64a8a3a51a53a38a18a6a42a27a9a52a20a41a60a1a22a77a16a54a47a79a24a78a2a46a37a73a65a36a35a39a5a4a25a72a13a62a55a57a58a69a66a48', + 'f4ab0903149b5d94baba796a5cf05938': '40a37a55a73a18a42a15a59a50a13a22a63a52a58a6a80a47a17a38a71a74a70a30a11a10a19a0a31a36a21a51a68a1a3a14a66a45a2a79a7a76a75a8a67a20a78a25a69a43a28a35a60a4a23a65a54a34a9a5a39a27a57a26a33a12a24a46a72a56a44a49a61a64a29a53a48a32a62a41a16a77', + 'f5baf770212313f5e9532ec5e6103b61': '55a69a78a75a38a25a20a60a6a80a46a5a48a18a23a24a17a67a64a70a63a57a22a10a49a19a8a16a11a12a61a76a34a27a54a73a44a0a56a3a15a29a28a13a4a2a7a77a74a35a37a26a30a58a9a71a50a1a43a79a47a32a14a53a52a66a72a59a68a31a42a45a62a51a40a39a33a65a41a36a21', + 'e2169a4bfd805e9aa21d3112d498d68c': '54a34a68a69a26a20a66a1a67a74a22a39a63a70a5a37a75a15a6a14a62a50a46a35a44a45a28a8a40a25a29a76a51a77a17a47a0a42a2a9a48a27a13a64a58a57a18a30a80a23a61a36a60a59a71a32a7a38a41a78a12a49a43a79a24a31a52a19a3a53a72a10a73a11a33a16a4a55a65a21a56', + '1796550d20f64decb317f9b770ba0e78': 
'37a55a39a79a2a53a75a1a30a32a3a13a25a49a45a5a60a62a71a78a63a24a27a33a19a64a67a57a0a8a54a9a41a61a50a73a7a65a58a51a15a14a43a4a35a77a68a72a34a80a22a17a48a10a70a46a40a28a20a74a52a23a38a76a42a18a66a11a59a6a69a31a56a16a47a21a12a44a36a29a26', + 'bf53be6753a0037c6d80ca670f5d12d5': '55a41a18a19a4a13a36a12a56a69a64a80a30a39a57a50a48a26a46a73a17a52a49a66a11a25a61a51a68a24a70a7a67a53a43a8a29a75a65a42a38a58a9a28a0a78a54a31a22a5a15a3a79a77a59a23a45a40a47a44a6a2a1a35a14a62a63a76a20a16a32a21a71a10a74a60a34a37a33a72a27', + '6c41ff7fbed622aa76e19f3564e5d52a': '40a3a13a59a68a34a66a43a67a14a26a46a8a24a33a73a69a31a2a57a10a51a62a77a74a41a47a35a64a52a15a53a6a80a76a50a28a75a56a79a17a45a25a49a48a65a78a27a9a63a12a55a32a21a58a38a0a71a44a30a61a36a16a23a20a70a22a37a4a19a7a60a11a5a18a39a1a54a72a29a42', + } + ik = '18a72a69a64a13a1a43a3aa42a23a66a26a19a51a54a78a34a17a31a35a15a58a29a61a48a73a74a44a52a60a24a63a20a32a7a45a53a75a55a62a59a41a76a68a2a36a21a10a38a33a71a40a67a22a4a50a80a65a27a37a47a70a14a28a16a6a56a30a57a5a11a79a9a77a46a39a25a49a8a12' + + for k in values: + if ~url.find(k): + ik = values[k] + matrix = {} + for n, i in enumerate(ik.split('a')): + if len(i) > 0: + m = int(i) + else: + m = 0 + matrix[n] = m + puzzle = Puzzle(9, 9, matrix, 0) + puzzle.de_scramble(_path, _dst) diff --git a/manga-py-stable_1.x/manga_py/crypt/mangarock_com.py b/manga-py-stable_1.x/manga_py/crypt/mangarock_com.py new file mode 100644 index 0000000..e96ab7d --- /dev/null +++ b/manga-py-stable_1.x/manga_py/crypt/mangarock_com.py @@ -0,0 +1,22 @@ +from .base_lib import BaseLib + + +class MangaRockComCrypt(BaseLib): + def decrypt(self, string): + if isinstance(string, str): + string = string.encode() + n = len(string) + 7 + + tmp = self.pack_auto([82, 73, 70, 70]) + + tmp += self.pack_auto([ + n & 0xff, + (n >> 8) & 0xff, + (n >> 16) & 0xff, + (n >> 24) & 0xff, + ]) + + tmp += self.pack_auto([87, 69, 66, 80, 86, 80, 56]) + for i in range(len(string)): + tmp += self.pack('1B', string[i] ^ 101) + return tmp diff --git a/manga-py-stable_1.x/manga_py/crypt/manhuagui_com.py b/manga-py-stable_1.x/manga_py/crypt/manhuagui_com.py new file mode 100644 index 0000000..84e307f --- /dev/null +++ b/manga-py-stable_1.x/manga_py/crypt/manhuagui_com.py @@ -0,0 +1,24 @@ +from .base_lib import BaseLib + + +class ManhuaGuiComCrypt(BaseLib): + def decrypt(self, js, default=''): + # try: + return self.exec_js(self.js_gz_b64_data(), js) + # except Exception: + # return default + + def js_gz_b64_data(self): # FIXME + data = """eJzNVdtymzAQ/RWHhwwqawp2mqbI60yaNr0lvaXXeJgMARFIiEQlYTt1+fcKfOt03Olj8iAQR7tn +j3YO0jiSneOzUy1zfolpxWOdC26T2djgAue4m0pxc5hF8lAkDHKc1bRZ5jhLWCxuSsmUOjIhTyPF +dneCFYsgM8l0JXmHV0WBKPYtK7CsZtIAAXfPPVu4BeOXOoN+D1aZfJW5JgMD5qm9lY9EaGbm2QhJ +hbQbMRo9qgdLMqodhzQhI+HGRviBtjUJUdcL1naJh7VtHTw9fPb86MXLV6/fHJ+8fff+w8fTT5+/ +fP32/Sy6iBOWXmb51XVxw0X5QypdjSfT25+e3+vvPNp9vPfEeYgWrEpwQmpSw7m3bkEOHPS8mxIU +RFACgwJiSHEUQoY7UJkxxj4kaFkwadApmvgi0LZHoBQqb4gCDjlP2DTw51uWZrty0KfSQZ+kIxmi +bPEIPWB4EunMLcXE7kGPQIE+LbaQUVLi1DXU21N3yQvr6XCIPniIa2R7215/YBNrklEbsNViWkwg ++oV2OfT2/cAjDwooBgNTTU1yHWd2RGaxsUTH9GOTtL27kBajMLrohWTRNW3V+ZvV+bv3Q14vmHvW +supGZzrqhxiDMmPilpXK7JhQ2v4ZC/JhTpYZmy0xvkNLxHgfTZGOKscJ29ZDjFXXh6zbvbce+a/a +pWU6E/dK5Ny2LFIbf5jqmSma/eWFsakE6SgOSYLNi7JCscZP8RZiRf44wWmCylHL084j9cKBSZPf +alJOsl42Jk2aLXe7/ypb1zVd8tc2oYvrppRCC31bMleVRR7jhgtleWW5m24gW2e5Im2yNjk1/Q2+ +WUKy""" + return self.zlib_d(self.base64decode(data)).decode() diff --git a/manga-py-stable_1.x/manga_py/crypt/puzzle.py b/manga-py-stable_1.x/manga_py/crypt/puzzle.py new file mode 100644 index 
0000000..1418ad9 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/crypt/puzzle.py @@ -0,0 +1,66 @@ +from PIL import Image + + +class Puzzle: + need_copy_orig = False + + __x = 0 + __y = 0 + __multiply = 1 + __matrix = None + __image_src = None + __image_dst = None + __block_width = None + __block_height = None + + def __init__(self, x: int, y: int, matrix: dict, multiply: int = 1): + self.__x = x + self.__y = y + self.__matrix = matrix + self.__multiply = multiply + + def de_scramble(self, path_src: str, path_dst: str): + self.__image_src = Image.open(path_src, 'r') + self._process() + self.__image_dst.save(path_dst) + self.__image_src.close() + self.__image_dst.close() + + def _process(self): + self.__image_dst = Image.new(self.__image_src.mode, self.__image_src.size) + self._calc_block_size() + self._check_copy_orig_image() + self._solve_matrix() + + def _check_copy_orig_image(self): + if self.need_copy_orig: + self.__image_dst.paste(self.__image_src) + + def _calc_block_size(self): + if self.__multiply <= 1: + self.__block_width = int(self.__image_src.size[0] / self.__x) + self.__block_height = int(self.__image_src.size[1] / self.__y) + else: + self.__block_width = self.__multiply * int(self.__image_src.size[0] / self.__y / self.__multiply) + self.__block_height = self.__multiply * int(self.__image_src.size[1] / self.__x / self.__multiply) + + def _src_rect(self, i): + row = int(i / self.__x) + col = i - row * self.__x + x = col * self.__block_width + y = row * self.__block_height + return x, y, x + self.__block_width, y + self.__block_height + + def _dst_rect(self, i): + row = int(self.__matrix[i] / self.__x) + col = self.__matrix[i] - row * self.__y + x = col * self.__block_width + y = row * self.__block_height + return x, y, x + self.__block_width, y + self.__block_height + + def _solve_matrix(self): + for i in range(self.__x * self.__y): + src_rect = self._src_rect(i) + dst_rect = self._dst_rect(i) + region = self.__image_src.crop(src_rect) + self.__image_dst.paste(region, dst_rect) diff --git a/manga-py-stable_1.x/manga_py/crypt/sunday_webry_com.py b/manga-py-stable_1.x/manga_py/crypt/sunday_webry_com.py new file mode 100644 index 0000000..a8de4d5 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/crypt/sunday_webry_com.py @@ -0,0 +1,180 @@ +from PIL import Image + + +# DO NOT SEE HERE! IT WORKED! 
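+ # Note (descriptive comment, not in the original diff): MatrixSunday.de_scramble below rebuilds a scrambled page; for each tile descriptor it crops the block at (destX, destY) from the source image and pastes it back at (srcX, srcY), skipping tiles whose source rectangle lies outside the image bounds.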
+ + +class MatrixSunday: + __image_src = None + __image_dst = None + + def de_scramble(self, path_src: str, path_dst: str, data: list): + self.__image_src = Image.open(path_src, 'r') + self.__process(data) + self.__image_dst.save(path_dst) + self.__image_src.close() + self.__image_dst.close() + + def __process(self, data: list): + size_src = self.__image_src.size + + self.__image_dst = Image.new(self.__image_src.mode, size_src) + + for i in data: + x, y = i['srcX'] + i['width'], i['srcY'] + i['height'] + dx, dy = i['destX'] + i['width'], i['destY'] + i['height'] + + c1 = i['srcX'] < size_src[0] + c2 = i['srcX'] + i['width'] >= 0 + c3 = i['srcY'] < size_src[1] + c4 = i['srcY'] + i['height'] >= 0 + if c1 and c2 and c3 and c4: + region = self.__image_src.crop((i['destX'], i['destY'], dx, dy)) + self.__image_dst.paste(region, (i['srcX'], i['srcY'], x, y)) + + +class SundayWebryCom: # pragma: no cover + _result = None + + def solve_by_img(self, src: str, element_width: int, element_height: int, n: int): + img = Image.open(src) + sizes = img.size + img.close() + return self.solve(*sizes, element_width, element_height, n) + + def solve(self, width: int, height: int, element_width: int, element_height: int, n: int): + e = width + t = height + r = element_width + i = element_height + + y = int(e / r) + g = int(t / i) + f = e % r + b = t % i + self._result = [] + + s = y - 43 * n % y + if s % y == 0: + s = (y - 4) % y + + a = g - 47 * n % g + if a % g == 0: + a = (g - 4) % g + if 0 == a: + a = g - 1 + + self.def1(f, b, s, r, a, i) + + self.def2(y, i, n, a, s, f, r, g, b) + + if f > 0: + self.def3(g, n, s, a, y, b, i, r, f) + + self.def4(y, g, n, r, f, s, a, i, b) + + return self._result + + def def1(self, f, b, s, r, a, i): + if f > 0 and b > 0: + o = s * r + u = a * i + self._result.append({ + 'srcX': o, + 'srcY': u, + 'destX': o, + 'destY': u, + 'width': f, + 'height': b, + # 'debug': 1 + }) + + def def2(self, y, i, n, a, s, f, r, g, b): + for l in range(y): + d = self._calc_x_x(l, y, n) + h = self._calc_y_x(d, s, a, g, n) + c = self._calc_pos_rest(d, s, f, r) + p = h * i + o = self._calc_pos_rest(l, s, f, r) + u = a * i + self._result.append({ + 'srcX': o, + 'srcY': u, + 'destX': c, + 'destY': p, + 'width': r, + 'height': b, + # 'debug': 2 + }) + + def def3(self, g, n, s, a, y, b, i, r, f): + for m in range(g): + h = self._calc_y_y(m, g, n) + d = self._calc_x_y(h, s, a, y, n) + p = self._calc_pos_rest(h, a, b, i) + u = self._calc_pos_rest(m, a, b, i) + self._result.append({ + 'srcX': s * r, + 'srcY': u, + 'destX': d * r, + 'destY': p, + 'width': f, + 'height': i, + # 'debug': 3 + }) + + def def4(self, y, g, n, r, f, s, a, i, b): + for l in range(y): + for m in range(g): + d = (l + 29 * n + 31 * m) % y + h = (m + 37 * n + 41 * d) % g + c = d * r + (f if d >= self._calc_x_y(h, s, a, y, n) else 0) + p = h * i + (b if h >= self._calc_y_x(d, s, a, g, n) else 0) + o = l * r + (f if l >= s else 0) + u = m * i + (b if m >= a else 0) + self._result.append({ + 'srcX': o, + 'srcY': u, + 'destX': c, + 'destY': p, + 'width': r, + 'height': i, + # 'debug': 4 + }) + + @staticmethod + def _calc_pos_rest(e, t, r, i): + m = 0 + if e >= t: + m = r + return e * i + m + + @staticmethod + def _calc_x_x(e, t, r): + return (e + 61 * r) % t + + @staticmethod + def _calc_x_y(e, t, r, i, n): + o = (n % 2 == 1) + if (e < r and o) or (e >= r and not o): + a = i - t + s = t + else: + a = t + s = 0 + return (e + 67 * n + t + 71) % a + s + + @staticmethod + def _calc_y_x(e, t, r, i, n): + o = (n % 2 == 1) + if (e < t and o) or 
(e >= t and not o): + a = r + s = 0 + else: + a = i - r + s = r + return (e + 53 * n + 59 * r) % a + s + + @staticmethod + def _calc_y_y(e, t, r): + return (e + 73 * r) % t diff --git a/manga-py-stable_1.x/manga_py/crypt/viz_com.py b/manga-py-stable_1.x/manga_py/crypt/viz_com.py new file mode 100644 index 0000000..259b0cf --- /dev/null +++ b/manga-py-stable_1.x/manga_py/crypt/viz_com.py @@ -0,0 +1,77 @@ +import re +from typing import List, Optional, Tuple +from PIL import Image +from sys import stderr + + +WIDTH = 256 +HEIGHT = 257 +KEY = 42016 + +class VizComMatrix: + @classmethod + def solve_image(cls, path: str, metadata: dict) -> Optional[Image.Image]: + orig = Image.open(path) # type: Image.Image + new_size = (orig.size[0] - 90, orig.size[1] - 140) + ref = Image.new(orig.mode, new_size) # type: Image.Image + ref.paste(orig) + + exif = orig._getexif() + if KEY in exif: + key = [int(i, 16) for i in exif[KEY].split(':')] + width, height = exif[WIDTH], exif[HEIGHT] + else: + key = [] + width, height = metadata['width'], metadata['height'] + small_width = int(width / 10) + small_height = int(height / 15) + + cls.paste(ref, orig, ( + 0, small_height + 10, + small_width, height - 2 * small_height, + ), ( + 0, small_height, + small_width, height - 2 * small_height, + )) + + cls.paste(ref, orig, ( + 0, 14 * (small_height + 10), + width, orig.height - 14 * (small_height + 10), + ), ( + 0, 14 * small_height, + width, orig.height - 14 * (small_height + 10), + )) + + cls.paste(ref, orig, ( + 9 * (small_width + 10), small_height + 10, + small_width + (width - 10 * small_width), height - 2 * small_height, + ), ( + 9 * small_width, small_height, + small_width + (width - 10 * small_width), height - 2 * small_height, + )) + + for i, j in enumerate(key): + cls.paste(ref, orig, ( + (i % 8 + 1) * (small_width + 10), (int(i / 8) + 1) * (small_height + 10), + small_width, small_height, + ), ( + (j % 8 + 1) * small_width, (int(j / 8) + 1) * small_height, + small_width, small_height, + )) + + return ref + + @classmethod + def paste(cls, ref: Image.Image, orig: Image.Image, orig_box: Tuple, ref_box: Tuple): + ref.paste(orig.crop(( + int(orig_box[0]), int(orig_box[1]), + int(orig_box[0] + orig_box[2]), int(orig_box[1] + orig_box[3]), + )), ( + int(ref_box[0]), int(ref_box[1]), + int(ref_box[0] + ref_box[2]), int(ref_box[1] + ref_box[3]), + )) + + +solve = VizComMatrix().solve_image + +__all__ = ['solve'] diff --git a/manga-py-stable_1.x/manga_py/fs.py b/manga-py-stable_1.x/manga_py/fs.py new file mode 100644 index 0000000..15b8bf2 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/fs.py @@ -0,0 +1,209 @@ +import tempfile +from json import loads as json_loads +from os import name as os_name, getpid, makedirs, walk +from pathlib import Path +from shutil import move +from shutil import rmtree + +__dir_name__ = '.PyMangaDownloader' + + +def mark_as_hidden(_path: str): + try: + from ctypes import windll + windll.kernel32.SetFileAttributesW(_path, 2) + except Exception: + pass + + +def get_temp_path(*args) -> str: + temp = 'temp_%s' % getpid() + return path_join(tempfile.gettempdir(), __dir_name__, temp, *args) + + +def root_path() -> str: + # fs.py/manga_py/../ + file = Path(__file__).resolve() + return str(file.parent.parent) + + +def get_util_home_path() -> str: + if os_name == 'nt': + home = path_join(str(Path.home()), 'AppData', 'Roaming', __dir_name__) + else: + home = path_join(str(Path.home()), __dir_name__) + make_dirs(home) + return str(home) + + +def make_dirs(directory): + is_dir(directory) or 
makedirs(directory) + + +def remove_file_query_params(name, save_path: bool = True) -> str: + if name is None: + raise AttributeError + file_path = dirname(name) + name = basename(name) + position = name.find('?') + if position == 0: + name = 'image.png' # fake image name + elif position > 0: + name = name[:position] + return str(path_join(file_path, name) if save_path else name) + + +def is_file(_path) -> bool: + return Path(_path).is_file() + + +def is_dir(_path) -> bool: + return Path(_path).is_dir() + + +def basename(_path) -> str: + return str(Path(_path).name) + + +def dirname(_path) -> str: + return str(Path(_path).parent) + + +def path_join(_path, *args) -> str: + return str(Path(_path).joinpath(*args)) + + +def unlink(_path, allow_not_empty=False): + if is_dir(_path): + if allow_not_empty: + rmtree(_path) + else: + Path(_path).rmdir() + elif is_file(_path): + Path(_path).unlink() + + +def os_stat(_path): + if is_file(_path): + return Path(_path).stat() + return None + + +def file_size(_path): + """ + :param _path: + :return: + :rtype: int + """ + data = os_stat(_path) + if data: + return data.st_size + return None + + +def rename(_from, _to): + if is_file(_from) or is_dir(_from): + is_dir(dirname(_to)) or makedirs(dirname(_to)) + move(_from, _to) + + +def storage(_path) -> str: + _path = get_temp_path('storage', _path) + make_dirs(dirname(_path)) + return str(_path) + + +def listing(_path) -> dict: + """ + :param _path: + :return: {'directories': [], 'files': []} + """ + _dirname, _dirnames, _filenames = walk(_path) + return {'directories': _dirnames, 'files': _filenames} + + +def __get_info(_path): + try: + with open(path_join(_path, 'info.json'), 'r') as r: + return json_loads(r.read()) + except FileNotFoundError: + return None + + +def get_info(_path) -> dict: + """ + listing subdirectories and reading info.json files + :param _path: [{..}, {..}, {..}] + :return: + """ + result = {} + for d in listing(_path)['directories']: + directory = path_join(_path, d) + info = __get_info(directory) + if info is not None: + result[directory] = info + return result + + +def __dirname(_path) -> str: + if not is_dir(_path): + _path = __dirname(dirname(_path)) + return str(_path) + + +def _disk_stat_posix(_path) -> dict: + import os + st = os.statvfs(_path) + free = st.f_bavail * st.f_frsize + total = st.f_blocks * st.f_frsize + used = (st.f_blocks - st.f_bfree) * st.f_frsize + return {'total': total, 'used': used, 'free': free} + + +def _disc_stat_win(_path) -> dict: + import ctypes + _, total, free = ctypes.c_ulonglong(), ctypes.c_ulonglong(), ctypes.c_ulonglong() + fun = ctypes.windll.kernel32.GetDiskFreeSpaceExA + ret = fun(_path, ctypes.byref(_), ctypes.byref(total), ctypes.byref(free)) + if ret == 0: + fun = ctypes.windll.kernel32.GetDiskFreeSpaceExW + ret = fun(_path, ctypes.byref(_), ctypes.byref(total), ctypes.byref(free)) + if ret == 0: + raise ctypes.WinError() + used = total.value - free.value + return {'total': total.value, 'used': used, 'free': free.value} + + +def get_disk_stat(_path) -> dict: + import os + _path = __dirname(_path) + + if hasattr(os, 'statvfs'): # POSIX + return _disk_stat_posix(_path) + elif os.name == 'nt': # Windows + return _disc_stat_win(_path) + else: + raise NotImplementedError('Platform not supported') + + +def check_free_space(_path: str, min_size: int = 100, percent: bool = False) -> bool: + """ + min_size = 10 # percent = True + min_size = 10 # percent = False (default) + + :param _path: + :param min_size: + :param percent: + :return: + """ + _stat = 
get_disk_stat(_path) + if percent: + _free = _stat['free'] / _stat['total'] + if (_free * 100) < min_size: + return False + return True + else: + _free = _stat['free'] / (2 << 19) # 1Mb + if _free < min_size: + return False + return True diff --git a/manga-py-stable_1.x/manga_py/http/__init__.py b/manga-py-stable_1.x/manga_py/http/__init__.py new file mode 100644 index 0000000..1839b67 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/http/__init__.py @@ -0,0 +1,121 @@ +from sys import stderr +from time import sleep +import requests + +from manga_py.fs import get_temp_path, make_dirs, remove_file_query_params, basename, path_join, dirname, file_size +from .multi_threads import MultiThreads +from .request import Request +from .url_normalizer import normalize_uri + + +class Http(Request): + count_retries = 20 + has_error = False + mute = False + + def __init__( + self, + allow_webp=True, + referer='', + user_agent=None, + proxies=None, + cookies=None, + kwargs=None + ): + super().__init__() + self.__set_param('allow_webp', allow_webp) + self.__set_param('referer', referer) + self.__set_param('user_agent', user_agent) + self.__set_param('proxies', proxies) + self.__set_param('cookies', cookies) + self.__set_param('kwargs', kwargs) + + def __set_param(self, name, value): + if value is not None: + self_val = getattr(self, name) + _type = type(self_val) + if self_val is not None and not isinstance(value, _type): + raise AttributeError('{} type not {}'.format(name, _type)) + setattr(self, name, value) + + def _download(self, file_name, url, method): + now_try_count = 0 + with open(file_name, 'wb') as out_file: + now_try_count += 1 + response = self.requests(url, method=method, timeout=60, allow_redirects=True) + if response.status_code >= 400: + self.debug and print('\nERROR! 
Code {}\nUrl: {}\n'.format( + response.status_code, + url, + )) + sleep(2) + if response.status_code == 403: + response = requests.request(method=method, url=url, timeout=60, allow_redirects=True) + + if response.status_code < 400: + out_file.write(response.content) + + response.close() + out_file.close() + + def _safe_downloader(self, url, file_name, method='get') -> bool: + try: + make_dirs(dirname(file_name)) + url = self.normalize_uri(url) + self._download(file_name, url, method) + except OSError as ex: + self.debug and print(ex) + return False + return True + + def _download_one_file_helper(self, url, dst, callback: callable = None, success_callback: callable = None, + callback_args=()): + r = 0 + while r < self.count_retries: + if self._safe_downloader(url, dst): + if file_size(dst) < 64: + return None + callable(success_callback) and success_callback(dst, *callback_args) + return True + + r += 1 + mode = 'Retry' + if r >= self.count_retries: + mode = 'Skip image' + callable(callback) and callback(text=mode) + return False + + def download_file(self, url: str, + dst: str = None, + idx=-1, + callback: callable = None, + success_callback: callable = None, + callback_args=()) -> bool: + if not dst: + name = basename(remove_file_query_params(url)) + dst = path_join(get_temp_path(), name) + result = self._download_one_file_helper(url, dst, callback, success_callback, callback_args) + if result is None and not self.mute: + self.has_error = True # issue 161 + self.debug and print('\nWarning: 0 bit image downloaded, please check for redirection or broken content', file=stderr) + if ~idx: + self.debug and print('Broken url: %s\nPage idx: %d' % (url, (1 + idx)), file=stderr) + return result + + def normalize_uri(self, uri, referer=None): + if not referer: + referer = self.referer + if isinstance(uri, str): + return normalize_uri(uri.strip(), referer) + return uri + + def multi_download_get(self, urls, dst: str = None, callback: callable = None): + threading = MultiThreads() + for idx, url in enumerate(urls): + threading.add(self.download_file, (url, dst, idx)) + threading.start(callback) + + def get_redirect_url(self, url, **kwargs): + location = self.requests(url=url, method='head', **kwargs) + url = location.headers.get('Location', url) + return self.normalize_uri(url) diff --git a/manga-py-stable_1.x/manga_py/http/auto_proxy.py b/manga-py-stable_1.x/manga_py/http/auto_proxy.py new file mode 100644 index 0000000..afd0aae --- /dev/null +++ b/manga-py-stable_1.x/manga_py/http/auto_proxy.py @@ -0,0 +1,53 @@ +import requests +from lxml.html import document_fromstring + + +class AutoProxy: + checked_url = 'https://httpbin.org/ip' + + @staticmethod + def __strip(text): + return text.text_content().strip(' \n\t\r\0') + + def _s(self, item): + td = item.cssselect('td') + proxy = self.__strip(td[4]) # proxy type + https = self.__strip(td[6]) # https (yes|no) + if ( + proxy == 'anonymous' + or proxy == 'elite proxy' + ) and https == 'yes': + return self.__strip(td[0]) + ':' + self.__strip(td[1]) + return None + + def _test_proxy(self, url): + proxies = { + 'http': url, + 'https': url, + } + try: + requests.head(url=self.checked_url, proxies=proxies, timeout=6) + except Exception: + return False + return proxies + + def _change_checked_url(self, checked_url): + if checked_url: + self.checked_url = checked_url + + def auto_proxy(self, checked_url=None): + self._change_checked_url(checked_url) + url = 'https://www.us-proxy.org' + items = document_fromstring(requests.get(url).text) + items = 
items.cssselect('#proxylisttable tbody tr') + for n, i in enumerate(items): + proxy = self._s(i) + test = False + if proxy: + test = self._test_proxy(proxy) + if test: + return test + return None + + +auto_proxy = AutoProxy().auto_proxy diff --git a/manga-py-stable_1.x/manga_py/http/google_dcp.py b/manga-py-stable_1.x/manga_py/http/google_dcp.py new file mode 100644 index 0000000..5006411 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/http/google_dcp.py @@ -0,0 +1,33 @@ +import random +import time + +from manga_py.crypt.base_lib import BaseLib +from .request import Request + + +class GoogleDCP: + host = 'proxy.googlezip.net' + authkey = 'ac4500dd3b7579186c1b0620614fdb1f7d61f944' + http = None + + def __init__(self, http: Request): + self.http = http + + def randint(self): + return random.randint(0, 999999999) + + def _build_header(self): + timestamp = int(time.time()) + md5 = BaseLib.md5('{}{}{}'.format(timestamp, self.authkey, timestamp)) + return 'Chrome-Proxy: ps={}-{}-{}-{}, sid={}, c=win, b=3029, p=110'.format( + int(time.time()), + self.randint(), + self.randint(), + self.randint(), + BaseLib.str2hex(md5.hexdigest()) + ) + + def set_proxy(self): + self.http.proxies['http'] = self.host + self.http.headers = self._build_header() + return self.http diff --git a/manga-py-stable_1.x/manga_py/http/multi_threads.py b/manga-py-stable_1.x/manga_py/http/multi_threads.py new file mode 100644 index 0000000..111cc65 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/http/multi_threads.py @@ -0,0 +1,33 @@ +from threading import Thread + + +class MultiThreads: + threads = None + max_threads = 2 + to_run = None + + def __init__(self): + self.threads = [] + self.to_run = [] + try: + import multiprocessing + self.max_threads = multiprocessing.cpu_count() + except Exception: + pass + + def add(self, target: callable, args: tuple): + self.threads.append(Thread(target=target, args=args)) + + def _run_processes(self, callback: callable = None, n: int = None): + for t in self.to_run: + if not n: + t.join() + callback is not None and callback() + + def start(self, callback: callable = None): + for n, t in enumerate(self.threads): # starting all threads + t.start() + self.to_run.append(t) + self._run_processes(callback, (n + 1) % self.max_threads) + self._run_processes(callback) + self.threads = [] diff --git a/manga-py-stable_1.x/manga_py/http/request.py b/manga-py-stable_1.x/manga_py/http/request.py new file mode 100644 index 0000000..ab77a59 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/http/request.py @@ -0,0 +1,171 @@ +import requests + +from .url_normalizer import normalize_uri + + +class Request: + __redirect_base_url = '' + _headers = None + referer = '' + proxies = None + allow_webp = True + user_agent = '{} {} {} {}'.format( + 'Mozilla/5.0 (Windows NT 10.0; Win64; x64)', + 'AppleWebKit/537.36 (KHTML, like Gecko)', + 'Chrome/60.0.3112.101', + 'Safari/537.36' + ) + default_lang = 'ru-RU,ru;q=0.8,en-US;q=0.5,en;q=0.3' + cookies = None + kwargs = None + debug = False + response = None + _history = None + allow_send_referer = True + + def __init__(self): + self.proxies = {} + self.cookies = {} + self._history = [] + + def __patch_headers(self, headers): + if isinstance(self._headers, dict): + for i in self._headers: + headers[i] = self._headers[i] + return headers + + def _get_cookies(self, cookies=None): + return cookies if cookies else self.cookies + + def _prepare_redirect_base_url(self, url): + if not self.__redirect_base_url: + self.__redirect_base_url = url + + def _get_kwargs(self): + kwargs = 
{} + if self.kwargs: + kwargs = self.kwargs + return kwargs + + def __update_cookies(self, r): + _ = r.cookies.get_dict() + for c in _: + self.cookies[c] = _[c] + + def __redirect_helper(self, r, url, method): + proxy = None + location = url + if r.status_code == 303: + method = 'get' + elif r.status_code == 305: + proxy = { + 'http': r.headers['location'], + 'https': r.headers['location'], + } + else: + location = normalize_uri(r.headers['location'], self.__redirect_base_url) + return proxy, location, method + + def _requests_helper( + self, method, url, headers=None, data=None, + max_redirects=10, **kwargs + ) -> requests.Response: + self._prepare_redirect_base_url(url) + headers = self.__patch_headers(headers) + args = { + 'url': url, + 'headers': headers, + 'data': data, + } + self.__set_defaults(args, kwargs) + self.__set_defaults(args, self._get_kwargs()) + args.setdefault('allow_redirects', False) + r = getattr(requests, method)(**args) + self.__update_cookies(r) + if r.is_redirect and method != 'head': + if max_redirects < 1: + self.debug and print(self._history) + raise AttributeError('Too many redirects') + self._history.append(url) + proxy, location, method = self.__redirect_helper(r, url, method) + if proxy: + kwargs['proxies'] = proxy + return self._requests_helper( + method=method, url=location, headers=headers, + data=data, max_redirects=(max_redirects - 1), + **kwargs + ) + return r + + @staticmethod + def __set_defaults(args_orig: dict, args_vars: dict): + for idx in args_vars: + args_orig.setdefault(idx, args_vars[idx]) + + def requests( + self, url: str, headers: dict = None, cookies: dict = None, + data=None, method='get', files=None, timeout=None, **kwargs + ) -> requests.Response: + if not isinstance(headers, dict): + headers = {} + self._history = [] + cookies = self._get_cookies(cookies) + headers.setdefault('User-Agent', self.user_agent) + if self.allow_send_referer and self.referer: + headers.setdefault('Referer', self.referer) + headers.setdefault('Accept-Language', self.default_lang) + if self.allow_webp: + headers['Accept'] = 'text/html,application/xhtml+xml,application/xml;q=1.0,image/webp,image/apng,*/*;q=1.0' + kwargs.setdefault('proxies', self.proxies) + self.response = self._requests_helper( + method=method, url=url, headers=headers, cookies=cookies, + data=data, files=files, timeout=timeout, + **kwargs + ) + return self.response + + def get(self, url: str, headers: dict = None, cookies: dict = None, **kwargs) -> str: + response = self.requests( + url=url, + headers=headers, + cookies=cookies, + method='get', + **kwargs + ) + text = response.text + response.close() + return text + + def post(self, url: str, headers: dict = None, cookies: dict = None, data: dict = (), files=None, **kwargs) -> str: + response = self.requests( + url=url, + headers=headers, + cookies=cookies, + method='post', + data=data, + files=files, + **kwargs + ) + text = response.text + response.close() + return text + + def reset_proxy(self): + self.proxies = {} + + def set_proxy(self, proxy): + self.reset_proxy() + if isinstance(proxy, dict): + self.proxies['http'] = proxy.get('http', None) + self.proxies['https'] = proxy.get('https', None) + elif isinstance(proxy, str): + self.proxies['http'] = proxy + + def get_base_cookies(self, url: str): + """ + :param url: + :return: + """ + response = self.requests(url=url, method='head') + response.close() + return response.cookies diff --git a/manga-py-stable_1.x/manga_py/http/updater.py b/manga-py-stable_1.x/manga_py/http/updater.py new file 
mode 100644 index 0000000..b805373 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/http/updater.py @@ -0,0 +1,25 @@ +import json +import webbrowser + +from packaging import version +from requests import get + +from manga_py.meta import __version__, __repo_name__ + + +def check_version(): + api_url = 'https://api.github.com/repos/' + __repo_name__ + '/releases/latest' + api_content = json.loads(get(api_url).text) + tag_name = api_content.get('tag_name', None) + if tag_name and version.parse(tag_name) > version.parse(__version__): + download_addr = api_content['assets'][0] + return tag_name, download_addr['browser_download_url'] + return () + + +def download_update(): + pass + + +def open_browser(url): + webbrowser.open(url) diff --git a/manga-py-stable_1.x/manga_py/http/url_normalizer.py b/manga-py-stable_1.x/manga_py/http/url_normalizer.py new file mode 100644 index 0000000..f0a6e86 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/http/url_normalizer.py @@ -0,0 +1,73 @@ +from urllib.parse import urlparse + + +class UrlNormalizer: + + @staticmethod + def _parse_sheme(parse, base_parse): + if not parse.scheme: + uri = base_parse.scheme + else: + uri = parse.scheme + return uri + '://' + + @staticmethod + def _parse_netloc(parse, base_parse): + if not parse.netloc: + uri = base_parse.netloc + else: + uri = parse.netloc + return uri + + @staticmethod + def _test_path_netloc(parse): + if parse.path.find('://') == 0: + return urlparse('http' + parse.path).path + return parse.path + + @staticmethod + def __parse_rel_path(parse, base_parse): + path = '' + if base_parse.path.rfind('/') > 0: + path = base_parse.path[0:base_parse.path.rfind('/')] + return path.rstrip('/') + '/' + parse.path.lstrip('/') + + @staticmethod + def _parse_path(parse, base_parse): + if parse.netloc: + return parse.path + _path = UrlNormalizer._test_path_netloc(parse) + if _path: + if _path.find('/') == 0: + return _path + else: + return UrlNormalizer.__parse_rel_path(parse, base_parse) + else: + return base_parse.path + + @staticmethod + def _parse_query(parse): + if parse.query: + return '?' 
+ parse.query + return '' + + @staticmethod + def _parse_fragment(parse): + if parse.fragment: + return '#' + parse.fragment + return '' + + @staticmethod + def url_helper(url: str, base_url: str) -> str: + parse = urlparse(url) + base_parse = urlparse(base_url) + un = UrlNormalizer + sheme = un._parse_sheme(parse, base_parse) + netloc = un._parse_netloc(parse, base_parse) + path = un._parse_path(parse, base_parse) + query = un._parse_query(parse) + fragment = un._parse_fragment(parse) + return sheme + netloc + path + query + fragment + + +normalize_uri = UrlNormalizer.url_helper diff --git a/manga-py-stable_1.x/manga_py/http/websocket.py b/manga-py-stable_1.x/manga_py/http/websocket.py new file mode 100644 index 0000000..80494a1 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/http/websocket.py @@ -0,0 +1,2 @@ +class WebSocket: + pass diff --git a/manga-py-stable_1.x/manga_py/image.py b/manga-py-stable_1.x/manga_py/image.py new file mode 100644 index 0000000..d608bcc --- /dev/null +++ b/manga-py-stable_1.x/manga_py/image.py @@ -0,0 +1,110 @@ +import imghdr +from os import path + +from PIL import Image as PilImage, ImageChops + + +class Image: + _image = None # type: PilImage + src_path = None # type: str + + def __init__(self, src_path): + """ + :param src_path: + """ + if not path.isfile(src_path): + raise AttributeError('Image not found') + + self.src_path = src_path + self.__open(src_path) + + @property + def image(self) -> PilImage: + return self._image + + @image.setter + def image(self, image: PilImage): + self._image = image + + def __open(self, _path): + """ + :param _path: + :return: + """ + if self.image is None: + self._image = PilImage.open(_path) + + def gray(self, dest_path: str): + """ + :param dest_path: + :return: + """ + try: + image = self.image.convert('LA') + except (ValueError, OSError): + image = self.image.convert('L') + if dest_path is not None: + image.save(dest_path) + return image + + def convert(self, dest_path: str, quality: int = 95): + """ + see http://pillow.readthedocs.io/en/3.4.x/handbook/image-file-formats.html + :param dest_path: + :param quality: + :return: + """ + self.image.save(dest_path, quality=quality) + return dest_path + + def crop_manual_with_offsets(self, offsets, dest_path: str): + """ + :param offsets: + :param dest_path: + :return: + """ + left, upper, right, lower = offsets + width, height = self.image.size + image = self.image.crop(( + left, + upper, + width - right, + height - lower + )) + image.save(dest_path) + + def crop_manual(self, sizes: tuple, dest_path: str): + """ + :param sizes: The crop rectangle, as a (left, upper, right, lower)-tuple. + :param dest_path: + :return: + """ + self.image.crop(sizes).save(dest_path) + + def crop_auto(self, dest_path: str): + """ + :param dest_path: + :return: + """ + bg = PilImage.new( + self.image.mode, + self.image.size, + self.image.getpixel((0, 0)) + ) + diff = ImageChops.difference(self.image, bg) + diff = ImageChops.add(diff, diff, 2.0, -100) + bbox = diff.getbbox() + if bbox: + crop = self.image.crop(bbox) + if dest_path: + crop.save(dest_path) + + def close(self): + self.image is not None and self.image.close() + + @staticmethod + def real_extension(_path): + img = imghdr.what(_path) + if img: + return '.' 
+ img + return None diff --git a/manga-py-stable_1.x/manga_py/info.py b/manga-py-stable_1.x/manga_py/info.py new file mode 100644 index 0000000..d708888 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/info.py @@ -0,0 +1,125 @@ +from argparse import Namespace +from datetime import datetime +from sys import argv +from typing import Union + +from manga_py import meta + + +class Info: + __doc__ = """ + --print-json argument helper + + { + 'site': 'https://example.org/kumo-desu-ga-nani-ka', + 'downloader': [ + 'https://manga-py.com/manga-py/', + 'https://github.com/manga-py/manga-py', + 'https://github.com/yuru-yuri/manga-py', + 'https://yuru-yuri.github.io/manga-py', + ], + 'version': '1.1.4', + 'delta': '0:00:00.003625', + 'start': '2018-06-08 17:22:24.419565', + 'end': '2018-06-08 17:22:24.423190', + 'user_agent': 'Mozilla/5.0', + 'cookies': {'cf_clearance': 'ec-1528654923-86400', '__cfduid': '21528654914'}, + 'args': { + '_raw_params': 'manga-py --cbz https://example.org/kumo-desu-ga-nani-ka', + 'url': 'https://example.org/kumo-desu-ga-nani-ka', + 'name': None, + 'destination': None, + 'no-progress': False, + 'cbz': False, + 'skip-volumes': None, + 'max-volumes': None, + 'user-agent': None, + 'proxy': None, + 'reverse-downloading': None, + 'rewrite-exists-archives': None, + 'no-multi-threads': None, + }, + 'error': False, + 'error_msg': '', + 'volumes': [ + { + 'name': 'Kumo desu ga, nani ka? - 0', + 'path': 'Manga/kumo-desu-ga-nani-ka/vol_000.zip', + }, + { + 'name': 'Kumo desu ga, nani ka? - 1', + 'path': 'Manga/kumo-desu-ga-nani-ka/vol_001.zip', + }, + ], + } + """ + _data = None + _start_time = None + + @staticmethod + def _dt(dt, fmt: str = '%A, %d. %B %Y %H:%M:%S'): + return dt.strftime(fmt) + + def __init__(self, args: Union[Namespace, dict]): # see manga_py.cli arguments + _args = args.__dict__ if args is not dict else args + _args['_raw_params'] = ' '.join(argv) + self._data = { + 'site': args.url, + 'downloader': meta.__downloader_uri__, + 'version': meta.__version__, + 'delta': None, + 'init': self._dt(datetime.now()), + 'start': None, + 'end': None, + 'user_agent': None, + 'cookies': None, + 'args': _args, + 'return_code': 0, + 'error': False, + 'error_msg': None, + 'volumes': [], + } + self._volumes = [] + + def set_ua(self, ua): + self._data['user_agent'] = ua + + def set_error(self, e, rc: int = 1): + self._data['return_code'] = rc + self._data['error'] = e + + def start(self): + self._start_time = datetime.now() + + def set_cookies(self, cookies): + self._data['cookies'] = cookies + + def set_volumes(self, volumes: list): + self._data['volumes'] = volumes + + def set_last_volume_error(self, error_message): + try: + self._data['volumes'][-1]['error'] = True + self._data['volumes'][-1]['error_message'] = error_message + except IndexError: + pass + + def add_volume(self, url: str, path: str, files: list = None): + volume = { + 'url': url, + 'path': path, + 'error': False, + 'error_message': '', + } + + if files is not None: + volume['files'] = files + volume['num_files'] = len(files) + + self._data['volumes'].append(volume) + + def get(self): + self._data['delta'] = str(datetime.now() - self._start_time) + self._data['start'] = self._dt(self._start_time) + self._data['end'] = self._dt(datetime.now()) + return self._data diff --git a/manga-py-stable_1.x/manga_py/meta.py b/manga-py-stable_1.x/manga_py/meta.py new file mode 100644 index 0000000..d60dc07 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/meta.py @@ -0,0 +1,3 @@ +__version__ = '1.11.0' +__repo_name__ = 
'manga-py/manga-py' +__downloader_uri__ = 'https://github.com/%s' % __repo_name__ diff --git a/manga-py-stable_1.x/manga_py/parser.py b/manga-py-stable_1.x/manga_py/parser.py new file mode 100644 index 0000000..9a11dd2 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/parser.py @@ -0,0 +1,45 @@ +from argparse import ArgumentParser + +from loguru import logger + +from .info import Info +from .providers import get_provider + + +class Parser: + params = None + provider = None + + def __init__(self, args): + self.params = {} + self.args = args + self._add_params(args) + + def _add_params(self, params: ArgumentParser = None): + if params is None: + params = self.args.parse_args() + else: + params = params.parse_args() + self.params = params.__dict__ + + def init_provider( + self, + progress: callable = None, + log: callable = None, + quest: callable = None, + info: Info = None, + quest_password: callable = None, + ): + provider = get_provider(self.params.get('url', '')) + if isinstance(provider, bool): + raise AttributeError('Provider not found') + self.provider = provider(info) # provider __init__ + + self.provider.set_progress_callback(None if self.params['quiet'] else progress) + self.provider.set_log_callback(log) + self.provider.set_quest_callback(quest) + self.provider.set_quest_password_callback(quest_password) + + @logger.catch + def start(self): + self.provider.process(self.params['url'], self.params) diff --git a/manga-py-stable_1.x/manga_py/provider.py b/manga-py-stable_1.x/manga_py/provider.py new file mode 100644 index 0000000..0f519c3 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/provider.py @@ -0,0 +1,274 @@ +import json +import re +from abc import ABC +from sys import stderr + +from .base_classes import ( + Abstract, + Archive, + Base, + Callbacks, + # TODO + CloudFlareProtect, + Static +) +from .fs import ( + get_temp_path, + is_file, + basename, + remove_file_query_params, + path_join, + unlink, + file_size, +) +from .http import MultiThreads +from .info import Info +from .meta import __downloader_uri__ +from .meta import __version__ + + +class Provider(Base, Abstract, Static, Callbacks, ABC): + _volumes_count = 0 + _archive = None + _zero_fill = False + _with_manga_name = False + _info = None + _simulate = False + _volume = None + _show_chapter_info = False + __debug = False + _override_name = '' + + def __init__(self, info: Info = None): + super().__init__() + self.re = re + self.json = json + self._params['temp_directory'] = get_temp_path() + self._info = info + + def _params_parser(self, params): + # image params + self._set_if_not_none(self._image_params, 'crop_blank', params.get('crop_blank', False)) + self._set_if_not_none( + self._image_params, 'crop', + (params.get('xt', 0), + params.get('xr', 0), + params.get('xb', 0), + params.get('xl', 0)), + ) + self._image_params['no_webp'] = params.get('no_webp', False) + # downloading params + self._set_if_not_none(self._params, 'destination', params.get('destination', None)) + self._zero_fill = params.get('zero_fill') + self._with_manga_name = params.get('with_manga_name') + self._simulate = params.get('simulate') + self._show_chapter_info = params.get('show_current_chapter_info', False) + self.__debug = params.get('debug', False) + self._override_name = self._params.get('override_archive_name') + if self._with_manga_name and self._override_name: + raise RuntimeError('Conflict of parameters. 
Please use only --with-manga-name, or --override-archive-name') + + def process(self, url, params=None): # Main method + self._params['url'] = url + params = params if isinstance(params, dict) else {} + self._params_parser(params) + for i in params: + self._params.setdefault(i, params[i]) + + proxy = params.get('proxy', None) + if proxy is not None: + self._storage['proxies'] = { + 'http': proxy, + 'https': proxy, + } + + self.prepare_cookies() + self._storage['manga_name'] = self.get_manga_name() + self._storage['main_content'] = self.content + self._storage['chapters'] = self._prepare_chapters(self.get_chapters()) + + if not self._params.get('reverse_downloading', False): + self._storage['chapters'] = self._storage['chapters'][::-1] + + self._storage['init_cookies'] = self._storage['cookies'] + self._info and self._info.set_ua(self.http().user_agent) + + self.loop_chapters() + + def _check_archive(self): + # check + _path = self.get_archive_path() + not_allow_archive = not self._params.get('rewrite_exists_archives', False) + + return not_allow_archive and is_file(_path) + + def _download_chapter(self): + if not self._simulate: + try: + self.before_download_chapter() + self._storage['files'] = self.get_files() + self.loop_files() + except Exception as e: + # Main debug here + if self.__debug: + raise e + self.log([e], file=stderr) + self._info.set_last_volume_error(e) + + def loop_chapters(self): + volumes = self._storage['chapters'] + _min = self._params.get('skip_volumes', 0) + _max = self._params.get('max_volumes', 0) + count = 0 # count downloaded chapters + for idx, __url in enumerate(volumes): + self.chapter_id = idx + if idx < _min or (count >= _max > 0) or self._check_archive(): + continue + count += 1 + self._info.add_volume(self.chapter_for_json(), self.get_archive_path()) + self._download_chapter() + + def loop_files(self): + if isinstance(self._storage['files'], list): + if self._show_chapter_info: + self.log('\n\nCurrent chapter url: %s\n' % (self.chapter,)) + if len(self._storage['files']) == 0: + # see Std + self.log('Error processing file: %s' % self.get_archive_name(), file=stderr) + return + self._archive = Archive() + self._archive.not_change_files_extension = self._params.get('not_change_files_extension', False) + self._archive.no_webp = self._image_params.get('no_webp', False) + self._call_files_progress_callback() + + self._multi_thread_save(self._storage['files']) + + self.make_archive() + + def _save_file_params_helper(self, url, idx): + if url is None: + _url = self.http().normalize_uri(self.get_current_file()) + else: + _url = url + _url = self.before_file_save(_url, idx) + filename = remove_file_query_params(basename(_url)) + _path = self.remove_not_ascii(self._image_name(idx, filename)) + _path = get_temp_path(_path) + return _path, idx, _url + + def save_file(self, idx=None, callback=None, url=None, in_arc_name=None): + _path, idx, _url = self._save_file_params_helper(url, idx) + + if not is_file(_path) or file_size(_path) < 32: + self.http().download_file(_url, _path, idx) + self.after_file_save(_path, idx) + self._archive.add_file(_path) + + callable(callback) and callback() + + return _path + + def get_archive_path(self): + if self._override_name: + _path = "{}_{}".format(self._override_name, str(self.normal_arc_name(self.get_chapter_index().split('-')))) + else: + # see Std + _path = remove_file_query_params(self.get_archive_name()) + _path = self.remove_not_ascii(_path) + + if not _path: + _path = str(self.chapter_id) + + name = self._params.get('name', 
'') + if not len(name): + name = self._storage['manga_name'] + + additional_data_name = '' + if self.http().has_error: + additional_data_name = 'ERROR.' + self.http().has_error = False + + return path_join( + self._params.get('destination', 'Manga'), + name, + _path + '.%s%s' % (additional_data_name, self._archive_type()) + ) \ + .replace('?', '_') \ + .replace('"', '_') \ + .replace('>', '_') \ + .replace('<', '_') \ + .replace('|', '_') # Windows... + + def make_archive(self): + _path = self.get_archive_path() + + info = 'Site: {}\nDownloader: {}\nVersion: {}'.format(self.get_url(), __downloader_uri__, __version__) + + # """ + # make book info + # """ + # if self._params['cbz']: + # self._archive.add_book_info(self._arc_meta_info()) + + self._archive.add_info(info) + try: + self._archive.make(_path) + except OSError as e: + self.log('') + self.log(e) + self.log(e, file=stderr) + self._info.set_last_volume_error(str(e)) + unlink(_path) + raise e + + def html_fromstring(self, url, selector: str = None, idx: int = None): + params = {} + if isinstance(url, dict): + params = url['params'] + url = url['url'] + return self.document_fromstring(self.http_get(url, **params), selector, idx) + + def _multi_thread_callback(self): + self._call_files_progress_callback() + self._storage['current_file'] += 1 + + def _multi_thread_save(self, files): + threading = MultiThreads() + # hack + self._storage['current_file'] = 0 + if self._params.get('max_threads', None) is not None: + threading.max_threads = int(self._params.get('max_threads')) + for idx, url in enumerate(files): + threading.add(self.save_file, (idx, self._multi_thread_callback, url, None)) + + threading.start() + + def cf_protect(self, url): + """ + WARNING! Thins function replace cookies! + :param url: str + :return: + """ + cf = CloudFlareProtect() + params = cf.run(url) + if len(params): + self.update_cookies(params[0]) + self.update_ua(params[1]) + self._params['cf-protect'] = True + + def update_ua(self, ua): + self._storage['user_agent'] = ua + self.http().user_agent = ua + self._info and self._info.set_ua(ua) + + def update_cookies(self, cookies): + for k in cookies: + self._storage['cookies'][k] = cookies[k] + self.http().cookies[k] = cookies[k] + + @property + def content(self): + content = self._storage.get('main_content', None) + if content is None: + content = self.get_main_content() + return content diff --git a/manga-py-stable_1.x/manga_py/providers/1stkissmanga_com.py b/manga-py-stable_1.x/manga_py/providers/1stkissmanga_com.py new file mode 100644 index 0000000..8333747 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/1stkissmanga_com.py @@ -0,0 +1,12 @@ +from .rawdevart_com import RawDevArtCom + + +class FirstKissMangaCom(RawDevArtCom): + _chapter_selector = r'/manga/[^/]+/chapter-(\d+(?:-\d+)?)' + + def get_files(self): + parser = self.html_fromstring(self.chapter) + return self._images_helper(parser, '.page-break img[data-src]', attr='data-src') + + +main = FirstKissMangaCom diff --git a/manga-py-stable_1.x/manga_py/providers/3asq_info.py b/manga-py-stable_1.x/manga_py/providers/3asq_info.py new file mode 100644 index 0000000..ee60e20 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/3asq_info.py @@ -0,0 +1,24 @@ +from .authrone_com import AuthroneCom +from .helpers.std import Std + + +class ThreeAsqInfo(AuthroneCom, Std): + _ch_selector = '.mng_det ul.lst > li > a.lst' + + def get_chapter_index(self) -> str: + return self.re.search( + r'\.info/[^/]+/([^/]+)', + self.chapter + ).group(1).replace('.', '-') 
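+ # The regex above extracts the chapter slug that follows the series slug in the URL and swaps '.' for '-', so the index is safe to embed in archive file names. + # Rough illustration only (this URL is made up): re.search(r'\.info/[^/]+/([^/]+)', 'https://3asq.info/some-series/700.5').group(1).replace('.', '-') gives '700-5'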
+ + def get_main_content(self): + return self._get_content('{}/{}/') + + def get_manga_name(self) -> str: + return self._get_name(r'\.info/([^/]+)') + + def get_files(self): + return list(set(super().get_files())) # remove doubles + + +main = ThreeAsqInfo diff --git a/manga-py-stable_1.x/manga_py/providers/7sama_com.py b/manga-py-stable_1.x/manga_py/providers/7sama_com.py new file mode 100644 index 0000000..6088efd --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/7sama_com.py @@ -0,0 +1,69 @@ +from manga_py.provider import Provider +from .helpers.std import Std + + +class SevenSamaCom(Provider, Std): + + def get_archive_name(self) -> str: + self._vol_fill = True + name = self.re.sub('[^a-zA-Z0-9]+', '_', self.chapter['chapter_name']) + return self.normal_arc_name([ + self.chapter['number'], + str(self.chapter_id), + name + ]) + + def get_chapter_index(self) -> str: + return self.chapter_id + + def get_main_content(self): + return self._get_content('{}/manga/{}') + + def get_manga_name(self) -> str: + return self._get_name('/manga/([^/]+)') + + def get_chapters(self): + idx = self.re.search(r'/manga/.+?/(\d+)', self.get_url()).group(1) + chapters = [] + for i in range(1, 1000): + content = self.http_get('{}/series/chapters_list.json?page={}&id_serie={}'.format( + self.domain, i, idx + ), {'x-requested-with': 'XMLHttpRequest'}) + data = self.json.loads(content) + if data['chapters'] is False: + break + chapters += self.__prepare_chapters(data['chapters']) + return chapters + + @staticmethod + def __prepare_chapters(items): + chapters = [] + for i in items: + for k, j in i['releases'].items(): + chapter = i.copy() + chapter['release'] = j + chapter['release_id'] = k + chapters.append(chapter) + return chapters + + def get_files(self): + url = self.chapter_for_json() + content = self.http_get('{}{}'.format(self.domain, url)) + api_key = self.re.search(r'this\.page\.identifier\s*=\s*[\'"](.+)[\'"]', content).group(1) + url = '{}/leitor/pages/{}.json?key={}'.format( + self.domain, self.chapter['release']['id_release'], api_key + ) + images = self.json.loads(self.http_get(url, {'x-requested-with': 'XMLHttpRequest'})) + return images['images'] + + def get_cover(self) -> str: + return self._cover_from_content('.cover img.cover') + + def book_meta(self) -> dict: + pass + + def chapter_for_json(self) -> str: + return self.chapter['release']['link'] + + +main = SevenSamaCom diff --git a/manga-py-stable_1.x/manga_py/providers/8muses_com.py b/manga-py-stable_1.x/manga_py/providers/8muses_com.py new file mode 100644 index 0000000..398a0c3 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/8muses_com.py @@ -0,0 +1,67 @@ +from manga_py.provider import Provider +from .helpers import eight_muses_com +from .helpers.std import Std + + +class EightMusesCom(Provider, Std): + _chapters = None + chapter_selector = '.gallery a.c-tile[href^="/comics/"]' + helper = None + _images_path = 'image/fl' + + def get_chapter_index(self) -> str: + re = self.re.compile(r'/(?:album|picture)/([^/]+/[^/]+(?:/[^/]+)?)') + ch = self.chapter + if isinstance(ch, list) and len(ch) > 0: + ch = ch[0] + if isinstance(ch, dict): + ch = ch.get('href') + idx = re.search(ch).group(1) + return '-'.join(idx.split('/')) + + def get_main_content(self): + return self.http_get(self.get_url()) + + def get_manga_name(self) -> str: + return self._get_name('/album/([^/]+)') + + def get_chapters(self): + chapters = self._elements(self.chapter_selector) + return self.helper.chapters(chapters) + + def _parse_images(self, images) -> 
list: + return ['{}/{}/{}'.format( + self.domain, + self._images_path, + i.get('value') + ) for i in images if i.get('value')] + + @staticmethod + def _sort(items: dict) -> list: + items = [items[i] for i in sorted(items, key=lambda x: int(x)) if len(items[i]) > 5] + return list(set(items)) + + def get_files(self): + images = {} + _n = self.http().normalize_uri + for n, i in enumerate(self.chapter): + if n % 4 < 2: + img = self.html_fromstring(_n(i.get('href')), '#imageName,#imageNextName') + images[str(n)] = img[0] + images[str(n + 2)] = img[1] + return self._parse_images(self._sort(images)) + + def get_cover(self) -> str: + pass + + def prepare_cookies(self): + self._chapters = [] + self._base_cookies() + self.helper = eight_muses_com.EightMusesCom(self) + + def book_meta(self) -> dict: + # todo meta + pass + + +main = EightMusesCom diff --git a/manga-py-stable_1.x/manga_py/providers/README.md b/manga-py-stable_1.x/manga_py/providers/README.md new file mode 100644 index 0000000..ffc4dca --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/README.md @@ -0,0 +1,63 @@ +# All providers + +### For template example, see _template.py + +## Functions: + +```python +from manga_py.provider import Provider +# from .helpers.std import Std + + +class _Template(Provider): +# class _Template(Provider, Std): # extends utils + + def get_archive_name(self) -> str: + pass + + def get_chapter_index(self) -> str: + pass + + def get_main_content(self): # call once +# return self._get_content('{}/manga/{}') + pass + + def prepare_cookies(self): # if site with cookie protect +# self._storage['proxies'] = auto_proxy() + +# self._storage['cookies'] = self.http().get_base_cookies(self.get_url()).get_dict() # base cookies + pass + + def get_manga_name(self) -> str: +# return self._get_name('/manga/([^/]+)') + return '' + + def get_chapters(self): # call once + # return self._elements('a.chapter') + return [] + + def get_files(self): # call ever volume loop + return [] + + def get_cover(self) -> str: + # return self._cover_from_content('.cover img') + pass + + def book_meta(self) -> dict: + """ + :see http://acbf.wikia.com/wiki/Meta-data_Section_Definition + :return { + 'author': str, + 'title': str, + 'annotation': str, + 'keywords': str, + 'cover': str, + 'rating': str, + } + """ + pass + + +main = _Template + +``` \ No newline at end of file diff --git a/manga-py-stable_1.x/manga_py/providers/_3asq_org.py b/manga-py-stable_1.x/manga_py/providers/_3asq_org.py new file mode 100644 index 0000000..6b10d54 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/_3asq_org.py @@ -0,0 +1,18 @@ +from .rawdevart_com import RawDevArtCom + + +class ThreeAsqOrg(RawDevArtCom): + + def get_chapter_index(self) -> str: + return self.chapter.split('/')[-2] + + def get_files(self): + parser = self.html_fromstring(self.chapter) + return self._images_helper(parser, 'img.wp-manga-chapter-img') + + @property + def chapter(self): + return super().chapter + '?style=list' + + +main = ThreeAsqOrg diff --git a/manga-py-stable_1.x/manga_py/providers/__init__.py b/manga-py-stable_1.x/manga_py/providers/__init__.py new file mode 100644 index 0000000..a4cf5d7 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/__init__.py @@ -0,0 +1,877 @@ +import re +import importlib + +providers_list = { + '1stkissmanga_com': [ + r'1stkissmanga\.com/manga/.', + ], + '3asq_info': [ + r'3asq\.info/.', + ], + '_3asq_org': [ + r'3asq\.org/.', + ], + '7sama_com': [ + r'7sama\.com/manga/.', + ], + # '8muses_com': [ + # r'8muses\.com/comics/album/.', 
+ # ], + 'ac_qq_com': [ + r'ac\.qq\.com/Comic.+?/id/\d', + ], + 'acomics_ru': [ + r'acomics\.ru/~.', + ], + 'adulto_seinagi_org': [ + r'adulto\.seinagi\.org/(series|read)/.', + r'xanime-seduccion\.com/(series|read)/.', + r'twistedhelscans\.com/(series|read)/.', + r'reader\.evilflowers\.com/(series|read)/.', + ], + 'allhentai_ru': [ + r'allhentai\.ru/.', + ], + 'animextremist_com': [ + r'animextremist\.com/mangas-online/.', + ], + 'antisensescans_com': [ + r'antisensescans\.com/online/(series|read)/.', + ], + 'asmhentai_com': [ + r'asmhentai\.com/(g|gallery)/\d', + ], + 'atfbooru_ninja': [ + r'atfbooru\.ninja/posts.', + ], + # 'authrone_com': [ + # r'authrone\.com/manga/.', + # ], + 'bato_to': [ + r'bato\.to/(series|chapter)/\d', + ], + 'blogtruyen_com': [ + r'blogtruyen\.com/.', + ], + 'bns_shounen_ai_net': [ + r'bns\.shounen-ai\.net/read/(series|read)/.', + ], + 'boredomsociety_xyz': [ + r'boredomsociety\.xyz/(titles/info|reader)/\d', + ], + 'cdmnet_com_br': [ + r'cdmnet\.com\.br/titulos/.', + ], + 'chochox_com': [ + r'chochox\.com/.', + ], + 'choutensei_260mb_net': [ + r'choutensei\.260mb\.net/(series|read)/.', + ], + 'comicextra_com': [ + r'comicextra\.com/.', + ], + # 'comico_co_id_content': [ + # r'comico\.co\.id/content\?contentId=\d', + # ], + 'comico_co_id_titles': [ + r'comico\.co\.id/titles/\d', + ], + 'comic_webnewtype_com': [ + r'comic\.webnewtype\.com/contents/.', + ], + 'comico_jp': [ + r'comico\.jp(?:/challenge)?/(detail|articleList).+titleNo.', + ], + 'comicsandmanga_ru': [ + r'comicsandmanga\.ru/online-reading/.', + ], + 'comicvn_net': [ + r'comicvn\.net/truyen-tranh-online/.', + ], + 'cycomi_com': [ + r'cycomi\.com/fw/cycomibrowser/chapter/title/\d', + ], + 'danbooru_donmai_us': [ + r'danbooru\.donmai\.us/posts.', + ], + 'darkskyprojects_org': [ + r'darkskyprojects\.org/biblioteca/.', + ], + 'dejameprobar_es': [ + r'dejameprobar\.es/slide/.', + r'menudo-fansub\.com/slide/.', + r'npscan\.mangaea\.net/slide/.', + r'snf\.mangaea\.net/slide/.', + r'yuri-ism\.net/slide/.', + ], + 'desu_me': [ + r'desu\.me/manga/.', + ], + 'digitalteam1_altervista_org': [ + r'digitalteam1\.altervista\.org/reader/read/.', + ], + # 'dm5_com': [ + # r'dm5\.com/manhua-.', + # ], + 'doujins_com': [ + r'doujins\.com/gallery/.', + r'doujin-moe\.us/gallery/.', + ], + 'e_hentai_org': [ + r'e-hentai\.org/g/\d', + ], + 'exhentai_org': [ + r'exhentai\.org/g/\d', + ], + 'fanfox_net': [ + r'fanfox\.net/manga/.', + ], + 'freeadultcomix_com': [ + r'freeadultcomix\.com/.', + ], + 'freemanga_to': [ + r'freemanga\.to/(manga|chapter)/.', + ], + 'funmanga_com': [ + r'funmanga\.com/.', + ], + 'gmanga_me': [ + r'gmanga\.me/mangas/.', + ], + 'gomanga_co': [ + r'gomanga\.co/reader/.', + r'jaiminisbox\.com/reader/.', + r'kobato\.hologfx\.com/reader/.', + # r'atelierdunoir\.org/reader/.', + r'seinagi\.org/reader/.', + ], + 'goodmanga_net': [ + r'goodmanga\.net/.', + ], + 'helveticascans_com': [ + r'helveticascans\.com/r/(series|read)/.', + ], + 'hakihome_com': [ + r'hakihome\.com/.', + ], + 'hatigarmscans_eu': [ + r'hatigarmscans\.eu/hs/(series|read).', + r'hatigarmscans\.net/hs/(series|read).', + r'hatigarmscans\.net/manga/.', + ], + 'heavenmanga_biz': [ + r'heavenmanga\.\w+/.', + ], + 'hentai2read_com': [ + r'hentai2read\.com/.', + ], + 'hentai_cafe': [ + r'hentai\.cafe/.', + ], + 'hentai_chan_me': [ + r'hentai-chan\.me/(related|manga|online)/.', # todo + ], + 'hentai_image_com': [ + r'hentai-image\.com/image/.', + ], + 'hentaihand_com': [ + r'hentaihand\.com/comic/\d', + ], + 'hentaifox_com': [ + r'hentaifox\.com/.', 
+ ], + 'hentaihere_com': [ + r'hentaihere\.com/m/.', + ], + 'hentaiporns_net': [ + r'hentaiporns\.net/.' + ], + 'hentairead_com': [ + r'hentairead\.com/.', + ], + 'hitomi_la': [ + r'hitomi\.la/(galleries|reader)/.', + ], + 'hgamecg_com': [ + r'hgamecg\.com/index/category/\d', + ], + 'hitmanga_eu': [ + r'hitmanga\.eu/.', + r'mymanga\.io/.', + ], + 'hocvientruyentranh_com': [ + r'hocvientruyentranh\.com/(manga|chapter)/.', + ], + 'hoducomics_com': [ + r'hoducomics\.com/webtoon/list/\d', + r'hodu1\.com/webtoon/list/\d', + ], + 'hotchocolatescans_com': [ + r'hotchocolatescans\.com/fs/(series|read)/.', + r'mangaichiscans\.mokkori\.fr/fs/(series|read)/.', + r'taptaptaptaptap\.net/fs/(series|read)/.', + ], + 'riceballicious_info': [ + r'riceballicious\.info/fs/reader/(series|read)/.', + ], + 'rocaca_com': [ + r'rocaca\.com/manga/.', + ], + 'inmanga_com': [ + r'inmanga\.com/ver/manga/.', + ], + 'isekaiscan_com': [ + r'isekaiscan\.com/manga/.', + ], + 'japscan_com': [ + r'japscan\.to/.', + ], + 'jurnalu_ru': [ + r'jurnalu\.ru/online-reading/.', + ], + 'kissmanga_com': [ + r'kissmanga\.com/Manga/.', + ], + 'komikcast_com': [ + r'komikcast\.com/.', + ], + 'komikid_com': [ + r'komikid\.com/manga/.', + r'mangazuki\.co/manga/.', + r'mangaforest\.com/manga/.', + r'mangadenizi\.com/.', + r'mangadoor\.com/manga/.', + r'manga\.fascans\.com/manga/.', + r'mangadesu\.net/manga/.', + r'mangahis\.com/manga/.', + r'cmreader\.info/manga/.', + r'rawmangaupdate\.com/manga/.', + r'mangaraw\.online/manga/.', + r'manhua-tr\.com/manga/.', + r'manga-v2\.mangavadisi\.org/manga/.', + r'universoyuri\.com/manga/.', + r'digitalteam1\.altervista\.org/manga/.', + # r'sosscanlation\.com/manga/.', + r'komikgue\.com/manga/.', + r'onma\.me/manga/.', + ], + 'kumanga_com': [ + r'kumanga\.com/manga/\d', + ], + 'lector_kirishimafansub_com': [ + r'lector\.kirishimafansub\.com/(lector/)?(series|read)/.', + ], + 'leitor_net': [ + r'leitor\.net/manga/.', + ], + 'leomanga_com': [ + r'leomanga\.com/manga/.', + ], + 'leviatanscans_com': [ + r'leviatanscans\.com/comics/\d' + ], + 'lhtranslation_com': [ + r'read\.lhtranslation\.com/(truyen|manga)-.', + r'lhtranslation\.net/(truyen|manga)-.', + ], + 'lolibooru_moe': [ + r'lolibooru\.moe/post.', + ], + 'lolivault_net': [ + r'lolivault\.net/online/(series|read).', + ], + 'luscious_net': [ + r'luscious\.net/.+/album/.', + r'luscious\.net/albums/.', + ], + 'mangapark_org': [ + r'mangapark\.org/(series|chapter)/', # is different! 
+ ], + 'mang_as': [ + r'mang\.as/manga/.', + ], + 'manga_ae': [ + r'mangaae\.com/.', + ], + 'manga_fox_com': [ + r'manga-fox\.com/.', + r'manga-here\.io/.', + ], + 'manga_mexat_com': [ + r'manga\.mexat\.com/category/.', + ], + 'manga_online_biz': [ + r'manga-online\.biz/.', + ], + 'manga_online_com_ua': [ + r'manga-online\.com\.ua/.+html', + ], + 'manga_room_com': [ + r'manga-room\.com/manga/.', + ], + 'manga_sh': [ + r'manga\.sh/comics/.', + ], + 'manga_tube_me': [ + r'manga-tube\.me/series/.', + ], + 'mangaarabteam_com': [ + r'mangaarabteam\.com/.', + ], + 'manga_tr_com': [ + r'manga-tr\.com/(manga|id)-.', + ], + 'mangabat_com': [ + r'mangabat\.com/(manga|chapter)/.', + ], + 'mangabb_co': [ + r'mangabb\.co/.', + ], + 'mangabox_me': [ + r'mangabox\.me/reader/.', + ], + 'mangachan_me': [ + r'mangachan\.me/(related|manga|online)/.', + r'yaoichan\.me/(manga|online).', + ], + 'mangachan_me_download': [ + r'mangachan\.me/download/.', + r'hentai-chan\.me/download/.', + r'yaoichan\.me/download/.', + ], + 'mangacanblog_com': [ + r'mangacanblog\.com/.', + ], + 'mangaclub_ru': [ + r'mangaclub\.ru/.', + ], + 'mangadeep_com': [ + r'mangadeep\.com/.', + r'manga99\.com/.', + ], + 'mangadex_org': [ + r'mangadex\.org/manga/.', + ], + 'mangadex_com': [ + r'mangadex\.com/(title|chapter)/.', + r'mangadex\.org/(title|chapter)/.', + ], + 'mangadex_info': [ + r'mangadex\.info/manga/.', + ], + 'mangaeden_com': [ + r'mangaeden\.com/[^/]+/[^/]+-manga/.', + r'perveden\.com/[^/]+/[^/]+-manga/.', + ], + 'mangafans_us': [ # MangaNeloCom + r'mangafans\.us/manga/.', + r'mangahot\.org/read-manga/.', + ], + # 'mangaforall_com': [ + # r'mangaforall\.com/m/.', + # ], + 'mangafreak_net_download': [ + r'mangafreak\.net/Manga/.', + ], + 'mangafull_org': [ + r'mangafull\.org/manga/.', + ], + # 'mangago_me': [ + # r'mangago\.me/read-manga/.', + # ], + 'mangahasu_se': [ + r'mangahasu\.se/.', + ], + 'mangaheaven_club': [ + r'mangaheaven\.club/read-manga/.', + ], + 'mangaheaven_xyz': [ + r'mangaheaven\.xyz/manga/.', + ], + 'mangahere_cc': [ + r'mangahere\.co/manga/.', + r'mangahere\.cc/manga/.', + ], + 'mangahi_net': [ + r'mangahi\.net/.', + ], + 'mangaid_me': [ + r'mangaid\.co/manga/.', + r'mangaid\.net/manga/.', + r'mangaid\.me/manga/.', + ], + 'mangahome_com': [ + r'mangahome\.com/manga/.', + ], + 'mangahub_io': [ + r'mangahub\.io/(manga|chapter)/.', + # r'mangareader\.site/(manga|chapter)/.', + r'mangakakalot\.fun/(manga|chapter)/.', + r'mangahere\.onl/(manga|chapter)/.', + ], + 'mangahub_ru': [ + r'mangahub\.ru/.', + ], + 'mangaindo_web_id': [ + r'mangaindo\.web\.id/.', + ], + 'mangainn_net': [ + r'mangainn\.net/.', + ], + 'mangajinnofansub_com': [ # normal + r'mangajinnofansub\.com/lector/(series|read)/.', + ], + 'mangakakalot_com': [ + r'mangakakalot\.com/(manga|chapter)/.', + ], + 'mangakatana_com': [ + r'mangakatana\.com/manga/.', + ], + 'mangaku_web_id': [ + # r'mangaku\.web\.id/.', + r'mangaku\.in/.', + ], + 'mangalib_me': [ + r'mangalib\.me/.', + ], + 'mangalife_us': [ + r'mangalife\.us/(read-online|manga)/.', + ], + 'mangamew_com': [ + r'mangamew\.com/(\w+-)?manga/.', + ], + 'mangamew_com_vn': [ + r'mangamew\.com/(\w+-)?truyen/.', + ], + 'manganelo_com': [ + r'manganelo\.com/(manga|chapter)/.', + ], + 'mangaon_net': [ + r'mangaon\.net/(manga-info|read-online)/.', + ], + 'mangaonline_com_br': [ + r'mangaonline\.com\.br/.', + ], + 'mangaonline_today': [ + r'mangaonline\.today/.', + ], + 'mangaonlinehere_com': [ + r'mangaonlinehere\.com/(manga-info|read-online)/.', + ], + 'mangapanda_com': [ + r'mangapanda\.com/.', + ], 
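+ # Each key in this mapping is a module name under manga_py.providers; the values are URL regex fragments that get_provider() (defined at the bottom of this file) prefixes with \b, ORs together and matches against the requested URL before importing that module and returning its 'main' class. + # Illustration only (made-up URL): get_provider('https://mangapanda.com/some-title') would resolve to the mangapanda_com provider listed above.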
+ 'mangapark_me': [ + r'mangapark\.me/manga/.', + ], + 'mangareader_net': [ + r'mangareader\.net/.', + ], + 'mangareader_site': [ + r'mangareader\.site', + ], + 'mangareader_xyz': [ + r'mangareader\.xyz/manga/.', + r'mangareader\.xyz/.+?/chapter-\d', + # r'mangafox\.cc/manga/.', + # r'mangafox\.cc/.+?/chapter-\d', + ], + 'mangarock_com': [ + r'mangarock\.com/manga/.', + ], + 'mangarussia_com': [ + r'mangarussia\.com/(manga|chapter)/.', + ], + 'mangasaurus_com': [ + r'mangasaurus\.com/(manga|view).', + ], + 'mangaseeonline_us': [ + r'mangaseeonline\.us/(read-online|manga)/.', + ], + 'mangashiro_net': [ + r'mangashiro\.net/.', + ], + 'mangasupa_com': [ + r'mangasupa\.com/(manga|chapter)/.', + ], + 'mangasushi_net': [ + r'mangasushi\.net/manga/.', + ], + 'mangatail_com': [ + r'mangasail\.com/(manga|chapter|node|content)/.', + r'mangasail\.co/(manga|chapter|node|content)/.', + r'mangatail\.me/(manga|chapter|node|content)/.', + ], + 'mangatown_com': [ + r'mangatown\.com/manga/.', + ], + 'mangatrue_com': [ + r'mangatrue\.com/manga/.', + r'mangaall\.com/manga/.', + ], + 'mangawindow_net': [ + r'mangawindow\.net/(series|chapter)/\d', # is different! + ], + 'mangax_net': [ + r'mangax\.net/\w/.', + ], + 'mangazuki_me': [ + r'mangazuki\.me/manga/.', + r'mangazuki\.info/manga/.', + r'mangazuki\.online/manga/.', + ], + 'manhuagui_com': [ + r'manhuagui\.com/comic/\d', + ], + 'manhuatai_com': [ + r'manhuatai\.com/.', + ], + 'manhwa_co': [ + r'manhwa\.co/.', + ], + # 'manhwahentai_com': [ + # r'manhwahentai\.com/manhwa/.' + # ], + 'merakiscans_com': [ + r'merakiscans\.com/manga/.', + ], + 'mintmanga_com': [ + r'mintmanga\.com/.', + ], + 'mngcow_co': [ + r'mngcow\.co/.', + ], + 'mngdoom_com': [ + r'mangadoom\.co/.', + r'mngdoom\.com/.', + ], + 'mymangalist_org': [ + r'mymangalist.org/(read|chapter)-', + ], + 'myreadingmanga_info': [ + r'myreadingmanga\.info/.', + ], + 'neumanga_tv': [ + r'neumanga\.tv/manga/.', + ], + 'nhentai_net': [ + r'nhentai\.net/g/.', + ], + 'niadd_com': [ + r'niadd\.com/manga/.', + ], + 'nightow_net': [ + r'nightow\.net/online/\?manga=.', + ], + 'nineanime_com': [ + r'nineanime\.com/manga/.+\.html' + ], + 'ninemanga_com': [ + r'ninemanga\.com/(manga|chapter).', + r'addfunny\.com/(manga|chapter).', + ], + 'noranofansub_com': [ + r'noranofansub\.com(/lector)?/(series/|read/)?.', + ], + 'nozominofansub_com': [ # mangazuki_co + r'nozominofansub\.com/public(/index\.php)?/manga/.', + r'godsrealmscan\.com/public(/index\.php)?/manga/.', + ], + # 'nude_moon_me': [ + # r'nude-moon\.me/\d', + # ], + 'otakusmash_com': [ + r'otakusmash\.com/.', + r'mrsmanga\.com/.', + r'mentalmanga\.com/.', + r'mangasmash\.com/.', + r'omgbeaupeep\.com/comics/.', + ], + 'otscans_com': [ + r'otscans\.com/foolslide/(series|read)/.', + ], + 'pecintakomik_com_manga': [ + r'pecintakomik\.com/manga/.', + ], + 'pecintakomik_com': [ + r'pecintakomik\.com/.', + ], + 'plus_comico_jp_manga': [ + r'plus\.comico\.jp/manga/\d', + ], + 'plus_comico_jp': [ + r'plus\.comico\.jp/store/\d', + ], + 'porncomix_info': [ + r'porncomix\.info/.', + ], + 'psychoplay_co': [ + r'psychoplay\.co/(series|read)/.', + ], + 'puzzmos_com': [ + r'puzzmos\.com/manga/.', + ], + r'pururin_io': [ + r'pururin\.io/(gallery|read)/.', + ], + 'pzykosis666hfansub_com': [ + r'pzykosis666hfansub\.com/online/.', + ], + 'ravens_scans_com': [ + r'ravens-scans\.com(/lector)?/(serie/|read/).', + ], + 'raw_senmanga_com': [ + r'raw\.senmanga\.com/.', + ], + 'rawdevart_com': [ + r'rawdevart\.com/manga/.', + ], + 'rawlh_com': [ + r'lhscan\.net/(truyen|manga|read)-.', + 
r'rawqq\.com/(truyen|manga|read)-.', + r'rawqv\.com/(truyen|manga|read)-.', + ], + 'rawneko_com': [ + r'rawneko\.com/manga/.', + ], + 'read_egscans_com': [ + r'read\.egscans\.com/.', + ], + 'read_powermanga_org': [ + r'lector\.dangolinenofansub\.com/(series|read)/.', + r'read\.powermanga\.org/(series|read)/.', + # r'read\.yagami\.me/(series|read)/.', + r'reader\.kireicake\.com/(series|read)/.', + r'reader\.shoujosense\.com/(series|read)/.', + r'reader\.whiteoutscans\.com/(series|read)/.', + r'slide\.world-three\.org/(series|read)/.', + r'manga\.animefrontline\.com/(series|read)/.', + r'reader\.s2smanga\.com/(series|read)/.', + r'reader\.seaotterscans\.com/(series|read)/.', + r'reader\.idkscans\.com/(series|read)/.', + r'reader\.thecatscans\.com/(series|read)/.', + r'reader\.deathtollscans\.net/(series|read)/.', + r'lector\.ytnofan\.com/(series|read)/.', + r'reader\.jokerfansub\.com/(series|read)/.', + r'lector\.patyscans\.com/(series|read)/.', + r'truecolorsscans\.miocio\.org/(series|read)/.', + r'reader\.letitgo\.scans\.today/(series|read)/.', + r'reader\.fos-scans\.com/(series|read)/.', + r'reader\.serenade\.moe/(series|read)/.', + r'reader\.vortex-scans\.com/(series|read)/.', + r'reader\.roseliascans\.com/(series|read)/.', + r'reader\.silentsky-scans\.net/(series|read)/.', + r'hoshiscans\.shounen-ai\.net/(series|read)/.', + r'digitalteamreader\.netsons\.org/(series|read)/.', + r'reader\.manga-download\.org/(series|read)/.', + ], + 'read_yagami_me': [ + r'read\.yagami\.me/series/\w', + ], + # 'readcomicbooksonline_org_manga': [ # todo #168 + # r'readcomicbooksonline\.net/manga/.', + # r'readcomicbooksonline\.org/manga/.', + # ], + # 'readcomicbooksonline_org': [ + # r'readcomicbooksonline\.net/.', + # r'readcomicbooksonline\.org/.', + # ], + 'comicpunch_net_manga': [ + r'comicpunch\.net/asiancomics/.', + ], + 'comicpunch_net': [ + r'comicpunch\.net/.', + ], + 'reader_championscans_com': [ + r'reader\.championscans\.com/(series|read)/.', + ], + 'reader_imangascans_org': [ + r'reader\.imangascans\.org/.', + ], + # 'readhentaimanga_com': [ + # r'readhentaimanga\.com/.', + # ], + 'readcomiconline_to': [ + r'readcomiconline\.to/Comic/.', + ], + 'readcomicsonline_ru': [ + r'readcomicsonline\.ru/comic/.', + ], + 'readmanga_me': [ + r'readmanga\.me/.', + ], + 'readmanga_eu': [ + r'readmanga\.eu/manga/\d+/.', + ], + 'readmng_com': [ + r'readmng\.com/.', + ], + 'readms_net': [ + r'readms\.net/(r|manga)/.', + ], + 'remanga_org': [ + r'remanga\.org/manga/.', + ], + 'santosfansub_com': [ + r'santosfansub\.com/Slide/.', + ], + 'selfmanga_ru': [ + r'selfmanga\.ru/.', + ], + 'senmanga_com': [ + r'senmanga\.com/.', + ], + 'shakai_ru': [ + r'shakai\.ru/manga.*?/\d', + ], + 'shogakukan_co_jp': [ + r'shogakukan\.co\.jp/books/\d', + r'shogakukan\.co\.jp/magazines/series/\d', + ], + 'shogakukan_tameshiyo_me': [ + r'shogakukan\.tameshiyo\.me/\d', + ], + 'siberowl_com': [ + r'siberowl\.com/mangas/.', + ], + 'sleepypandascans_co': [ + r'sleepypandascans\.co/(Series|Reader)/.', + ], + #'somanga_net': [ + # r'somanga\.net/(leitor|manga)/.', + # r'somangas\.net/(leitor|manga)/.', + #], + 'subapics_com': [ + # r'subapics\.com/manga/.', + # r'subapics\.com/.+-chapter-.', + r'mangakita\.net/manga/.', + r'mangakita\.net/.+-chapter-.', + r'komikstation\.com/manga/.', + r'komikstation\.com/.+-chapter-.', + r'mangavy\.com/manga/.', + r'mangavy\.com/.+-chapter-.', + r'mangakid\.net/manga/.', + r'mangakid\.net/.+-chapter-.', + ], + 'submanga_online': [ + r'submanga\.online/manga/.', + ], + 'sunday_webry_com': [ + 
r'sunday-webry\.com/series/\d', + ], + 'taadd_com': [ + r'taadd\.com/(book|chapter)/.', + ], + 'tapas_io': [ + r'tapas\.io/episode/\d', + r'tapas\.io/series/\w', + ], + 'tenmanga_com': [ + r'tenmanga\.com/(book|chapter)/.', + ], + 'tmofans_com': [ + r'tmofans\.com/library/manga/\d', + ], + 'translate_webtoons_com': [ + r'translate\.webtoons\.com/webtoonVersion\?webtoonNo.', + ], + 'trashscanlations_com': [ + r'trashscanlations\.com/series/.', + ], + 'tonarinoyj_jp': [ + r'tonarinoyj\.jp/episode/.', + ], + 'toonkor_co': [ + r'toonkor\.co/.', + ], + 'triplesevenscans_com': [ + r'sensescans\.com/reader/(series|read)/.', + r'triplesevenscans\.com/reader/(series|read)/.', + r'cm-scans\.shounen-ai\.net/reader/(series|read)/.', + r'yaoislife\.shounen-ai\.net/reader/(series|read)/.', + r'fujoshibitches\.shounen-ai\.net/reader/(series|read)/.', + ], + 'truyen_vnsharing_site': [ + r'truyen\.vnsharing\.site/index/read/.', + ], + 'truyenchon_com': [ + r'truyenchon\.com/truyen/.', + r'nettruyen\.com/truyen-tranh/.', + ], + 'truyentranhtuan_com': [ + r'truyentranhtuan\.com/.', + ], + 'tsumino_com': [ + r'tsumino\.com/Book/Info/\d', + r'tsumino\.com/Read/View/\d', + ], + # 'tumangaonline_com': [ + # r'tumangaonline\.com/.', + # r'tumangaonline\.me/.', + # ], + 'unionmangas_net': [ + r'unionmangas\.cc/(leitor|manga)/.', + r'unionmangas\.net/(leitor|manga)/.', + r'unionmangas\.site/(leitor|manga)/.', + ], + 'viz_com': [ + r'viz\.com/shonenjump/chapters/.', + ], + 'web_ace_jp': [ + r'web-ace\.jp/youngaceup/contents/\d', + ], + 'webtoon_bamtoki_com': [ + r'webtoon\.bamtoki\.com/.', + r'webtoon\.bamtoki\.se/.', + ], + 'webtoons_com': [ + r'webtoons\.com/[^/]+/[^/]+/.', + ], + 'webtoontr_com': [ + r'webtoontr\.com/_/.', + ], + 'westmanga_info': [ + r'westmanga\.info/.', + ], + 'whitecloudpavilion_com': [ + r'whitecloudpavilion\.com/manga/free/manga/.', + ], + 'wiemanga_com': [ + r'wiemanga\.com/(manga|chapter)/.', + ], + 'wmanga_ru': [ + r'wmanga\.ru/starter/manga_.', + ], + 'yande_re': [ + r'yande\.re/post.', + ], + 'zeroscans_com': [ + r'zeroscans\.com/manga/.', + r'manhwareader\.com/manga/.', + ], + 'zingbox_me': [ + r'zingbox\.me/.', + ], + 'zip_read_com': [ + r'zip-read\.com/.', + ], + 'zmanga_net': [ + r'zmanga\.net/.', + ], +} + + +def __check_provider(provider, url): + items = [r'\b' + i for i in provider] + reg = '(?:' + '|'.join(items) + ')' + return re.search(reg, url) + + +def get_provider(url): + fromlist = 'manga_py.providers' + for i in providers_list: + if __check_provider(providers_list[i], url): + provider = importlib.import_module('%s.%s' % (fromlist, i)) + return provider.main + return False diff --git a/manga-py-stable_1.x/manga_py/providers/_template.py b/manga-py-stable_1.x/manga_py/providers/_template.py new file mode 100644 index 0000000..982261b --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/_template.py @@ -0,0 +1,50 @@ +from manga_py.provider import Provider +from .helpers.std import Std + + +class _Template(Provider, Std): + + def get_archive_name(self) -> str: + pass + + def get_chapter_index(self) -> str: + pass + + def get_main_content(self): + pass + + def get_manga_name(self) -> str: + return '' + + def get_chapters(self): + # return self._elements('a.chapter') + return [] + + def get_files(self): + return [] + + def get_cover(self) -> str: + # return self._cover_from_content('.cover img') + pass + + def book_meta(self) -> dict: + """ + :see http://acbf.wikia.com/wiki/Meta-data_Section_Definition + return { + 'author': str, + 'title': str, + 'annotation': str, + 
'keywords': str, + 'cover': str, + 'rating': str, + } + """ + pass + + def chapter_for_json(self) -> str: + # overload std param, if need + # return self.chapter + pass + + +main = _Template diff --git a/manga-py-stable_1.x/manga_py/providers/ac_qq_com.py b/manga-py-stable_1.x/manga_py/providers/ac_qq_com.py new file mode 100644 index 0000000..e5faf63 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/ac_qq_com.py @@ -0,0 +1,52 @@ +from manga_py.crypt import AcQqComCrypt +from manga_py.provider import Provider +from .helpers.std import Std + + +class AcQqCom(Provider, Std): + _decoder = None + _re = None + + def get_chapter_index(self) -> str: + return self.re.search(r'/cid/(\d+)', self.chapter).group(1) + + def get_main_content(self): + content = self._storage.get('main_content', None) + if content is not None: + return content + idx = self._get_name(r'/id/(\d+)') + return self.http_get('{}/Comic/comicInfo/id/{}'.format(self.domain, idx)) + + def get_manga_name(self) -> str: + return self.text_content(self.content, '.works-intro-title strong', 0) + + def get_chapters(self): + return self._elements('.chapter-page-all li a')[::-1] + + def get_files(self): + content = self.http_get(self.chapter) + data = self._re.search(content).group(1) + data = self._decoder.decode(data) + return [i.get('url') for i in data.get('picture', [])][0:1] + + def get_cover(self) -> str: + return self._cover_from_content('.works-cover img') + + def prepare_cookies(self): + self._re = self.re.compile(r'var\s+DATA\s*=\s*[\'"](.*?)[\'"]') + self._decoder = AcQqComCrypt(self) + self._base_cookies() + + def book_meta(self) -> dict: + result = { + 'author': self.text_content(self.content, '.works-intro-digi em'), + 'rating': self.text_content(self.content, 'p.ui-left strong'), + 'cover': self.get_cover(), + 'annotation': self.text_content(self.content, '.works-intro-short'), + 'language': 'cn', + } + + return result + + +main = AcQqCom diff --git a/manga-py-stable_1.x/manga_py/providers/acomics_ru.py b/manga-py-stable_1.x/manga_py/providers/acomics_ru.py new file mode 100644 index 0000000..5f9f9dd --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/acomics_ru.py @@ -0,0 +1,49 @@ +from manga_py.provider import Provider +from .helpers.std import Std + + +class AComicsRu(Provider, Std): + + def get_archive_name(self) -> str: + return 'archive' + + def get_chapter_index(self) -> str: + return '0' + + def get_main_content(self): + return self._get_content('{}/~{}') + + def get_manga_name(self) -> str: + return self._get_name(r'\.ru/~([^/]+)') + + def get_chapters(self): + return ['~' + self.manga_name] + + def get_files(self): + pages_max = self.text_content(self.content, 'span.issueNumber').split('/')[1] + _min = self._params['skip_volumes'] + _max = self._params['max_volumes'] + if _max > 0 and _min > 0: + _max += _min - 1 + + if _max == 0: + _max = int(pages_max) + + images = [] + for i in range(_min, _max): + parser = self.document_fromstring(self._get_content('{}/~{}/%d' % (i + 1))) + images += self._images_helper(parser, '#mainImage') + + return images + + def get_cover(self) -> str: + return self._cover_from_content('header.serial a img') + + def book_meta(self) -> dict: + pass + + def prepare_cookies(self): + self.update_cookies({'ageRestrict': '21'}) + + +main = AComicsRu diff --git a/manga-py-stable_1.x/manga_py/providers/adulto_seinagi_org.py b/manga-py-stable_1.x/manga_py/providers/adulto_seinagi_org.py new file mode 100644 index 0000000..54c0988 --- /dev/null +++ 
b/manga-py-stable_1.x/manga_py/providers/adulto_seinagi_org.py @@ -0,0 +1,9 @@ +from .read_powermanga_org import ReadPowerMangaOrg + + +class AdultoSeinagiOrg(ReadPowerMangaOrg): + _name_re = '[^/]/[^/]+/([^/]+)/' + _content_str = '{}/series/{}/' + + +main = AdultoSeinagiOrg diff --git a/manga-py-stable_1.x/manga_py/providers/allhentai_ru.py b/manga-py-stable_1.x/manga_py/providers/allhentai_ru.py new file mode 100644 index 0000000..85ee8a5 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/allhentai_ru.py @@ -0,0 +1,64 @@ +from manga_py.provider import Provider +from .helpers.std import Std + + +class AllHentaiRu(Provider, Std): + + def get_archive_name(self): + name = self.re.search('/.+/([^/]+/[^/]+)/?', self.chapter) + return self.normal_arc_name({'vol': name.group(1).split('/', 2)}) + + def get_chapter_index(self): + name = self.re.search('/.+/(?:vol)?([^/]+/[^/]+)/?', self.chapter) + return name.group(1).replace('/', '-') + + def get_main_content(self): + return self._get_content('{}/{}?mature=1&mtr=1') + + def get_manga_name(self) -> str: + return self._get_name(r'\.ru/([^/]+)') + + def get_chapters(self): + return self._elements('.expandable .cTable tr > td > a') + + def get_files(self): + _uri = self.http().normalize_uri(self.chapter) + content = self.http_get(_uri) + result = self.re.search(r'var pictures.+?(\[\{.+\}\])', content, self.re.M) + if not result: + return [] + content = result.group(1).replace("'", '"') + content = self.re.sub('(\w*):([^/])', r'"\1":\2', content) + return [i['url'] for i in self.json.loads(content)] + + def get_cover(self): + return self._cover_from_content('.picture-fotorama > img') + + def save_file(self, idx=None, callback=None, url=None, in_arc_name=None): + _path = None + try: + _path = super().save_file(idx, callback, url, in_arc_name) + except AttributeError: + pass + if _path is None: + for i in ['a', 'b', 'c']: + try: + _path, idx, _url = self._save_file_params_helper(url, idx) + _url = self.re.sub(r'//\w\.', '//%s.' 
% i, url) + + self.http().download_file(_url, _path, idx) + callable(callback) and callback() + self.after_file_save(_path, idx) + + self._archive.lazy_add(_path) + break + except AttributeError: + pass + return _path + + def book_meta(self) -> dict: + # todo meta + pass + + +main = AllHentaiRu diff --git a/manga-py-stable_1.x/manga_py/providers/animextremist_com.py b/manga-py-stable_1.x/manga_py/providers/animextremist_com.py new file mode 100644 index 0000000..7d8c9b8 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/animextremist_com.py @@ -0,0 +1,46 @@ +from manga_py.provider import Provider +from .helpers import animextremist_com +from .helpers.std import Std + + +class AnimeXtremistCom(Provider, Std): + helper = None + prefix = '/mangas-online/' + + def get_chapter_index(self) -> str: + chapter = self.chapter + idx = self.re.search(r'(.+?-\d+)', chapter[0]) + return idx.group(1) if idx else '0' + + def get_main_content(self): + return self._get_content('{}%s{}/' % self.prefix) + + def get_manga_name(self) -> str: + return self._get_name(r'{}([^/]+)'.format(self.prefix)) + + def get_chapters(self): + ch = self.helper.get_chapters() + return ch[::-1] + + def get_files(self): + chapter = self.chapter + items = self.helper.sort_images(chapter[1]) + images = [] + for i in items: + img = self.helper.get_page_image(i, 'img#photo') + img and images.append(img) + return images + + def prepare_cookies(self): + self.helper = animextremist_com.AnimeXtremistCom(self) + + def get_cover(self) -> str: + pass + # return self._cover_from_content('.cover img') + + def book_meta(self) -> dict: + # todo meta + pass + + +main = AnimeXtremistCom diff --git a/manga-py-stable_1.x/manga_py/providers/antisensescans_com.py b/manga-py-stable_1.x/manga_py/providers/antisensescans_com.py new file mode 100644 index 0000000..2223634 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/antisensescans_com.py @@ -0,0 +1,9 @@ +from .read_powermanga_org import ReadPowerMangaOrg + + +class AntiSenseScansCom(ReadPowerMangaOrg): + _name_re = '/online/[^/]+/([^/]+)/' + _content_str = '{}/online/series/{}/' + + +main = AntiSenseScansCom diff --git a/manga-py-stable_1.x/manga_py/providers/asmhentai_com.py b/manga-py-stable_1.x/manga_py/providers/asmhentai_com.py new file mode 100644 index 0000000..abe060b --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/asmhentai_com.py @@ -0,0 +1,45 @@ +from manga_py.provider import Provider +from .helpers.std import Std + + +class AsmHentaiCom(Provider, Std): + + def get_archive_name(self) -> str: + return 'archive' + + def get_chapter_index(self) -> str: + return '0' + + def get_main_content(self): + return self.http_get(self.get_url()) + + def get_manga_name(self) -> str: + title = self.text_content(self.content, '.info > h1,title') + if ~title.find(' Page '): + title = self.re.search(r'(.+) Page ', title).group(1) + return title + + def get_chapters(self): + url = self.get_url() + if ~url.find('/g/'): + url = self._elements('.gallery > div > a')[0].get('href') + return [url] + + def get_files(self): + content = self.http_get(self.chapter) + src = self.re.search(r'\$\(\[[\'"]//(.+)/[\'"]', content).group(1) + pages = self.re.search(r'var +Pages ?=.*?(\d+)', content).group(1) + result = [] + http = self.re.search('(https?):', self.get_url()).group(1) + for i in range(int(pages)): + result.append('{}://{}/{}.jpg'.format(http, src, 1 + i)) + return result + + def get_cover(self) -> str: + return self._cover_from_content('.cover > a > img') + + def book_meta(self) -> 
dict: + pass + + +main = AsmHentaiCom diff --git a/manga-py-stable_1.x/manga_py/providers/atfbooru_ninja.py b/manga-py-stable_1.x/manga_py/providers/atfbooru_ninja.py new file mode 100644 index 0000000..1ac61f9 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/atfbooru_ninja.py @@ -0,0 +1,8 @@ +from .danbooru_donmai_us import DanbooruDonmaiUs + + +class AtfBooruNinja(DanbooruDonmaiUs): + _archive_prefix = 'atfbooru' + + +main = AtfBooruNinja diff --git a/manga-py-stable_1.x/manga_py/providers/authrone_com.py b/manga-py-stable_1.x/manga_py/providers/authrone_com.py new file mode 100644 index 0000000..6af8380 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/authrone_com.py @@ -0,0 +1,43 @@ +from .helpers.std import Std +from .mangaonline_today import MangaOnlineToday + + +class AuthroneCom(MangaOnlineToday, Std): + _ch_selector = '.mng_det ul.lst > li > a' + + def get_archive_name(self) -> str: + idx = self.re.search('/manga/[^/]+/([^/]+/[^/]+)', self.chapter).group(1).split('.', 2) + if len(idx) > 1: + return 'vol_{:0>3}-{}_{}'.format(idx[0], *idx[1].split('/')) + return 'vol_{:0>3}-0-{}'.format(*idx[0].split('/')) + + def get_chapter_index(self) -> str: + return self.re.search('/manga/[^/]+/([^/]+)', self.chapter).group(1) + + def get_main_content(self): + return self._get_content('{}/manga/{}') + + def get_manga_name(self) -> str: + return self._get_name('/manga/([^/]+)') + + def get_chapters(self): # need sorting chapters: /manga/love_stage/ + items = self._elements(self._ch_selector) + pages = self._elements('ul.lst + ul.pgg li:last-child > a') + patern = r'list/(\d+)/' + if pages and len(pages): + link = pages[-1].get('href') + page = self.re.search(patern, link).group(1) + for i in range(2, int(page) + 1): + page_link = self.re.sub(patern, 'list/%d/' % i, link) + items += self._elements(self._ch_selector, self.http_get(page_link)) + return items + + def get_cover(self) -> str: + return self._cover_from_content('#sct_content img.cvr') + + def book_meta(self) -> dict: + # todo meta + pass + + +main = AuthroneCom diff --git a/manga-py-stable_1.x/manga_py/providers/bato_to.py b/manga-py-stable_1.x/manga_py/providers/bato_to.py new file mode 100644 index 0000000..cc90542 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/bato_to.py @@ -0,0 +1,65 @@ +from manga_py.provider import Provider +from .helpers.std import Std + + +class BatoTo(Provider, Std): + + def get_chapter_index(self) -> str: + return '{}-{}'.format( + self.chapter_id, + self.chapter[1], + ) + + def get_main_content(self): + url = self.get_url() + if ~url.find('/chapter/'): + url = self.html_fromstring(url, '.nav-path .nav-title > a', 0).get('href') + return self.http_get(url) + + def get_manga_name(self) -> str: + selector = '.nav-path .nav-title > a,.title-set .item-title > a' + content = self.http_get(self.get_url()) + return self.text_content(content, selector, 0) + + def get_chapters(self): + items = self._elements('.main > .item > a') + n = self.http().normalize_uri + result = [] + for i in items: + title = i.cssselect('b')[0].text_content().strip(' \n\t\r') + if ~title.find('DELETED'): # SKIP DELETED + continue + result.append(( + n(i.get('href')), + title, + )) + return result + # [( + # n(i.get('href')), + # i.cssselect('b')[0].text_content().strip(' \n\t\r'), + # )] + + @staticmethod + def _sort_files(data): + keys = sorted(data, key=lambda _: int(_)) + return [data[i] for i in keys] + + def get_files(self): + data = self.re.search(r'\simages\s*=\s*({.+});', self.http_get(self.chapter[0])) 
+ try: + return self._sort_files(self.json.loads(data.group(1))) + except ValueError: + return [] + + def get_cover(self) -> str: + return self._cover_from_content('.attr-cover img') + + def book_meta(self) -> dict: + # todo meta + pass + + def chapter_for_json(self): + return self.chapter[0] + + +main = BatoTo diff --git a/manga-py-stable_1.x/manga_py/providers/blogtruyen_com.py b/manga-py-stable_1.x/manga_py/providers/blogtruyen_com.py new file mode 100644 index 0000000..9e2ae01 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/blogtruyen_com.py @@ -0,0 +1,40 @@ +from manga_py.provider import Provider +from .helpers.std import Std + + +class BlogTruyenCom(Provider, Std): + + def get_chapter_index(self) -> str: + idx = self.re.search(r'\.com/c(\d+)/', self.chapter) + return '{}-{}'.format(self.chapter_id, idx.group(1)) + + def get_main_content(self): + url = self._test_main_url(self.get_url()) + return self.http_get(self.http().normalize_uri(url)) + + def _test_main_url(self, url): + if ~url.find('.com/c'): + selector = '.breadcrumbs a + a' + url = self.html_fromstring(url, selector, 0).get('href') + return url + + def get_manga_name(self) -> str: + url = self._test_main_url(self.get_url()) + return self.re.search(r'/\d+/([^/]+)', url).group(1) + + def get_chapters(self): + return self._elements('#list-chapters .title > a') + + def get_files(self): + items = self.html_fromstring(self.chapter, '#content img') + return [i.get('src') for i in items] + + def get_cover(self) -> str: + return self._cover_from_content('.thumbnail img') + + def book_meta(self) -> dict: + # todo meta + pass + + +main = BlogTruyenCom diff --git a/manga-py-stable_1.x/manga_py/providers/bns_shounen_ai_net.py b/manga-py-stable_1.x/manga_py/providers/bns_shounen_ai_net.py new file mode 100644 index 0000000..75bdebf --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/bns_shounen_ai_net.py @@ -0,0 +1,9 @@ +from .read_powermanga_org import ReadPowerMangaOrg + + +class BnsShounenAiNet(ReadPowerMangaOrg): + _name_re = '/read/[^/]+/([^/]+)/' + _content_str = '{}/read/series/{}/' + + +main = BnsShounenAiNet diff --git a/manga-py-stable_1.x/manga_py/providers/boredomsociety_xyz.py b/manga-py-stable_1.x/manga_py/providers/boredomsociety_xyz.py new file mode 100644 index 0000000..f35f1d0 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/boredomsociety_xyz.py @@ -0,0 +1,47 @@ +from manga_py.provider import Provider +from .helpers.std import Std + + +class BoredomSocietyXyz(Provider, Std): + def get_chapter_index(self) -> str: + return self.re.search( + r'/reader/\d+/(\d+(?:\.\d+)?)', + self.chapter + ).group(1).replace('.', '-') + + def get_main_content(self): + idx = self.re.search( + '/(?:titles/info|reader)/(\d+)', + self.get_url() + ).group(1) + return self.http_get('{}/titles/info/{}'.format( + self.domain, + idx + )) + + def get_manga_name(self) -> str: + return self.text_content(self.content, 'h2') + + def get_chapters(self): + return self._elements('a.titlesinfo_chaptertitle') + + def get_files(self): + parser = self.html_fromstring(self.chapter) + images = self._images_helper(parser, 'img.reader_mangaimage') + n = self.http().normalize_uri + return [n(i) for i in images] + + def get_cover(self) -> str: + return self._cover_from_content('img.titlesinfo_coverimage') + + def book_meta(self) -> dict: + pass + + def prepare_cookies(self): + # enable "all-images-on-page" + self.http_post('{}/module/reader/ajax.php'.format(self.domain), data={ + 'readingtype': 'all' + }) + + +main = BoredomSocietyXyz 
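Note: every provider module added in this patch follows the _template.py shape — subclass Provider and Std, implement get_main_content / get_chapters / get_files / get_cover, and export the class as "main" so the URL registry shown earlier in the patch can import the module by name. The sketch below is illustrative only, not part of the diff: it assumes the registry (providers_list, get_provider) is exposed from the manga_py.providers package, and the example URL and resolved module name (zeroscans_com) are inferred from the registry entries rather than from a file shown here.

    # hedged sketch: resolve a reader URL to its provider class via the registry
    from manga_py.providers import get_provider  # assumed package-level export

    url = 'https://zeroscans.com/manga/some-title'  # matches r'zeroscans\.com/manga/.'
    provider_class = get_provider(url)  # on match: import_module('manga_py.providers.zeroscans_com').main
    if provider_class is False:
        print('no provider registered for', url)  # get_provider returns False when no pattern matches
    else:
        print(provider_class)  # the class exported as `main` by the matched provider module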
diff --git a/manga-py-stable_1.x/manga_py/providers/cdmnet_com_br.py b/manga-py-stable_1.x/manga_py/providers/cdmnet_com_br.py new file mode 100644 index 0000000..418bb18 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/cdmnet_com_br.py @@ -0,0 +1,73 @@ +from lxml import html + +from manga_py.provider import Provider +from .helpers.std import Std + + +class CdmNetComBr(Provider, Std): + + def get_archive_name(self) -> str: + url = self.chapter + idx = self.get_chapter_index() + if ~url.find('/manga/'): + return 'vol_{:0>3}'.format(idx) + if ~url.find('/novel/'): + return 'novel_{:0>3}'.format(idx) + + def get_chapter_index(self) -> str: + re = self.re.compile('/titulos/[^/]+/[^/]+/[^/]+/([^/]+)') + return re.search(self.chapter).group(1) + + def get_main_content(self): + return self._get_content('{}/titulos/{}') + + def get_manga_name(self) -> str: + return self._get_name(r'/titulos/([^/]+)') + + def get_chapters(self): + return self._elements('.ui .content .table td > a') + + def save_file(self, idx=None, callback=None, url=None, in_arc_name=None): + if ~url.find('/manga/'): + return super().save_file(idx, callback, url, in_arc_name) + if ~url.find('/novel/'): + _path, idx, _url = self._save_file_params_helper(url, idx) + _path += '.html' + element = self.html_fromstring(url, '.novel-chapter', 0) + with open(_path, 'wb') as f: + f.write(html.tostring(element)) + callable(callback) and callback() + self.after_file_save(_path, idx) + self._archive.add_file(_path, in_arc_name) + return _path + + def _manga(self): + file_type = '.jpg' + content = self.http_get(self.chapter) + re_suffix = self.re.compile(r'urlSulfix\s*=\s*[\'"](.+)[\'"]\s*;') + re_images = self.re.compile(r'pages\s*=\s*(\[.+\])\s*;') + suffix = re_suffix.search(content).group(1) + images = re_images.search(content).group(1) + images = self.re.sub("'", '"', images) + images = self.json.loads(self.re.sub(r'",\]', '"]', images)) + + self.log(['{}{}{}'.format(suffix, i, file_type) for i in images]) + + return ['{}{}{}'.format(suffix, i, file_type) for i in images] + + def get_files(self): + if ~self.chapter.find('/manga/'): + return self._manga() + if ~self.chapter.find('/novel/'): + return [self.chapter] + return [] + + def get_cover(self) -> str: + return self._cover_from_content('.content .description img.image') + + def book_meta(self) -> dict: + # todo meta + pass + + +main = CdmNetComBr diff --git a/manga-py-stable_1.x/manga_py/providers/chochox_com.py b/manga-py-stable_1.x/manga_py/providers/chochox_com.py new file mode 100644 index 0000000..a621af2 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/chochox_com.py @@ -0,0 +1,36 @@ +from manga_py.provider import Provider +from .helpers.std import Std + + +class ChoChoxCom(Provider, Std): + + def get_archive_name(self) -> str: + return 'archive' + + def get_chapter_index(self) -> str: + return '0' + + def get_main_content(self): + return self._get_content('{}/{}') + + def get_manga_name(self) -> str: + return self._get_name(r'\.com/([^/]+)') + + def get_chapters(self): + return [b''] + + def get_files(self): + return [i.get('src') for i in self._elements('img.alignnone')] + + def get_cover(self) -> str: + pass + + def book_meta(self) -> dict: + # todo meta + pass + + def chapter_for_json(self): + return self.get_url() + + +main = ChoChoxCom diff --git a/manga-py-stable_1.x/manga_py/providers/choutensei_260mb_net.py b/manga-py-stable_1.x/manga_py/providers/choutensei_260mb_net.py new file mode 100644 index 0000000..c98f4d6 --- /dev/null +++ 
b/manga-py-stable_1.x/manga_py/providers/choutensei_260mb_net.py @@ -0,0 +1,10 @@ +from .adulto_seinagi_org import AdultoSeinagiOrg + + +class ChouTensei260mbNet(AdultoSeinagiOrg): + def prepare_cookies(self): + self._storage['cookies'][' __test'] = '9f148766d926b07a85683d7a6cd50150' + super().prepare_cookies() + + +main = ChouTensei260mbNet diff --git a/manga-py-stable_1.x/manga_py/providers/comic_webnewtype_com.py b/manga-py-stable_1.x/manga_py/providers/comic_webnewtype_com.py new file mode 100644 index 0000000..3b272e0 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/comic_webnewtype_com.py @@ -0,0 +1,35 @@ +from manga_py.provider import Provider +from .helpers.std import Std + + +class ComicWebNewTypeCom(Provider, Std): + + def get_chapter_index(self) -> str: + re = self.re.compile('/contents/[^/]+/([^/]+)') + return re.search(self.chapter).group(1) + + def get_main_content(self): + return self._get_content('{}/contents/{}/') + + def get_manga_name(self) -> str: + return self._get_name('/contents/([^/]+)') + + def get_chapters(self): + return self._elements('#episodeList li.ListCard a') + + def get_files(self): + url = self.chapter + items = self.http_get(url + 'json/', headers={'x-requested-with': 'XMLHttpRequest'}) + imgs = self.json.loads(items) + imgs = [self.re.sub(r'jpg.+', 'jpg', img) for img in imgs] + return imgs + + def get_cover(self) -> str: + return self._cover_from_content('.WorkSummary-content img') + + def book_meta(self) -> dict: + # todo meta + pass + + +main = ComicWebNewTypeCom diff --git a/manga-py-stable_1.x/manga_py/providers/comicextra_com.py b/manga-py-stable_1.x/manga_py/providers/comicextra_com.py new file mode 100644 index 0000000..5cafcc1 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/comicextra_com.py @@ -0,0 +1,39 @@ +from manga_py.provider import Provider +from .helpers.std import Std + + +class ComicExtraCom(Provider, Std): + + def get_chapter_index(self) -> str: + idx = self.re.search(r'/chapter-(.+)', self.chapter) + if idx: + return '{}-{}'.format(self.chapter_id, idx.group(1)) + return str(self.chapter_id) + + def get_main_content(self): + return self._get_content('{}/comic/{}') + + def get_manga_name(self): + url = self.get_url() + test = self.re.search('/comic/([^/]+)', url) + if test: + return test.group(1) + return self.re.search('/([^/]+)/chapter', url).group(1) + + def get_chapters(self): + return self._elements('#list td a') + + def get_files(self): + url = self.chapter + '/full' + items = self.html_fromstring(url, '.chapter-container img.chapter_img') + return [i.get('src') for i in items] + + def get_cover(self): + return self._cover_from_content('.movie-image img') + + def book_meta(self) -> dict: + # todo meta + pass + + +main = ComicExtraCom diff --git a/manga-py-stable_1.x/manga_py/providers/comico_co_id_content.py b/manga-py-stable_1.x/manga_py/providers/comico_co_id_content.py new file mode 100644 index 0000000..4b978ad --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/comico_co_id_content.py @@ -0,0 +1,42 @@ +from .comico_co_id_titles import ComicoCoIdTitles +from .helpers.std import Std + + +class ComicoCoIdContent(ComicoCoIdTitles, Std): # maybe + __origin_url = None + + def get_archive_name(self) -> str: + return '0' + + def get_chapter_index(self) -> str: + re = self.re.compile(r'/title/(\d+)') + return re.search(self.chapter).group(1) + + def get_main_content(self): + idx = self.re.search(r'contentId=(\d+)', self.get_url()) + return self.http_get('{}/content?contentId={}'.format( + self.domain, + 
idx.group(1) + )) + + def get_manga_name(self) -> str: + return 'Fake' + + def prepare_cookies(self): + self.__origin_url = self.get_url() + + def get_chapters(self): + pass + # return self._elements('.contlst-container .contlst-item > a') + + def get_files(self): + return [] + + def get_cover(self) -> str: + pass + + def chapter_for_json(self): + return self.get_url() + + +main = ComicoCoIdContent diff --git a/manga-py-stable_1.x/manga_py/providers/comico_co_id_titles.py b/manga-py-stable_1.x/manga_py/providers/comico_co_id_titles.py new file mode 100644 index 0000000..e944cc9 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/comico_co_id_titles.py @@ -0,0 +1,71 @@ +from time import time + +from manga_py.provider import Provider +from .helpers.std import Std + + +class ComicoCoIdTitles(Provider, Std): + _url = None + + def get_chapter_index(self) -> str: + return str(self.chapter.get('id', '0')) + + def _manga_id(self): + idx = self.re.search(r'/titles/(\d+)', self.get_url()) + return idx.group(1) + + def get_main_content(self): + self._url = '{}/titles/{}'.format( + self.domain, + self._manga_id(), + ) + return self.http_get(self._url) + + def get_manga_name(self) -> str: + h2 = self.document_fromstring(self.content, '.con > h2', 0) + return '{} - {}'.format( + h2.text_content(), + self._manga_id() + ) + + @staticmethod + def __parse_page(content): + items = [] + for i in content.get('data', {}).get('list', []): + if i.get('salePolicy', {}).get('isFree', False): + items.append(i) + return items + + def get_chapters(self): + items = [] + for page in range(1, 10): + content = self.http_get('{}/chapters?page={}&_={}'.format( + self._url, + page, + int(time()), + )) + try: + content = self.json.loads(content) + if content.get('header', {}).get('resultCode', -1) < 0: + break + items += self.__parse_page(content) + except Exception: + break + return items + + def get_files(self): + parser = self.html_fromstring('{}/chapters/{}'.format( + self._url, + self.chapter.get('id'), + ), '._view', 0) + return self._images_helper(parser, '._image') + + def get_cover(self) -> str: + return self._cover_from_content('.bg_img_small img') + + def book_meta(self) -> dict: + # todo meta + pass + + +main = ComicoCoIdTitles diff --git a/manga-py-stable_1.x/manga_py/providers/comico_jp.py b/manga-py-stable_1.x/manga_py/providers/comico_jp.py new file mode 100644 index 0000000..505891e --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/comico_jp.py @@ -0,0 +1,50 @@ +from sys import stderr + +from manga_py.provider import Provider +from .helpers.std import Std + + +class ComicoJp(Provider, Std): + + def get_chapter_index(self) -> str: + idx = self.re.search(r'articleNo=(\d+)', self.chapter) + if idx: + return '{}-{}'.format(self.chapter_id, idx.group(1)) + return str(self.chapter_id) + + def get_main_content(self): + title_no = self.re.search(r'\.jp/.+titleNo=(\d+)', self.get_url()) + if title_no: + content = self.http_post('{}/api/getArticleList.nhn'.format(self.domain), data={ + 'titleNo': title_no.group(1) + }) + try: + return self.json.loads(content).get('result', {}).get('list', []) + except TypeError: + pass + return [] + + def get_manga_name(self): + content = self.http_get(self.get_url()) + name = self.text_content(content, 'title') + return name[:name.rfind('|')].strip(' \n\t\r') + + def get_chapters(self): + # TODO: see i['freeFlg'] Y = true, W = false #19 + items = [i['articleDetailUrl'] for i in self.content if i['freeFlg'] == 'Y'] + self.log('Free chapters count: %d' % len(items), 
file=stderr) + return items[::-1] + + def get_files(self): + items = self.html_fromstring(self.chapter, '.comic-image._comicImage > img.comic-image__image') + return [i.get('src') for i in items] + + def get_cover(self): + pass + + def book_meta(self) -> dict: + # todo meta + pass + + +main = ComicoJp diff --git a/manga-py-stable_1.x/manga_py/providers/comicpunch_net.py b/manga-py-stable_1.x/manga_py/providers/comicpunch_net.py new file mode 100644 index 0000000..b895407 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/comicpunch_net.py @@ -0,0 +1,29 @@ +from manga_py.provider import Provider +from .helpers.std import Std +from urllib import parse + + +class ComicPunchNet(Provider, Std): + + def get_chapter_index(self) -> str: + return self.re.search(r'[-/]((?:Annual|Issue|Chapter)-\w+)', self.chapter).group(1) + + def get_main_content(self): + return self.http_get(self.get_url()) + + def get_manga_name(self) -> str: + return self.text_content(self.content, '.page-title') + + def get_chapters(self): + return self._elements('.chapter > a') + + def get_files(self): + parser = self.html_fromstring(self.chapter + '?q=fullchapter') + base_url = parser.cssselect('base[href]')[0].get('href') + return [parse.urljoin(base_url, i) for i in self._images_helper(parser, 'img.picture')] + + def get_cover(self) -> str: + return self._cover_from_content('.pic .series') + + +main = ComicPunchNet diff --git a/manga-py-stable_1.x/manga_py/providers/comicpunch_net_manga.py b/manga-py-stable_1.x/manga_py/providers/comicpunch_net_manga.py new file mode 100644 index 0000000..33ea3d4 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/comicpunch_net_manga.py @@ -0,0 +1,27 @@ +from manga_py.provider import Provider +from .helpers.std import Std + + +class ComicPunchNetManga(Provider, Std): + def get_chapter_index(self) -> str: + re = self.re.compile(r'/chapter_(\d+(?:\.\d+)?)') + return re.search(self.chapter).group(1).replace('.', '-') + + def get_main_content(self): + return self.http_get(self.get_url()) + + def get_manga_name(self) -> str: + return self.text_content(self.content, '.page-title') + + def get_chapters(self): + return self._elements('.manga_chapter a') + + def get_files(self): + parser = self.html_fromstring(self.chapter) + return self._images_helper(parser, 'img.picture') + + def get_cover(self) -> str: + return self._cover_from_content('.field-name-field-pic img') + + +main = ComicPunchNetManga diff --git a/manga-py-stable_1.x/manga_py/providers/comicsandmanga_ru.py b/manga-py-stable_1.x/manga_py/providers/comicsandmanga_ru.py new file mode 100644 index 0000000..722e867 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/comicsandmanga_ru.py @@ -0,0 +1,50 @@ +from manga_py.provider import Provider +from .helpers.std import Std + + +class ComicsAndMangaRu(Provider, Std): + + def get_archive_name(self) -> str: + index = self.get_chapter_index() + return 'vol_{:0>3}'.format(index) + + def get_chapter_index(self) -> str: + return self.re.search(r'.+/[^/]+?(\d+)$', self.chapter).group(1) + + def get_main_content(self): + name = self.re.search('/(online-reading/[^/]+/[^/]+)', self.get_url()) + return self.http_get('{}/{}'.format(self.domain, name.group(1))) + + def get_manga_name(self): + name = self.re.search('/online-reading/[^/]+/([^/]+)', self.get_url()) + return name.group(1) + + def get_chapters(self): + selector = '.MagList > .MagListLine > a' + items = self.document_fromstring(self.content, selector) + return items[::-1] + + def get_files(self): + img_selector = 'a > 
img' + nu = self.http().normalize_uri + uri = nu(self.chapter) + parser = self.html_fromstring(uri, '.ForRead', 0) + pages = parser.cssselect('.navigation select')[0].cssselect('option + option') + images = self._images_helper(parser, img_selector) + + for i in pages: + uri = '{}/{}'.format(nu(self.chapter.rstrip('/')), i.get('value')) + parser = self.html_fromstring(uri, '.ForRead', 0) + images += self._images_helper(parser, img_selector) + + return images + + def get_cover(self): + pass # TODO + + def book_meta(self) -> dict: + # todo meta + pass + + +main = ComicsAndMangaRu diff --git a/manga-py-stable_1.x/manga_py/providers/comicvn_net.py b/manga-py-stable_1.x/manga_py/providers/comicvn_net.py new file mode 100644 index 0000000..011d25e --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/comicvn_net.py @@ -0,0 +1,55 @@ +from manga_py.provider import Provider +from .helpers.std import Std + + +class ComicNnNet(Provider, Std): + + def get_archive_name(self) -> str: + return self.get_chapter_index() + + def get_chapter_index(self) -> str: # todo + re = self.re.compile('/truyen-tranh-online/[^/]+/([^/]+)') + return re.search(self.chapter).group(1) + + def get_main_content(self): + return self._get_content('{}/truyen-tranh-online/{}') + + def _iframe_hook(self, url): + content = self.html_fromstring(url) + iframe = content.cssselect('iframe') + if iframe: + url = iframe[0].get('src') + self.log('Iframe!\n' + url) + return self.html_fromstring(url) + + def get_manga_name(self) -> str: + name = self._get_name(r'/truyen-tranh-online/([^/]+)') + if self.re.search('.+-\d+', name): + return name + a = self._iframe_hook(self.get_url()) + self._params['url'] = a.cssselect('.sub-bor h1 a')[0].get('href') + return self.get_manga_name() + + def get_chapters(self): + return self._elements('.manga-chapter-head + ul li > a') + + def get_files(self): + content = self._iframe_hook(self.chapter) + files = content.cssselect('textarea#txtarea img') + if files: + n = self.http().normalize_uri + return [n(i.get('src')) for i in files] + return [] + + def prepare_cookies(self): + self._base_cookies() + + def get_cover(self) -> str: + return self._cover_from_content('.manga-detail .row img') + + def book_meta(self) -> dict: + # todo meta + pass + + +main = ComicNnNet diff --git a/manga-py-stable_1.x/manga_py/providers/cycomi_com.py b/manga-py-stable_1.x/manga_py/providers/cycomi_com.py new file mode 100644 index 0000000..a6b043f --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/cycomi_com.py @@ -0,0 +1,58 @@ +from manga_py.provider import Provider +from .helpers.std import Std + + +class CycomiCom(Provider, Std): + @staticmethod + def remove_not_ascii(value): + return value + + def get_chapter_index(self) -> str: + return self.chapter[1] + + def get_main_content(self): + idx = self.re.search( + r'/title/(\d+)', + self.get_url() + ).group(1) + + url = '{}/fw/cycomibrowser/chapter/title/{}'.format( + self.domain, + idx + ) + + return self.http_get(url) + + def get_manga_name(self) -> str: + return self.text_content(self.content, '.title-texts h3') + + def get_chapters(self): + selector = 'a.chapter-item:not(.is-preread)' + items = [] + n = self.http().normalize_uri + for el in self._elements(selector, self.content): + title = el.cssselect('p.chapter-title')[0] + title = title.text_content().strip(' \n\r\t\0') + episode_id = self.re.sub(r'.+pages/(.+)', r'\1', n(el.get('href'))) + title = episode_id + '_' + title + items.append((n(el.get('href')), title)) + return items + + def get_files(self): + 
content = self.http_get(self.chapter[0]) + parser = self.document_fromstring(content) + selector = '.comic-image' + return self._images_helper(parser, selector) + + def get_cover(self) -> str: + return self._cover_from_content('.title-image-container img') + + def chapter_for_json(self): + return self.chapter[1] + + def book_meta(self) -> dict: + # todo meta + pass + + +main = CycomiCom diff --git a/manga-py-stable_1.x/manga_py/providers/danbooru_donmai_us.py b/manga-py-stable_1.x/manga_py/providers/danbooru_donmai_us.py new file mode 100644 index 0000000..6bb2aa8 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/danbooru_donmai_us.py @@ -0,0 +1,99 @@ +from manga_py.provider import Provider +from .helpers.std import Std + + +class DanbooruDonmaiUs(Provider, Std): + _is_tag = False + _archive_prefix = 'danbooru_' + _manga_name = None + + def get_archive_name(self) -> str: + if self.chapter: + return 'page_{:0>2}'.format(self.chapter) + return 'archive' + + def get_chapter_index(self) -> str: + if self.chapter: + return str(self.chapter) + return '0' + + def get_main_content(self): + return self.http_get(self.get_url()) + + def get_manga_name(self) -> str: + if ~self.get_url().find('tags='): + self._is_tag = True + self._manga_name = self._get_name(r'[\?&]tags=([^&]+)') + else: + self._manga_name = self._get_name(r'/posts/(\d+)') + return self._archive_prefix + self._manga_name + + def get_chapters(self): # pragma: no cover + if self._is_tag: + pages = self._elements('.paginator .current-page > span') + images_on_page = len(self._elements('#posts > div > article')) + if pages: + count = self.html_fromstring('{}/counts/posts?tags={}'.format( + self.domain, + self.manga_name, + ), '#a-posts', 0).text_content() + page = self.re.search(r'\n\s+(\d+)', count).group(1) + max_page = int(int(page) / images_on_page) + 1 + if max_page > 1001: + self.log('1000 pages maximum!') + max_page = 1000 + return range(1, max_page)[::-1] + return [1] + + def _tag_images(self): # pragma: no cover + url = '{}/posts?tags={}&page={}'.format( + self.domain, + self._manga_name, + self.chapter, + ) + parser = self.html_fromstring(url, '#posts article a') + n = self.http().normalize_uri + images = [] + for i in parser: + images += self._post_image(n(i.get('href'))) + return images + + def _post_image(self, url): # pragma: no cover + if isinstance(url, str): + parser = self.html_fromstring(url) + else: + parser = url + + full_size = parser.cssselect('#image-resize-notice a') + if full_size: + return [full_size[0].get('href')] + return [parser.cssselect('#image')[0].get('src')] + + def _post_images(self, url): # pragma: no cover + parser = self.html_fromstring(url) + links = parser.cssselect('#has-parent-relationship-preview article a') + if links: + images = [] + n = self.http().normalize_uri + for i in links: + images += self._post_image(n(i.get('href'))) + return images + return self._post_image(parser) + + def get_files(self): + if self._is_tag: + return self._tag_images() + return self._post_images(self.get_url()) + + def get_cover(self) -> str: + pass + + def book_meta(self) -> dict: + # todo meta + pass + + def chapter_for_json(self): + return self.get_url() + + +main = DanbooruDonmaiUs diff --git a/manga-py-stable_1.x/manga_py/providers/darkskyprojects_org.py b/manga-py-stable_1.x/manga_py/providers/darkskyprojects_org.py new file mode 100644 index 0000000..8a4b9eb --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/darkskyprojects_org.py @@ -0,0 +1,37 @@ +from manga_py.provider import Provider 
+from .helpers.std import Std + + +class DarkSkyProjectsOrg(Provider, Std): + + def get_archive_name(self) -> str: + return self.normal_arc_name({'vol': [ + self.chapter_id, + self.get_chapter_index() + ]}) + + def get_chapter_index(self) -> str: + return self.re.search('/biblioteca/[^/]+/([^/]+)', self.chapter).group(1) + + def get_main_content(self): + return self._get_content('{}/biblioteca/{}') + + def get_manga_name(self) -> str: + return self._get_name('/biblioteca/([^/]+)') + + def get_chapters(self): + return self._elements('.chapters h5 a') + + def get_files(self): + parser = self.html_fromstring(self.chapter) + return self._images_helper(parser, 'data-src') + + def get_cover(self) -> str: + return self._cover_from_content('.boxed > .img-responsive') + + def book_meta(self) -> dict: + # todo meta + pass + + +main = DarkSkyProjectsOrg diff --git a/manga-py-stable_1.x/manga_py/providers/dejameprobar_es.py b/manga-py-stable_1.x/manga_py/providers/dejameprobar_es.py new file mode 100644 index 0000000..a187d4c --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/dejameprobar_es.py @@ -0,0 +1,9 @@ +from .helveticascans_com import HelveticaScansCom + + +class DejameProbarEs(HelveticaScansCom): + _name_re = '/slide/[^/]+/([^/]+)/' + _content_str = '{}/slide/series/{}/' + + +main = DejameProbarEs diff --git a/manga-py-stable_1.x/manga_py/providers/desu_me.py b/manga-py-stable_1.x/manga_py/providers/desu_me.py new file mode 100644 index 0000000..0bc2295 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/desu_me.py @@ -0,0 +1,41 @@ +from manga_py.provider import Provider +from .helpers.std import Std + + +class DesuMe(Provider, Std): + + def get_archive_name(self) -> str: + idx = self.get_chapter_index().split('-') + return self.normal_arc_name({'vol': idx[0], 'ch': idx[1]}) + + def get_chapter_index(self) -> str: + result = self.re.search(r'/vol(\d+)/ch(\d+)', self.chapter).groups() + return '{}-{}'.format(result[0], result[1]) + + def get_main_content(self): + return self._get_content('{}/manga/{}') + + def get_chapters(self): + return self._elements('#animeView ul h4 > a.tips') + + def get_manga_name(self) -> str: + return self._get_name('/manga/([^/]+)') + + def get_files(self): + content = self.http_get(self.domain + self.chapter) + result = self.re.search(r'images:\s?(\[\[.+\]\])', content, self.re.M) + if not result: + return [] + root_url = self.re.search(r'dir:\s?"([^"]*)"', content).group(1).replace(r'\/', '/') + + return [root_url + i[0] for i in self.json.loads(result.group(1))] + + def get_cover(self): + return self._cover_from_content('.c-poster > img') + + def book_meta(self) -> dict: + # todo meta + pass + + +main = DesuMe diff --git a/manga-py-stable_1.x/manga_py/providers/digitalteam1_altervista_org.py b/manga-py-stable_1.x/manga_py/providers/digitalteam1_altervista_org.py new file mode 100644 index 0000000..2c75147 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/digitalteam1_altervista_org.py @@ -0,0 +1,51 @@ +from .read_powermanga_org import ReadPowerMangaOrg + + +class DigitalTeam1AltervistaOrg(ReadPowerMangaOrg): + __title = None + _name_re = '/reader/[^/]+/([^/]+)' + _content_str = '{}/reader/read/{}/' + _chapters_selector = '.chapter_list li > div > a' + + def get_chapters(self): + self.__title = self.text_content(self.content, 'title') + return super().get_chapters() + + def __parse_json(self, data) -> list: + items = [] + for n, i in enumerate(data[0]): + items.append('{}/reader{}{}{}{}'.format( + self.domain, + data[2], # path + i['name'], 
# image index + data[1][n], # image hash + i['ex'] # image extension + )) + return items + + def get_files(self): + chapter = self.re.search('/(\d+)/', self.chapter).group(1) + data = { + 'info[manga]': self.manga_name, + 'info[chapter]': chapter, + 'info[ch_sub]': '0', # todo: watch this + 'info[title]': self.__title, + } + json = self.json.loads(self.http_post( + '{}/reader/c_i'.format(self.domain), + data=data, + headers={'X-Requested-With': 'XMLHttpRequest'} + )) + + if isinstance(json, str): # DO NOT TOUCH THIS! + json = self.json.loads(json) + + if json: + return self.__parse_json(json) + return [] + + def get_cover(self) -> str: + return self._cover_from_content('.cover img') + + +main = DigitalTeam1AltervistaOrg diff --git a/manga-py-stable_1.x/manga_py/providers/dm5_com.py b/manga-py-stable_1.x/manga_py/providers/dm5_com.py new file mode 100644 index 0000000..1aeab82 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/dm5_com.py @@ -0,0 +1,111 @@ +from urllib.parse import quote_plus + +import execjs + +from manga_py.fs import is_file +from manga_py.provider import Provider +from .helpers.std import Std + + +class Dm5Com(Provider, Std): + def get_chapter_index(self) -> str: + re = self.re.compile(r'[^\d+](\d+)') + return re.search(self.chapter[1]).group(1) + + def get_main_content(self): + content = self._storage.get('main_content', None) + if content is None: + if self.get_url().find('/manhua-'): + # normal url + name = self._get_name('/manhua-([^/]+)') + else: + # chapter url + selector = '.title .right-arrow > a' + name = self.html_fromstring(self.get_url(), selector, 0) + name = self._get_name('/manhua-([^/]+)', name.get('href')) + content = self.http_get('{}/manhua-{}/'.format( + self.domain, + name + )) + return content + + def get_manga_name(self) -> str: + title = self.text_content(self.content, '.info .title') + if title: + return title + re = self.re.search('/manhua-([^/]+)', self.get_url()) + return re.group(1) + + def get_chapters(self): + items = self._elements('ul.detail-list-select') + if not items: + return [] + items = items[0].cssselect('li > a') + n = self.http().normalize_uri + return [(n(i.get('href')), i.text_content()) for i in items] + + def get_files(self): # fixme + content = self.http_get(self.chapter[0]) + parser = self.document_fromstring(content) + pages = parser.cssselect('.chapterpager a') + if pages: + pages = int(pages[-1].text_content().strip()) + else: + pages = 1 + s = lambda k: self.re.search(r'%s\s*=[\s"]*(.+?)[\s"]*;' % k, content).group(1) + key = parser.cssselect('#dm5_key')[0].get('value') + cid = s(r'\bDM5_CID') + mid = s(r'\bDM5_MID') + sign = s(r'\bDM5_VIEWSIGN') + sign_dt = quote_plus(s(r'\bDM5_VIEWSIGN_DT')) + chapter_idx = self.re.search(r'/(m\d+)', self.chapter[0]).group(1) + url = '{}/{}/chapterfun.ashx?cid={}&page={}&key={}&language=1>k=6&_cid={}&_mid={}&_dt={}&_sign={}' + items = [] + for page in range(pages): + data = self.http_get(url.format( + self.domain, chapter_idx, + cid, page + 1, key, cid, + mid, sign_dt, sign, + ), headers=self._get_headers()) + item_url = execjs.eval(data) + if item_url: + items += item_url + return items + + def save_file(self, idx=None, callback=None, url=None, in_arc_name=None): + self._storage['referer'] = self.chapter[0] + _path, idx, _url = self._save_file_params_helper(url, idx) + + if not is_file(_path): + self.http(True).download_file(_url, _path, idx) + callable(callback) and callback() + self.after_file_save(_path, idx) + self._archive.lazy_add(_path) + return _path + + @staticmethod 
+ def _get_headers(): + return {'Cache-mode': 'no-cache', 'X-Requested-With': 'XMLHttpRequest'} + + def get_cover(self) -> str: + return self._cover_from_content('.banner_detail_form .cover > img') + + def book_meta(self) -> dict: + rating = self.text_content(self.content, '.right .score', 0) + rating = self.re.search(r'(\d\d?\.\d)', rating).group(1) + author = self.text_content(self.content, '.banner_detail_form .info .subtitle a') + anno = self.text_content(self.content, '.banner_detail_form .info .content') + return { + 'author': author, + 'title': self.get_manga_name(), + 'annotation': anno, + 'keywords': str, + 'cover': self.get_cover(), + 'rating': rating, + } + + def chapter_for_json(self): + return self.chapter[0] + + +main = Dm5Com diff --git a/manga-py-stable_1.x/manga_py/providers/doujins_com.py b/manga-py-stable_1.x/manga_py/providers/doujins_com.py new file mode 100644 index 0000000..7c8d389 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/doujins_com.py @@ -0,0 +1,38 @@ +from manga_py.provider import Provider +from .helpers.std import Std + + +class DoujinsCom(Provider, Std): + img_selector = '#image-container img.doujin' + + def get_archive_name(self) -> str: + return 'archive' + + def get_chapter_index(self) -> str: + return '0' + + def get_main_content(self): + return self._get_content('{}/gallery/{}') + + def get_manga_name(self) -> str: + return self._get_name('/gallery/([^/]+)') + + def get_chapters(self): + return [b''] + + def get_files(self): + items = self.document_fromstring(self.content, self.img_selector) + return [i.get('data-file').replace('&', '&') for i in items] + + def get_cover(self) -> str: + return self._cover_from_content(self.img_selector) + + def book_meta(self) -> dict: + # todo meta + pass + + def chapter_for_json(self): + return self.get_url() + + +main = DoujinsCom diff --git a/manga-py-stable_1.x/manga_py/providers/e_hentai_org.py b/manga-py-stable_1.x/manga_py/providers/e_hentai_org.py new file mode 100644 index 0000000..8aab2e1 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/e_hentai_org.py @@ -0,0 +1,67 @@ +from lxml.html import HtmlElement + +from manga_py.provider import Provider +from .helpers import e_hentai_org +from .helpers.std import Std +from time import sleep + + +class EHentaiOrg(Provider, Std): + helper = None + + def save_file(self, idx=None, callback=None, url=None, in_arc_name=None): + _url = None + if isinstance(url, HtmlElement): + _url = self.helper.get_image(url) + else: + _url = url + return super().save_file(idx=idx, callback=callback, url=_url, in_arc_name=in_arc_name) + + def get_chapter_index(self) -> str: + return str(self.chapter_id) + + def get_main_content(self): + return self.http_get(self.helper.get_url()) + + def get_manga_name(self) -> str: + return self._get_name('/g/([^/]+/[^/?]+)').replace('/', '-') + + def prepare_cookies(self): + self.helper = e_hentai_org.EHentaiOrg(self) + self.http().cookies['nw'] = "1" # issue #178 + self.http().cookies['nm'] = "1" # issue #178 + + def get_chapters(self): + parser = self.document_fromstring(self.content) + max_idx = self.helper.get_pages_count(parser) + self.log('Please, wait...\n') + return list(range(max_idx, -1, -1)) + + def get_files(self): + url = self.helper.get_url() + '?p=' + selector = '#gdt div[class^=gdt] a' + idx = self.chapter + if idx == 0: + content = self.content + else: + content = self.http_get('{}{}'.format(url, idx)) + pages = self.document_fromstring(content, selector) + + n = self.http().normalize_uri + f = 
self.document_fromstring + + images = [] + for page in pages: + _url = n(page.get('href')) + images.append(n(f(self.http_get(_url), '#img', 0).get('src'))) + sleep(.1) + return images + + def get_cover(self) -> str: + return self._cover_from_content('#gd1 > div') + + def chapter_for_json(self): + return self.get_url() + + +main = EHentaiOrg diff --git a/manga-py-stable_1.x/manga_py/providers/exhentai_org.py b/manga-py-stable_1.x/manga_py/providers/exhentai_org.py new file mode 100644 index 0000000..44dcabd --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/exhentai_org.py @@ -0,0 +1,82 @@ +from sys import exit +from time import sleep + +from manga_py.fs import get_util_home_path, path_join, is_file, unlink +from .e_hentai_org import EHentaiOrg + + +from lxml.html import HtmlElement + + +class ExHentaiOrg(EHentaiOrg): + __uri = 'https://forums.e-hentai.org/index.php?act=Login&CODE={}' + cookie_file = None + + def prepare_cookies(self): + super().prepare_cookies() + + self.cookie_file = path_join(get_util_home_path(), 'cookies_exhentai.dat') + if is_file(self.cookie_file): + with open(self.cookie_file, 'r') as r: + self._storage['cookies'] = self.json.loads(r.read()) + self.http().cookies = self._storage['cookies'].copy() + else: + action, method, form_data = self.prepare_form() + content = self.http().requests(action, data=form_data, method=method.lower()) + if not ~content.text.find('You are now logged in as:'): + self.log('Wrong password?') + sleep(.1) + exit() + else: + with open(self.cookie_file, 'w') as w: + w.write(self.json.dumps(self._storage['cookies'])) + + sleep(5) + + if not self.check_panda(): + self.log('Panda detected. Please, try again') + exit(1) + + def prepare_form(self): + # Login on e-hentai! + name = self.quest([], 'Request login on e-hentai.org') + password = self.quest_password('Request password on e-hentai.org\n') + + selectors = [ + 'input[type="hidden"]', + 'input[checked]', + 'input[type="submit"]', + ] + + form_data = { + 'UserName': name, + 'PassWord': password, + } + prepare = self.http_get(self.__uri.format('00')) + parser = self.document_fromstring(prepare, 'form[name="LOGIN"]')[0] # type: HtmlElement + action = parser.get('action', self.__uri.format('01')) + method = parser.get('method', 'get') + for i in parser.cssselect(','.join(selectors)): # type: HtmlElement + form_data[i.get('name')] = i.get('value') + + return action, method, form_data + + def check_panda(self): + success = True + req = self.http().requests('https://exhentai.org/', method='head') + if ~req.headers['Content-Type'].find('image/'): + """ + if authorization was not successful + """ + self.log('Sad panda detected') + # self.log('Cookies:\n') + # self.log(self.http().cookies, '\n') + self.http().cookies = {} + unlink(self.cookie_file) + success = False + req.close() + + return success + + +main = ExHentaiOrg diff --git a/manga-py-stable_1.x/manga_py/providers/fanfox_net.py b/manga-py-stable_1.x/manga_py/providers/fanfox_net.py new file mode 100644 index 0000000..6da9fdd --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/fanfox_net.py @@ -0,0 +1,99 @@ +from manga_py.crypt.base_lib import BaseLib +from manga_py.provider import Provider +from .helpers.std import Std + + +class MangaFoxMe(Provider, Std): + + def get_archive_name(self) -> str: + groups = self._ch_parser() + ch = groups[1].replace('.', '-') + vol = ['0'] + if groups[0]: + vol = [groups[0]] + return self.normal_arc_name({'vol': vol, 'ch': ch}) + + def _ch_parser(self): + selector = 
r'/manga/[^/]+/(?:v([^/]+)/)?c([^/]+)/' + groups = self.re.search(selector, self.chapter).groups() + return groups + + def get_chapter_index(self) -> str: + groups = self._ch_parser() + idx = groups[1].replace('.', '-') + if not ~idx.find('-'): + idx = idx + '-0' + if groups[0]: + return '{}-{}'.format(idx, groups[0]) + return idx + + def get_main_content(self): + return self._get_content('{}/manga/{}') + + def get_manga_name(self) -> str: + return self._get_name('/manga/([^/]+)/?') + + def get_chapters(self): + return self._elements('[id^="list-"] a[href]') + + def _get_links(self, content): + js = self.re.search(r'eval\((function\b.+)\((\'[\w ].+)\)\)', content).groups() + return BaseLib.exec_js('m = ' + js[0], 'm(' + js[1] + ')') + + def _one_link_helper(self, content, page): + cid = self.re.search(r'chapterid\s*=\s*(\d+)', content).group(1) + base_url = self.chapter[0:self.chapter.rfind('/')] + links = self._get_links(content) + key = ''.join(self.re.findall(r'\'(\w)\'', links)) + return self.http_get('{}/chapterfun.ashx?cid={}&page={}&key={}'.format( + base_url, + cid, + page, + key + )) + + def _parse_links(self, data): + base_path = self.re.search(r'pix="(.+?)"', data).group(1) + images = self.re.findall(r'"(/\w.+?)"', data) + return [base_path + i for i in images] + + def _get_links_page_to_page(self, content): + last_page = self.document_fromstring(content, '.pager-list-left > span > a:nth-last-child(2)', 0) + links = [] + for i in range(0, int(int(last_page.get('data-page')) / 2 + .5)): + data = self._one_link_helper(content, (i * 2) + 1) + links += self._parse_links(self._get_links(data)) + return links + + def get_files(self): + content = self.http_get(self.chapter) + links = self._get_links(content) + + n = self.http().normalize_uri + + if ~links.find('key='): + # chapters data example: http://fanfox.net/manga/the_hero_is_overpowered_but_overly_cautious/c001/chapterfun.ashx?cid=567602&page=6&key=6b5367d728d445a8 + return self._get_links_page_to_page(content) + + if ~links.find('token='): + links_array = self.re.search(r'(\[.+?\])', links) + links_array = links_array.group(1).replace('\'', '"') + links_data = self.json.loads(links_array) + return [n(i) for i in links_data] + + data = self.re.search(r'\w=(\[.+\])', links).group(1) + data = self.json.loads(data.replace("'", '"')) + return [n(i) for i in data] + + def get_cover(self): + return self._cover_from_content('img.detail-info-cover-img') + + def book_meta(self) -> dict: + # todo meta + pass + + def prepare_cookies(self): + self.http().cookies['isAdult'] = '1' + + +main = MangaFoxMe diff --git a/manga-py-stable_1.x/manga_py/providers/freeadultcomix_com.py b/manga-py-stable_1.x/manga_py/providers/freeadultcomix_com.py new file mode 100644 index 0000000..aeb5558 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/freeadultcomix_com.py @@ -0,0 +1,50 @@ +from manga_py.provider import Provider +from .helpers.std import Std + + +class FreeAdultComixCom(Provider, Std): + + def get_archive_name(self) -> str: + return 'archive' + + def get_chapter_index(self) -> str: + return '0' + + def get_main_content(self): + return self._get_content('{}/{}') + + def get_manga_name(self) -> str: + return self._get_name(r'\.com/([^/]+)') + + def get_chapters(self): + return [b''] + + def _external_images(self): # https://freeadultcomix.com/star-vs-the-forces-of-sex-iii-croc/ + links = self._elements('.single-post p > a[target="_blank"] > img') + items = [] + re = self.re.compile(r'(.+/)th(/.+)') + for i in links: + g = 
re.search(i.get('src')).groups() + items.append('{}/i/{}/0.jpg'.format(*g)) + return items + + def get_files(self): + images = self._elements('.single-post p > img[class*="wp-image-"]') + if not len(images): + items = self._external_images() + else: + items = [i.get('src') for i in images] + return items + + def get_cover(self) -> str: + pass + + def book_meta(self) -> dict: + # todo meta + pass + + def chapter_for_json(self): + return self.get_url() + + +main = FreeAdultComixCom diff --git a/manga-py-stable_1.x/manga_py/providers/freemanga_to.py b/manga-py-stable_1.x/manga_py/providers/freemanga_to.py new file mode 100644 index 0000000..ae6a087 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/freemanga_to.py @@ -0,0 +1,38 @@ +from manga_py.provider import Provider +from .helpers.std import Std + + +class FreeMangaTo(Provider, Std): + + def get_chapter_index(self) -> str: + re = self.re.compile(r'[Cc]hapter\s(\d+(?:\.\d+)?)') + chapter = re.search(self.chapter[0]).group(1) + return chapter.replace('.', '-') + + def get_main_content(self): + return self._get_content('{}/manga/{}') + + def get_manga_name(self) -> str: + return self._get_name('/(?:manga|chapter)/([^/]+)') + + def get_chapters(self): + items = self._elements('.readIcon a') + n = self.http().normalize_uri + return [(i.text_content(), n(i.get('href'))) for i in items] + + def get_files(self): + content = self.http_get(self.chapter[1]) + images = self.re.search(r'image:\s*(\[.+\])', content) + return self.json.loads(images.group(1)) + + def get_cover(self) -> str: + return self._cover_from_content('.tooltips > img') + + def book_meta(self) -> dict: + pass + + def chapter_for_json(self) -> str: + return self.chapter[1] + + +main = FreeMangaTo diff --git a/manga-py-stable_1.x/manga_py/providers/funmanga_com.py b/manga-py-stable_1.x/manga_py/providers/funmanga_com.py new file mode 100644 index 0000000..181e52e --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/funmanga_com.py @@ -0,0 +1,35 @@ +from manga_py.provider import Provider +from .helpers.std import Std + + +class FunMangaCom(Provider, Std): + + def _get_chapter_idx(self): + re = self.re.compile(r'\.com/[^/]+/([^/]+)') + return re.search(self.chapter).group(1) + + def get_chapter_index(self) -> str: + return self._get_chapter_idx().replace('.', '-') + + def get_main_content(self): + return self._get_content('{}/{}') + + def get_manga_name(self) -> str: + return self._get_name(r'\.com/([^/]+)') + + def get_chapters(self): + items = self._elements('.chapter-list li > a') + return [i.get('href') + '/all-pages' for i in items] + + def get_files(self): + items = self.html_fromstring(self.chapter, '.content-inner > img.img-responsive') + return [i.get('src') for i in items] + + def get_cover(self): + return self._cover_from_content('img.img-responsive.mobile-img') + + def prepare_cookies(self): + self.cf_protect(self.get_url()) + + +main = FunMangaCom diff --git a/manga-py-stable_1.x/manga_py/providers/gmanga_me.py b/manga-py-stable_1.x/manga_py/providers/gmanga_me.py new file mode 100644 index 0000000..b060dbc --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/gmanga_me.py @@ -0,0 +1,23 @@ +from .gomanga_co import GoMangaCo + + +class GMangaMe(GoMangaCo): + _name_re = '/mangas/([^/]+)' + _content_str = '{}/mangas/{}' + _chapters_selector = 'a.chapter-link' + + def get_chapter_index(self) -> str: + selector = r'/mangas/[^/]+/(\d+/[^/]+)' + idx = self.re.search(selector, self.chapter).group(1) + return idx.replace('/', '-') + + def _get_json_selector(self, 
content): + return r'1:\salphanumSort\((\[.+\])\)' + + def get_cover(self) -> str: + image = self.re.search(r'"image"\s?:\s?"(.+)",', self.content) + if image: + return image.group(1) + + +main = GMangaMe diff --git a/manga-py-stable_1.x/manga_py/providers/gomanga_co.py b/manga-py-stable_1.x/manga_py/providers/gomanga_co.py new file mode 100644 index 0000000..f4189b0 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/gomanga_co.py @@ -0,0 +1,56 @@ +from manga_py.provider import Provider +from .helpers.std import Std + + +class GoMangaCo(Provider, Std): + _name_re = '/reader/[^/]+/([^/]+)/' + _content_str = '{}/reader/series/{}/' + _chapters_selector = '.list .element .title a' + _chapter_re = r'/rea\w+/[^/]+/[^/]+/(?:[^/]+/)?(\d+/\d+(?:/\d+)?)' + + _go_chapter_content = '' + + def get_chapter_index(self) -> str: + group = self.re.search(self._chapter_re, self.chapter).group(1) + return group.replace('/', '-') + + def get_main_content(self): + return self._get_content(self._content_str) + + def get_manga_name(self) -> str: + return self._get_name(self._name_re) + + def get_chapters(self): + return self._elements(self._chapters_selector) + + def _get_json_selector(self, content): + idx = self.re.search(r'page_width\s=\sparseInt\((\w+)\[', content).group(1) + return r'var\s{}\s*=\s*(\[.+\])'.format(idx) + + def get_files(self): + self._go_chapter_content = self.http_get(self.chapter) + selector = self._get_json_selector(self._go_chapter_content) + items = self.json.loads(self.re.search(selector, self._go_chapter_content).group(1)) + return [i.get('url') for i in items] + + def get_cover(self) -> str: + return self._cover_from_content('.cover img') + + def prepare_cookies(self): + url = self.get_url() + self.cf_protect(url) + data = {'adult': 'true'} + try: + response = self.http().requests(method='post', data=data, url=url) + cookies = response.cookies.items() + for i in cookies: + self._storage['cookies'][i[0]] = i[1] + except Exception: + pass + + def book_meta(self) -> dict: + # todo meta + pass + + +main = GoMangaCo diff --git a/manga-py-stable_1.x/manga_py/providers/goodmanga_net.py b/manga-py-stable_1.x/manga_py/providers/goodmanga_net.py new file mode 100644 index 0000000..f2b14c5 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/goodmanga_net.py @@ -0,0 +1,53 @@ +from manga_py.provider import Provider +from .helpers.std import Std + + +class GoodMangaNet(Provider, Std): + + def get_chapter_index(self) -> str: + return self.re.search(r'/chapter/(\d+)', self.chapter).group(1) + + def get_main_content(self): + url = self.get_url() + if ~url.find('/chapter/'): + url = self.html_fromstring(url, '#manga_head h3 > a', 0).get('href') + _id = self.re.search(r'net/(\d+/[^/]+)', url).group(1) + return self.http_get('{}/{}'.format(self.domain, _id)) + + def get_manga_name(self) -> str: + url = self.get_url() + reg = r'/([^/]+)/chapter/|net/\d+/([^/]+)' + groups = self.re.search(reg, url).groups() + return groups[0] if groups[0] else groups[1] + + @staticmethod + def get_chapters_links(parser): + return [i.get('href') for i in parser.cssselect('#chapters li > a')] + + def get_chapters(self): + selector = '#chapters li > a' + chapters = self._elements(selector) + pagination = self._elements('.pagination li > button[href]') + for i in pagination: + chapters += self._elements(selector, self.http_get(i.get('href'))) + return chapters + + def get_files(self): + img_selector = '#manga_viewer > a > img' + parser = self.html_fromstring(self.chapter) + images = self._images_helper(parser, 
img_selector) + pages = self._first_select_options(parser, '#asset_2 select.page_select', True) + for i in pages: + _parser = self.html_fromstring(i.get('value')) + images += self._images_helper(_parser, img_selector) + return images + + def get_cover(self): + pass # TODO + + def book_meta(self) -> dict: + # todo meta + pass + + +main = GoodMangaNet diff --git a/manga-py-stable_1.x/manga_py/providers/hakihome_com.py b/manga-py-stable_1.x/manga_py/providers/hakihome_com.py new file mode 100644 index 0000000..61d4d35 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/hakihome_com.py @@ -0,0 +1,47 @@ +from manga_py.provider import Provider +from .helpers.std import Std + + +class HakiHomeCom(Provider, Std): + + def get_chapter_index(self) -> str: + selector = '.+/([^/]+)/' + idx = self.re.search(selector, self.chapter) + return idx.group(1) + + def get_main_content(self): + selector = r'(https?://[^/]+/[^/]+/[^/]+-\d+/)' + url = self.re.search(selector, self.get_url()) + return self.http_get(url.group(1)) + + def get_manga_name(self) -> str: + url = self.get_url() + selector = r'\.com/[^/]+/(.+?)-\d+/' + return self.re.search(selector, url).group(1) + + def get_chapters(self): + return self._elements('.listing a.readchap') + + def get_files(self): + img_selector = '#con img' + n = self.http().normalize_uri + uri = n(self.chapter) + parser = self.html_fromstring(uri, '#contentchap', 0) + pages = self._first_select_options(parser, '#botn span > select[onchange]') + images = self._images_helper(parser, img_selector) + + for i in pages: + parser = self.html_fromstring(n(i.get('value')), '#contentchap', 0) + images += self._images_helper(parser, img_selector) + + return images + + def get_cover(self) -> str: + return self._cover_from_content('.noidung img') + + def book_meta(self) -> dict: + # todo meta + pass + + +main = HakiHomeCom diff --git a/manga-py-stable_1.x/manga_py/providers/hatigarmscans_eu.py b/manga-py-stable_1.x/manga_py/providers/hatigarmscans_eu.py new file mode 100644 index 0000000..c4b0da1 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/hatigarmscans_eu.py @@ -0,0 +1,23 @@ +from .gomanga_co import GoMangaCo + + +class HatigarmScansEu(GoMangaCo): + _name_re = '/manga/([^/]+)' + _content_str = '{}/manga/{}/' + _chapters_selector = '.chapters [class^="chapter-title"] a' + + def get_chapter_index(self) -> str: + url = self.chapter + index_re = r'/manga/[^/]+/(?:chapter-)?(\d+(?:\.\d+)?)' + group = self.re.search(index_re, url).group(1) + return group.replace('.', '-') + + def prepare_cookies(self): + pass + + def get_files(self): + parser = self.html_fromstring(self.chapter) + return self._images_helper(parser, '#all .img-responsive', 'data-src') + + +main = HatigarmScansEu diff --git a/manga-py-stable_1.x/manga_py/providers/heavenmanga_biz.py b/manga-py-stable_1.x/manga_py/providers/heavenmanga_biz.py new file mode 100644 index 0000000..4e98637 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/heavenmanga_biz.py @@ -0,0 +1,48 @@ +from manga_py.provider import Provider +from .helpers.std import Std + + +class HeavenMangaBiz(Provider, Std): + + def get_chapter_index(self) -> str: + try: + return self.re.search(r'-chap-(\d+(?:-\d+)?)', self.chapter).group(1) + except Exception as e: + if self.re.search(r'-chap$', self.chapter): + return '0' + raise e + + def get_main_content(self): + return self._get_content('{}/{}/') + + def get_manga_name(self) -> str: + s = self.domain[self.domain.rfind('.'):] + selector = r'\%s/([^/]+)' + if 
~self.get_url().find('-chap-'): + selector += '-chap-' + return self._get_name(selector % s) + + def get_chapters(self): + selector = '.chapters-wrapper h2.chap > a' + pages = self._elements('a.next.page-numbers') + items = self._elements(selector) + if pages: + pages = self.re.search(r'/page-(\d+)', pages[-1].get('href')).group(1) + for i in range(1, int(pages)): + url = '{}/{}/page-{}'.format(self.domain, self.manga_name, i + 1) + items += self._elements(selector, self.http_get(url)) + return items + + def get_files(self): + parser = self.html_fromstring(self.chapter) + return self._images_helper(parser, '.chapter-content img') + + def get_cover(self) -> str: + return self._cover_from_content('.comic-info .thumb > img') + + def book_meta(self) -> dict: + # todo meta + pass + + +main = HeavenMangaBiz diff --git a/manga-py-stable_1.x/manga_py/providers/helpers/__init__.py b/manga-py-stable_1.x/manga_py/providers/helpers/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/manga-py-stable_1.x/manga_py/providers/helpers/_http2.py b/manga-py-stable_1.x/manga_py/providers/helpers/_http2.py new file mode 100644 index 0000000..0f8dfbd --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/helpers/_http2.py @@ -0,0 +1,72 @@ +class Http2: + provider = None + path_join = None + is_file = None + chapters = None + chapters_count = 0 + + def __init__(self, provider): + from manga_py.fs import path_join, is_file + self.provider = provider + self.path_join = path_join + self.is_file = is_file + + def _get_name(self, idx): + return self.path_join( + self.provider._params.get('destination'), + self.provider._storage['manga_name'], + '{:0>3}-{}.{}'.format( + idx, self.provider.get_archive_name(), + self.provider._archive_type() + ) + ) + + def __download(self, idx, name, url): + _min, _max = self._min_max_calculate() + self.provider._info.add_volume( + self.provider.chapter, + self.provider.get_archive_path() + ) + + self.provider.progress(self.chapters_count, idx) + + if idx < _min or (idx >= _max > 0) or self.is_file(name): + return False + + if not self.provider._simulate: + try: + self.provider.http().download_file(url, name, idx) + except Exception as e: + self.provider._info.set_last_volume_error(e) + + def _min_max_calculate(self): + _min = self.provider._params.get('skip_volumes', 0) + _max = self.provider._params.get('max_volumes', 0) + self.chapters_count = len(self.chapters) + if _max > 0 or _min > 0: + if _max < self.chapters_count: + _max = self.chapters_count - _max + else: + _max = 0 + self.chapters_count = self.chapters_count - _min - _max + if _max > 0 and _min > 0: + _max += _min - 1 + return _min, _max + + def download_archives(self, chapters=None): + if chapters is None: + chapters = self.provider._storage['chapters'] + self.chapters = chapters + for idx, url in enumerate(chapters): + self.provider.before_download_chapter() + self.provider._storage['current_chapter'] = idx + name = self._get_name(idx) + idx, url, name = self.before_download(idx, url, name) + self.__download(idx, name, url) + self.after_download(idx, name) + + def before_download(self, idx, url, _path): + return idx, url, _path + + def after_download(self, idx, _path): + pass diff --git a/manga-py-stable_1.x/manga_py/providers/helpers/animextremist_com.py b/manga-py-stable_1.x/manga_py/providers/helpers/animextremist_com.py new file mode 100644 index 0000000..34c6e17 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/helpers/animextremist_com.py @@ -0,0 +1,81 @@ +from manga_py.provider import 
Provider + + +class AnimeXtremistCom: + provider = None + path = None + + def __init__(self, provider: Provider): + self.provider = provider + self.path = provider.get_url() + + @staticmethod + def build_path(item): + return item[0] + item[1] + + @staticmethod + def __sort(item, selector): + _re = selector.search(item) + if _re: + return int(_re.group(1)) + return 0 + + def sort_items(self, items): + r = self.provider.re.compile(r'.+?-(\d+)') + return sorted(items, key=lambda i: self.__sort(i[0], r)) + + def sort_images(self, items): + r = self.provider.re.compile(r'.+/.+-(\d+)[^/]*\.html') + return sorted(items, key=lambda i: self.__sort(i, r)) + + def _chapters(self, url=None): + a = 'li + li > a' + if url: + items = self.provider.html_fromstring(url, a) + else: + items = self.provider.document_fromstring(self.provider.content, a) + return items + + # http://animextremist.com/mangas-online/99love/ + def _chapters_with_dirs(self, items): + result = [] + for i in items: + href = i.get('href') + url = '{}{}'.format(self.path, href) + result += [(href, ['{}{}'.format( + url, + a.get('href') + ) for a in self._chapters(url)])] + return result + + @staticmethod + def _rebuild_dict_to_tuple(_dict): + result = [] + for i in _dict: + result += [(i, [a for a in _dict[i]])] + return result + + # http://animextremist.com/mangas-online/onepiece-manga/ + def _chapters_without_dirs(self, items): + result = {} + r = self.provider.re.compile(r'(.+?-\d+)') # todo + for i in items: + href = i.get('href') + key = self.provider.re.search(r, href).group(1) + if result.get(key) is None: + result[key] = [] + result[key].append('{}{}'.format(self.path, href)) + return self._rebuild_dict_to_tuple(result) + + def get_chapters(self): + items = self._chapters() + if len(items) and items[0].get('href').find('.html') < 0: + items = self._chapters_with_dirs(items) + else: + items = self._chapters_without_dirs(items) + return self.sort_items(items) + + def get_page_image(self, src, selector, attr='src') -> str: + image = self.provider.html_fromstring(src, selector) + if image and len(image): + return image[0].get(attr) diff --git a/manga-py-stable_1.x/manga_py/providers/helpers/e_hentai_org.py b/manga-py-stable_1.x/manga_py/providers/helpers/e_hentai_org.py new file mode 100644 index 0000000..5689b83 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/helpers/e_hentai_org.py @@ -0,0 +1,30 @@ +from manga_py.provider import Provider + + +class EHentaiOrg: + provider = None + + def __init__(self, provider: Provider): + self.provider = provider + + def get_pages_count(self, parser): + selector = '.gtb table.ptt td[onclick] > a' + paginate = parser.cssselect(selector) + max_idx = 0 + for i in paginate: + idx = self.provider.re.search(r'\?p=(\d+)', i.get('href')) + max_idx = max(max_idx, int(idx.group(1))) + return max_idx + + def get_image(self, i): + url = i.get('href') + src = self.provider.html_fromstring(url, 'img#img', 0) + return src.get('src') + + def get_url(self): + url = self.provider.get_url() + if ~url.find('?'): + url = url[:url.find('?')] + if ~url.find('#'): + url = url[:url.find('#')] + return url diff --git a/manga-py-stable_1.x/manga_py/providers/helpers/eight_muses_com.py b/manga-py-stable_1.x/manga_py/providers/helpers/eight_muses_com.py new file mode 100644 index 0000000..88b3505 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/helpers/eight_muses_com.py @@ -0,0 +1,26 @@ +from manga_py.provider import Provider + + +class EightMusesCom: + provider = None + + def __init__(self, provider: 
Provider): + self.provider = provider + self._n = provider.http().normalize_uri + + def is_images_page(self, parser) -> bool: + if not parser: + return False + return self.provider.re.search(r'/\d+$', parser[0].get('href')) is not None + + def parser(self, url, selector): + return self.provider.html_fromstring(self._n(url), selector) + + def chapters(self, parser) -> list: + if self.is_images_page(parser): + return [parser] + items = [] + selector = self.provider.chapter_selector + for i in parser: + items += self.chapters(self.parser(i.get('href'), selector)) + return items diff --git a/manga-py-stable_1.x/manga_py/providers/helpers/jav_zip_org.py b/manga-py-stable_1.x/manga_py/providers/helpers/jav_zip_org.py new file mode 100644 index 0000000..0e21750 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/helpers/jav_zip_org.py @@ -0,0 +1,50 @@ +from urllib.parse import urlparse + +from manga_py.http.url_normalizer import normalize_uri +from manga_py.provider import Provider + + +class JavZipOrg: + parser = None + url = None + domain = None + + def __init__(self, parser: Provider): + self.parser = parser + url = parser.chapter + if parser.re.search(r'jav-zip\.org', url): + self.url = url + _ = urlparse(url) + self.domain = _.scheme + '://' + _.netloc + + def _parse_id(self): + return self.parser.re.search('/.p=(\d+)', self.url).group(1) + + def parse_images(self, content): + images = [] + for i in content.cssselect('img'): + src = normalize_uri(i.get('src'), self.url) + images.append(src) + return images + + def get(self, step): + url = '{}/wp-admin/admin-ajax.php?post={}&action=get_content&step={}' + url = url.format(self.domain, self._parse_id(), step) + content = self.parser.json.loads(self.parser.http_get(url)) + content = self.parser.document_fromstring(content['mes']) + allow_more = True + if len(content.cssselect('a.view-more')) < 1: + allow_more = False + return allow_more, content + + def get_images(self): + if not self.url: + return [] + images = [] + step = 0 + allow_more = True + while allow_more: + allow_more, content = self.get(step) + step += 50 # constant + images += self.parse_images(content) + return images diff --git a/manga-py-stable_1.x/manga_py/providers/helpers/nine_manga.py b/manga-py-stable_1.x/manga_py/providers/helpers/nine_manga.py new file mode 100644 index 0000000..66e2899 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/helpers/nine_manga.py @@ -0,0 +1,53 @@ +from abc import ABCMeta +from time import sleep +from urllib.parse import unquote + +from requests import get + +from manga_py.provider import Provider + + +class NineHelper(Provider, metaclass=ABCMeta): + img_server = 'https://ta1.taadd.com' + + def re_name(self, url): + return self.re.search(r'/manga/(.+)\.html', url) + + @staticmethod + def normalize_name(name, normalize): + if normalize: + name = unquote(name) + return name + + def parse_img_uri(self, url): + return self.re.search('://[^/]+/(.+)', url).group(1) + + def get_img_server(self, content): + server = self.re.search(r'img_url\s?=\s?"([^"]+)', content) + if server: + return server.group(1) + return self.img_server + + def get_files_on_page(self, content): + result = self.document_fromstring(content, 'em a.pic_download') + if not result: + return [] + images = [] + pic_url = self.get_img_server(content) + for i in result: + src = self.parse_img_uri(i.get('href')) + images.append('{}/{}'.format(pic_url, src)) + return images + + def _get_page_content(self, url): + sleep(.6) + return get( + url, + headers={ + 'Accept-Language':
'ru-RU,ru;q=0.8,en-US;q=0.5,en;q=0.3', + 'Referer': '', + } # fix guard + ).text + + def prepare_cookies(self): + self._storage['cookies'].setdefault('__cfduid', '1a2b3c4d5e') diff --git a/manga-py-stable_1.x/manga_py/providers/helpers/std.py b/manga-py-stable_1.x/manga_py/providers/helpers/std.py new file mode 100644 index 0000000..daac3e3 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/helpers/std.py @@ -0,0 +1,150 @@ +from requests import get +from time import sleep + + +class Std: + _vol_fill = False + + def get_archive_name(self) -> str: + idx = self.get_chapter_index() + self._vol_fill = True + return self.normal_arc_name({'vol': idx.split('-')}) + + def _elements(self, selector, content=None) -> list: + if not content: + content = self.content + return self.document_fromstring(content, selector) + + def _cover_from_content(self, selector, attr='src') -> str: + image = self._elements(selector) + if image is not None and len(image): + return self.http().normalize_uri(image[0].get(attr)) + + @staticmethod + def _first_select_options(parser, selector, skip_first=True) -> list: + options = 'option' + if skip_first: + options = 'option + option' + select = parser.cssselect(selector) + if select: + return select[0].cssselect(options) + return [] + + @classmethod + def _images_helper(cls, parser, selector, attr='src') -> list: + image = parser.cssselect(selector) + return [i.get(attr).strip(' \r\n\t\0') for i in image] + + @classmethod + def _idx_to_x2(cls, idx, default=0) -> list: + return [ + str(idx[0]), + str(default if len(idx) < 2 or not idx[1] else idx[1]) + ] + + @staticmethod + def _join_groups(idx, glue='-') -> str: + result = [] + for i in idx: + if i: + result.append(i) + return glue.join(result) + + def _get_name(self, selector, url=None) -> str: + if url is None: + url = self.get_url() + return self.re.search(selector, url).group(1) + + def _get_content(self, tpl) -> str: + return self.http_get(tpl.format(self.domain, self.manga_name)) + + def _base_cookies(self, url=None): + if url is None: + url = self.get_url() + cookies = self.http().get_base_cookies(url) + self._storage['cookies'] = cookies.get_dict() + + def parse_background(self, image) -> str: + selector = r'background.+?url\([\'"]?([^\s]+?)[\'"]?\)' + url = self.re.search(selector, image.get('style')) + return self.http().normalize_uri(url.group(1)) + + @property + def manga_name(self) -> str: + name = self._storage.get('manga_name', None) + if name is None: + name = self.get_manga_name() + return name + + def normal_arc_name(self, idx): + if isinstance(idx, str): + idx = [idx] + if isinstance(idx, list): + self._vol_fill = True + return self.__normal_name_list(idx) + if isinstance(idx, dict): + return self.__normal_name_dict(idx) + raise DeprecationWarning('Wrong arc name type: %s' % type(idx)) + + @staticmethod + def __fill(var, fmt: str = '-{}'): + if isinstance(var, str): + var = [var] + return (fmt * len(var)).format(*var).lstrip('-') + + def __normal_name_list(self, idx: list): + fmt = 'vol_{:0>3}' + if len(idx) > 1: + fmt += '-{}' * (len(idx) - 1) + elif self._vol_fill and self._zero_fill: + idx.append('0') + fmt += '-{}' + return fmt.format(*idx) + + def __normal_name_dict(self, idx: dict): + vol = idx.get('vol', None) + ch = idx.get('ch', None) + result = '' + if vol: + if isinstance(vol, str): + vol = [vol] + result = self.__normal_name_list(vol) + if ch: + result += '-ch_' + self.__fill(ch) + + if self._with_manga_name: + name = self._params.get('name', '') + if not len(name): + name = 
self.manga_name + + result = '%s-%s' % (name, result) + + return result + + def text_content(self, content, selector, idx: int = 0, strip: bool = True): + doc = self.document_fromstring(content, selector) + if not doc: + return None + text = doc[idx].text_content() + if strip: + text = text.strip() + return text + + def _download(self, file_name, url, method): + # clean file downloader + now_try_count = 0 + while now_try_count < 5: + with open(file_name, 'wb') as out_file: + now_try_count += 1 + response = get(url, timeout=60, allow_redirects=True) + if response.status_code >= 400: + self.http().debug and self.log('ERROR! Code {}\nUrl: {}'.format( + response.status_code, + url, + )) + sleep(2) + continue + out_file.write(response.content) + response.close() + out_file.close() + break diff --git a/manga-py-stable_1.x/manga_py/providers/helpers/tapas_io.py b/manga-py-stable_1.x/manga_py/providers/helpers/tapas_io.py new file mode 100644 index 0000000..6483b96 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/helpers/tapas_io.py @@ -0,0 +1,41 @@ +from manga_py.meta import __downloader_uri__ +from manga_py.provider import Provider + + +class TapasIo: + provider = None + + def __init__(self, provider: Provider): + self.provider = provider + + def _content(self, content): + type = content.get('type', None) + if type == 'DEFAULT': + return self._type_default(content) + + def _error(self, content): + self.provider.log('\r\nERROR!\r\nCode: {}\r\nType: {}\r\nPlease, send url to developer ({})'.format( + content['code'], + content['type'], + __downloader_uri__ + )) + + def _type_default(self, content): + items = self.provider.document_fromstring(content.get('data', {}).get('html', ''), '.art-image') + return [i.get('src') for i in items] + + def chapter_url(self): + return '{}/episode/view/{}'.format( + self.provider.domain, + self.provider.chapter['id'] + ) + + def parse_chapter_content(self): + content = self.provider.json.loads(self.provider.http_get(self.chapter_url())) + if content['code'] != 200: + self._error(content) + return [] + _content = self._content(content) + if _content is None: + self._error(content) + return _content diff --git a/manga-py-stable_1.x/manga_py/providers/helpers/tonarinoyj_jp.py b/manga-py-stable_1.x/manga_py/providers/helpers/tonarinoyj_jp.py new file mode 100644 index 0000000..0765a64 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/helpers/tonarinoyj_jp.py @@ -0,0 +1,53 @@ +from manga_py.crypt import Puzzle +from manga_py.fs import get_temp_path, rename +from manga_py.provider import Provider + + +class TonariNoYjJp: + provider = None + div_num = 4 + multiply = 8 + matrix = None + temp_path = None + + def __init__(self, provider: Provider): + self.provider = provider + self.temp_path = get_temp_path('__image_matrix{}.png') + matrix = {} + for i in range(self.div_num * self.div_num): + matrix[i] = (i % self.div_num) * self.div_num + int(i / self.div_num) + self.matrix = matrix + + def _chapter_api_content(self, idx) -> dict: + api = '{}/api/viewer/readable_products?current_readable_product_id={}&' \ + 'number_since=99&number_until=-1&read_more_num=100&type=episode' + content = self.provider.http_get(api.format(self.provider.domain, idx)) + if content[0] == '{': + return self.provider.json.loads(content) + return {} + + def _check_need_next_chapter(self, next_url): + if next_url: + test = self.provider.re.search('number_since=(\d+)', next_url).group(1) + if int(test) > 1: + return True + return False + + def get_chapters(self, idx) -> list: + 
content = self._chapter_api_content(idx) + items = self.provider.document_fromstring(content.get('html', ''), '.series-episode-list-thumb') + need_more = self._check_need_next_chapter(content.get('nextUrl', None)) + if need_more: + items += self.get_chapters(content.get('nextUrl')) + re = self.provider.re.compile(r'/episode-thumbnail/(\d+)') + return [re.search(i.get('src')).group(1) for i in items] + + def solve_image(self, path, idx): + try: + solver = Puzzle(self.div_num, self.div_num, self.matrix, self.multiply) + solver.need_copy_orig = True + _ = self.temp_path.format(idx) + solver.de_scramble(path, _) + rename(_, path) + except Exception: + pass diff --git a/manga-py-stable_1.x/manga_py/providers/helpers/tsumino_com.py b/manga-py-stable_1.x/manga_py/providers/helpers/tsumino_com.py new file mode 100644 index 0000000..4fb72f4 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/helpers/tsumino_com.py @@ -0,0 +1,29 @@ +from requests import Session + +from manga_py.base_classes import WebDriver +from manga_py.provider import Provider + + +class TsuminoCom: + provider = None + + def __init__(self, provider: Provider): + self.provider = provider + + def get_cookies(self, url): + web_driver = WebDriver() + driver = web_driver.get_driver() + driver.get(url) + iframe = driver.find_element_by_css_selector(".g-recaptcha iframe") + src = self.provider.http_get(iframe.get_attribute('src')) + driver.close() + + g_token = self.provider.html_fromstring(src).cssselect('#recaptcha-token') + session = Session() + h = session.post('{}/Read/AuthProcess'.format(self.provider.domain), data={ + 'g-recaptcha-response': g_token[0].get('value'), + 'Id': 1, + 'Page': 1, + }) + session.close() + return h.cookies diff --git a/manga-py-stable_1.x/manga_py/providers/helveticascans_com.py b/manga-py-stable_1.x/manga_py/providers/helveticascans_com.py new file mode 100644 index 0000000..2eb3e6e --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/helveticascans_com.py @@ -0,0 +1,9 @@ +from .gomanga_co import GoMangaCo + + +class HelveticaScansCom(GoMangaCo): + _name_re = '/r/[^/]+/([^/]+)/' + _content_str = '{}/r/series/{}/' + + +main = HelveticaScansCom diff --git a/manga-py-stable_1.x/manga_py/providers/hentai2read_com.py b/manga-py-stable_1.x/manga_py/providers/hentai2read_com.py new file mode 100644 index 0000000..8ed9d4c --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/hentai2read_com.py @@ -0,0 +1,36 @@ +from manga_py.provider import Provider +from .helpers.std import Std + + +class Hentai2ReadCom(Provider, Std): + images_cdn = 'https://static.hentaicdn.com/hentai' + + def get_chapter_index(self) -> str: + chapter = self.chapter + idx = self.re.search('.+/([^/]+)/', chapter) + return '-'.join(idx.group(1).split('.')) + + def get_main_content(self): + return self._get_content('{}/{}/') + + def get_manga_name(self) -> str: + return self._get_name(r'\.com/([^/]+)') + + def get_chapters(self): + return self._elements('li .chapter-row') + + def get_files(self): + content = self.http_get(self.chapter) + selector = r'\'images\'\s*:\s*(\[.+\])' + items = self.json.loads(self.re.search(selector, content).group(1)) + return ['{}{}'.format(self.images_cdn, i) for i in items] + + def get_cover(self) -> str: + return self._cover_from_content('.ribbon-primary .border-black-op') + + def book_meta(self) -> dict: + # todo meta + pass + + +main = Hentai2ReadCom diff --git a/manga-py-stable_1.x/manga_py/providers/hentai_cafe.py b/manga-py-stable_1.x/manga_py/providers/hentai_cafe.py new file mode 100644 
index 0000000..112cd00 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/hentai_cafe.py @@ -0,0 +1,21 @@ +from .gomanga_co import GoMangaCo + +from .helpers.std import Std + + +class HentaiCafe(GoMangaCo, Std): + _name_re = r'\.cafe(?:/manga/read)?/([^/]+)/' + _content_str = '{}/{}/' + _chapters_selector = '.content .last .x-btn' # TODO + + def get_archive_name(self) -> str: + return 'archive_{:0>2}'.format(self.chapter_id) + + def get_chapter_index(self) -> str: + return str(self.chapter_id) + + def get_cover(self) -> str: + return self._cover_from_content('.entry-content img') + + +main = HentaiCafe diff --git a/manga-py-stable_1.x/manga_py/providers/hentai_chan_me.py b/manga-py-stable_1.x/manga_py/providers/hentai_chan_me.py new file mode 100644 index 0000000..506c974 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/hentai_chan_me.py @@ -0,0 +1,48 @@ +from shutil import copy + +from manga_py.fs import is_file, storage, root_path, path_join +from .mangachan_me import MangaChanMe + + +class HentaiChanMe(MangaChanMe): + + def get_chapter_index(self) -> str: + return str(self.chapter_id) + + def _login(self, **kwargs): + url = self.domain + '/index.php' + # login = kwargs.get('login', '') + # password = kwargs.get('password', '') + login = self.quest([], 'Please, input login') + password = self.quest_password('Please, input password') + method = kwargs.get('method', 'post') + data = {'login_name': login, 'login_password': password, 'image': 'Вход', 'login': 'submit'} + response = self.http().requests(method=method, data=data, url=url) + cookies = {} + for i in response.cookies.items(): + cookies[i[0]] = i[1] + return cookies + + def prepare_cookies(self): + _storage = storage('.passwords.json') + if not is_file(_storage): + copy(path_join(root_path(), 'manga_py', '.passwords.json.dist'), _storage) + file = open(_storage, 'r').read() + data = self.json.loads(file).get('hentai_chan_me', {}) + cookies = self._login(**data) + for i in cookies: + self._storage['cookies'][i] = cookies[i] + + def get_chapters(self): + name = self.re.search(self._full_name_selector, self.get_url()) + url = '{}/related/{}'.format(self.domain, name.group(1)) + chapters = self.html_fromstring(url, '.related .related_info > h2 a') + nu = self.http().normalize_uri + return [nu(i.get('href').replace('/manga/', '/online/')) for i in chapters] + + def book_meta(self) -> dict: + # todo meta + pass + + +main = HentaiChanMe diff --git a/manga-py-stable_1.x/manga_py/providers/hentai_image_com.py b/manga-py-stable_1.x/manga_py/providers/hentai_image_com.py new file mode 100644 index 0000000..8e4f7f2 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/hentai_image_com.py @@ -0,0 +1,52 @@ +from manga_py.provider import Provider +from .helpers.std import Std + + +class HentaiImageCom(Provider, Std): + + def get_archive_name(self) -> str: + return 'archive' + + def get_chapter_index(self) -> str: + return '0' + + def get_main_content(self): + return self._get_content('{}/image/{}') + + def get_manga_name(self) -> str: + return self._get_name('/image/([^/]+)') + + def get_chapters(self): + return [b''] + + def _pages(self, parser): + pages = parser.cssselect('#paginator') + if pages: + href = pages[0].cssselect('span > a')[-1].get('href') + page = self.re.search(r'/page/(\d+)', href) + return range(2, int(page.group(1)) + 1) + return [] + + def get_files(self): + parser = self.document_fromstring(self.content) + pages = self._pages(parser) + selector = '#display_image_detail div > a > img' + images = 
self._images_helper(parser, selector) + for i in pages: + content = self._get_content('{}/image/{}/page/%d' % i) + parser = self.document_fromstring(content) + images += self._images_helper(parser, selector) + return images + + def get_cover(self) -> str: + pass + + def book_meta(self) -> dict: + # todo meta + pass + + def chapter_for_json(self): + return self.get_url() + + +main = HentaiImageCom diff --git a/manga-py-stable_1.x/manga_py/providers/hentaifox_com.py b/manga-py-stable_1.x/manga_py/providers/hentaifox_com.py new file mode 100644 index 0000000..21138d3 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/hentaifox_com.py @@ -0,0 +1,48 @@ +from manga_py.provider import Provider +from .helpers.std import Std + + +class HentaiFoxCom(Provider, Std): + _idx_re = r'/g(?:allery)?/(\d+)' + _url_str = '{}/gallery/{}/' + _name_selector = '.info h1' + _archive_prefix = 'HentaiFox_' + + def get_archive_name(self) -> str: + return self.get_chapter_index() + + def get_chapter_index(self) -> str: + return self._archive_prefix + 'archive' + + def get_main_content(self): + idx = self._get_name(self._idx_re) + url = self._url_str.format(self.domain, idx) + return self.http_get(url) + + def get_manga_name(self) -> str: + return self.text_content(self.content, self._name_selector) + + def get_chapters(self): + return [b''] + + def get_files(self): + pages = self._elements('.gallery .preview_thumb a') + items = [] + n = self.http().normalize_uri + for i in pages: + url = self.html_fromstring(n(i.get('href')), '#gimg', 0).get('src') + items.append(n(url)) + return items + + def get_cover(self) -> str: + return self._cover_from_content('.cover img,#cover img') + + def book_meta(self) -> dict: + # todo meta + pass + + def chapter_for_json(self): + return self.get_url() + + +main = HentaiFoxCom diff --git a/manga-py-stable_1.x/manga_py/providers/hentaihand_com.py b/manga-py-stable_1.x/manga_py/providers/hentaihand_com.py new file mode 100644 index 0000000..8538076 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/hentaihand_com.py @@ -0,0 +1,17 @@ +from .hentaifox_com import HentaiFoxCom + + +class HentaiHandCom(HentaiFoxCom): + _idx_re = r'/comic/(\d+)' + _url_str = '{}/comic/{}/' + _name_selector = '#info h1' + _archive_prefix = 'HentaiHand_' + + def get_files(self): + parser = self.document_fromstring(self.content) + images = self._images_helper(parser, 'a.gallerythumb > img') + re = self.re.compile(r'(.+/images/)(\d+)') + return ['{}full/{}.jpg'.format(*re.search(i).groups()) for i in images] + + +main = HentaiHandCom diff --git a/manga-py-stable_1.x/manga_py/providers/hentaihere_com.py b/manga-py-stable_1.x/manga_py/providers/hentaihere_com.py new file mode 100644 index 0000000..58529bc --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/hentaihere_com.py @@ -0,0 +1,43 @@ +from manga_py.provider import Provider +from .helpers.std import Std + + +class HentaiHereCom(Provider, Std): + _cdn = 'https://hentaicdn.com/hentai' + + def get_chapter_index(self) -> str: + chapter = self.chapter + idx = self.re.search('/m/[^/]+/([^/]+(?:/[^/]+))', chapter) + return idx.group(1).replace('/', '-') + + def get_main_content(self): + url = self.re.search('(/m/[^/]+)', self.get_url()) + url = '{}{}'.format(self.domain, url.group(1)) + return self.http_get(url) + + def get_manga_name(self) -> str: + selector = 'span.hide[itemscope] span[itemprop="name"]' + name = self.document_fromstring(self.content, selector) + if not name: + selector = '#detail span[itemprop="title"]' + name = 
self.document_fromstring(self.content, selector) + return name[0].text_content().strip() + + def get_chapters(self): + return self._elements('ul.arf-list > li > a') + + def get_files(self): + chapter = self.chapter + content = self.http_get(chapter) + items = self.re.search(r'_imageList\s*=\s*(\[".+"\])', content).group(1) + return [self._cdn + i for i in self.json.loads(items)] + + def get_cover(self) -> str: + return self._cover_from_content('#cover img') + + def book_meta(self) -> dict: + # todo meta + pass + + +main = HentaiHereCom diff --git a/manga-py-stable_1.x/manga_py/providers/hentaiporns_net.py b/manga-py-stable_1.x/manga_py/providers/hentaiporns_net.py new file mode 100644 index 0000000..1b2fb37 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/hentaiporns_net.py @@ -0,0 +1,35 @@ +from manga_py.provider import Provider +from .helpers.std import Std +from sys import stderr + + +class HentaiPornsNet(Provider, Std): + + def get_archive_name(self) -> str: + return 'archive' + + def get_chapter_index(self) -> str: + return '0' + + def get_main_content(self): + url = self.get_url() + if ~url.find('/tag/'): + self.log('Please, use target url', file=stderr) + exit(1) + return self.http_get(self.get_url()) + + def get_manga_name(self) -> str: + return self._get_name(r'//[^/]+/([^/]+)') + + def get_chapters(self): + return [b''] + + def get_files(self): + parser = self.document_fromstring(self.content) + return self._images_helper(parser, '.gallery-item a', 'href') + + def get_cover(self) -> str: + return self._cover_from_content('.post-thumbnail img') + + +main = HentaiPornsNet diff --git a/manga-py-stable_1.x/manga_py/providers/hentairead_com.py b/manga-py-stable_1.x/manga_py/providers/hentairead_com.py new file mode 100644 index 0000000..4e5f640 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/hentairead_com.py @@ -0,0 +1,11 @@ +from .helpers.std import Std +from .readhentaimanga_com import ReadHentaiMangaCom + + +class HentaiReadCom(ReadHentaiMangaCom, Std): + + def get_chapters(self): + return self._elements('.read-now a.lst') + + +main = HentaiReadCom diff --git a/manga-py-stable_1.x/manga_py/providers/hgamecg_com.py b/manga-py-stable_1.x/manga_py/providers/hgamecg_com.py new file mode 100644 index 0000000..96f8519 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/hgamecg_com.py @@ -0,0 +1,60 @@ +from manga_py.provider import Provider +from .helpers.std import Std + + +class HGameCGCom(Provider, Std): + __img_selector = '#thumbnails > div .col-thumbnail' + __img_count = 0 + + def get_archive_name(self) -> str: + return 'page_{:0>3}'.format(self.chapter) + + def get_chapter_index(self) -> str: + return '0' + + def get_main_content(self): + return self._get_content('{}/index/category/{}') + + def get_manga_name(self) -> str: + return self._get_name('/index/category/([^/]+)') + + def get_chapters(self): + images = self.document_fromstring(self.content, self.__img_selector) + self.__img_count = len(images) + pages = self.document_fromstring(self.content, '.pagination li > a') + pages_count = 0 + if pages: + pages_count = self.re.search('/start-(\d+)', pages[-1].get('href')).group(1) + pages_count = int(int(pages_count) / self.__img_count) + return range(1, pages_count + 2)[::-1] + + def __tmb_to_img(self, tmbs): + url = '{}/action.php?id={}&part=e' + imgs = [] + re = self.re.compile(r'/picture/(\d+)') + for i in tmbs: + idx = re.search(i.get('href')).group(1) + imgs.append(url.format(self.domain, idx)) + return imgs + + def get_files(self): + pages_url = 
self.get_url() + '/start-{}' + if self.chapter > 1: + offset = self.__img_count * (self.chapter - 1) + images = self.html_fromstring(pages_url.format(offset), self.__img_selector) + else: + images = self.document_fromstring(self.content, self.__img_selector) + return self.__tmb_to_img(images) + + def get_cover(self) -> str: + # return self._cover_from_content('.cover img') + pass + + def book_meta(self) -> dict: + pass + + def chapter_for_json(self): + return self.get_url() + + +main = HGameCGCom diff --git a/manga-py-stable_1.x/manga_py/providers/hitmanga_eu.py b/manga-py-stable_1.x/manga_py/providers/hitmanga_eu.py new file mode 100644 index 0000000..b110ff9 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/hitmanga_eu.py @@ -0,0 +1,63 @@ +from urllib.parse import unquote_plus + +from manga_py.provider import Provider +from .helpers.std import Std + + +class HitMangaEu(Provider, Std): + _n = None + postfix = None + main_domain = 'http://www.mymanga.io' + api_url = 'http://www.hitmanga.eu/listener/' + + def get_chapter_index(self) -> str: + chapter = self.chapter + idx = self.re.search('[^/]+/[^/]+/[^/]+?-([^/]+)', chapter) + return idx.group(1) + + def get_main_content(self): + return self._content(self._get_content('{}/mangas/{}/')) + + def get_manga_name(self) -> str: + url = self.get_url() + re = '{}/([^/]+)' + if ~url.find('/mangas/'): + re = '{}/mangas/([^/]+)' + re = re.format(self.postfix) + return self.re.search(re, url).group(1) + + def get_chapters(self): + return self._elements('.listchapseries li a.follow:not(.ddl)') + + def _content(self, url): + return self.re.sub(r'', '', self.http_get(url)) + + def get_files(self): + chapter = self.chapter + img = self.document_fromstring(self._content(chapter), '#chpimg', 0).get('src') + idx = self.get_chapter_index() + items = self.http_post(url=self.api_url, data={ + 'number': unquote_plus(idx), + 'permalink': self.manga_name, + 'type': 'chap-pages', + }) + if items == '0': + return [] + items = items.split('|') + return [self._n(i, img) for i in items] + + def get_cover(self) -> str: + return self._cover_from_content('#picture img') + + def prepare_cookies(self): + domain = self.domain.split('.') + self.postfix = r'\.' 
+ domain[-1] + n = self.http().normalize_uri + self._n = lambda u, r: n(u, r) + + def book_meta(self) -> dict: + # todo meta + pass + + +main = HitMangaEu diff --git a/manga-py-stable_1.x/manga_py/providers/hitomi_la.py b/manga-py-stable_1.x/manga_py/providers/hitomi_la.py new file mode 100644 index 0000000..3dce9ce --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/hitomi_la.py @@ -0,0 +1,25 @@ +from urllib.parse import unquote_plus + +from .hentaifox_com import HentaiFoxCom + + +class HitomiLa(HentaiFoxCom): + _idx_re = r'/(?:galleries|reader)/(\d+)' + _url_str = '{}/galleries/{}.html' + _name_selector = '.dj-gallery h1 a' + _cdn = 'http://0a.hitomi.la/galleries/' + + def get_manga_name(self): + name = super().get_manga_name() + return unquote_plus(name.split('|')[0].strip()) + + def get_files(self): + idx = self._get_name(self._idx_re) + url = 'http://ltn.hitomi.la/galleries/{}.js'.format(idx) + images = self.re.search(r'(\[.+\])', self.http_get(url)) + images = self.json.loads(images.group(1)) + p = '{}{}/'.format(self._cdn, idx) + return [p + i.get('name') for i in images] + + +main = HitomiLa diff --git a/manga-py-stable_1.x/manga_py/providers/hocvientruyentranh_com.py b/manga-py-stable_1.x/manga_py/providers/hocvientruyentranh_com.py new file mode 100644 index 0000000..6d2b5be --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/hocvientruyentranh_com.py @@ -0,0 +1,40 @@ +from manga_py.provider import Provider +from .helpers.std import Std + + +class HocVienTruyenTranhCom(Provider, Std): + + def get_chapter_index(self) -> str: + idx = self.re.search(r'/chapter/(\d+)', self.chapter) + return '{}-{}'.format(self.chapter_id, idx.group(1)) + + def _test_main_url(self, url): + if self.re.search('/chapter/', url): + url = self.html_fromstring(url, '#subNavi a', 0).get('href') + return url + + def get_main_content(self): + url = self._test_main_url(self.get_url()) + return self.http_get(self.http().normalize_uri(url)) + + def get_manga_name(self) -> str: + url = self._test_main_url(self.get_url()) + return self.re.search('/manga/[^/]+/([^/]+)', url).group(1) + + def get_chapters(self): + return self._elements('.table-scroll table.table td > a') + + def get_files(self): + selector = '.manga-container img.page' + items = self.html_fromstring(self.chapter, selector) + return [i.get('src') for i in items] + + def get_cover(self): + return self._cover_from_content('.__info-container .__image img') + + def book_meta(self) -> dict: + # todo meta + pass + + +main = HocVienTruyenTranhCom diff --git a/manga-py-stable_1.x/manga_py/providers/hoducomics_com.py b/manga-py-stable_1.x/manga_py/providers/hoducomics_com.py new file mode 100644 index 0000000..9b41e9c --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/hoducomics_com.py @@ -0,0 +1,54 @@ +from manga_py.crypt.base_lib import BaseLib +from manga_py.provider import Provider +from .helpers.std import Std + + +class HoduComicsCom(Provider, Std): + + def get_archive_name(self) -> str: + return self.normal_arc_name({'vol': [ + self.chapter_id, + self.get_chapter_index() + ]}) + + def get_chapter_index(self) -> str: + return self.re.search(r'view/(\d+)', self.chapter).group(1) + + def get_main_content(self): + content = self._storage.get('main_content', None) + if content is None: + self._storage['main_content'] = self.http_get(self.get_url()) + return self._storage['main_content'] + + def get_manga_name(self) -> str: + self.http().referer = self.get_url() + element = self.document_fromstring(self.content, '[property="og:title"]', 
0) + return element.get('content') + + def get_chapters(self): + items = self._elements('.episode_list .episode_tr.not_need_pay') + re = self.re.compile(r'(/webtoon/.+?/\d+)') + n = self.http().normalize_uri + if len(items) == 0: + return [] + return [n(re.search(i.get('onclick')).group(1)) for i in items] + + def get_files(self): + content = self.http_get(self.chapter) + images = self.re.search(r'toon_img\s*=\s*[\'"](.+?)[\'"]', content) + if not images: + return [] + parser = self.document_fromstring(BaseLib.base64decode(images.group(1)).decode()) + return self._images_helper(parser, 'img') + + def get_cover(self) -> str: + return self._cover_from_content('.episode_bnr > img') + + def book_meta(self) -> dict: + pass + + def prepare_cookies(self): + self.cf_protect(self.get_url()) + + +main = HoduComicsCom diff --git a/manga-py-stable_1.x/manga_py/providers/hotchocolatescans_com.py b/manga-py-stable_1.x/manga_py/providers/hotchocolatescans_com.py new file mode 100644 index 0000000..65d7a31 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/hotchocolatescans_com.py @@ -0,0 +1,9 @@ +from .gomanga_co import GoMangaCo + + +class HotChocolateScansCom(GoMangaCo): + _name_re = '/fs/[^/]+/([^/]+)/' + _content_str = '{}/fs/series/{}/' + + +main = HotChocolateScansCom diff --git a/manga-py-stable_1.x/manga_py/providers/inmanga_com.py b/manga-py-stable_1.x/manga_py/providers/inmanga_com.py new file mode 100644 index 0000000..181ab43 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/inmanga_com.py @@ -0,0 +1,71 @@ +from manga_py.provider import Provider +from .helpers.std import Std + + +class InMangaCom(Provider, Std): + __local_storage = None + + def get_chapter_index(self) -> str: + return str(self.chapter['Number']) + + def get_main_content(self): + if not self.__local_storage.get('uri_hex', False): + self.get_manga_name() + url = '{}/chapter/getall?mangaIdentification={}'.format( + self.domain, + self.__local_storage['uri_hex'] + ) + data = self.json.loads(self.http_get(url))['data'] + return self.json.loads(data) + + def get_manga_name(self) -> str: + url = self.get_url() + test = self.re.search(r'/ver/manga/[^/]+/\d+/[^/]+', url) + if test: + content = self._elements('.chapterControlsContainer label.blue a.blue')[0] + url = self.domain + content.get('href') + manga_name, uri_hex = self.re.search('/ver/manga/([^/]+)/([^/]+)', url).groups() + self.__local_storage['manga_name'] = manga_name + self.__local_storage['uri_hex'] = uri_hex + return self.__local_storage['manga_name'] + + @staticmethod + def __sort_chapters(items, reverse=False): + return sorted(items, key=lambda i: float(i['FriendlyChapterNumber']), reverse=reverse) + + def get_chapters(self): + items = self.content['result'] + return self.__sort_chapters(items, True) + + def prepare_cookies(self): + self.__local_storage = {} + + def _make_url(self, chapter): + return '{}/ver/manga/{}/{}/{}'.format( + self.domain, + self.manga_name, + chapter['FriendlyChapterNumber'], + chapter['Identification'] + ) + + def get_files(self): + files_url = '{}/page/getPageImage/?identification={}' + url = self._make_url(self.chapter) + images = self.html_fromstring(url, '.PagesContainer img.ImageContainer') + + domain = self.domain + return [files_url.format(domain, i.get('id')) for i in images] + + def get_cover(self): + idx = self.__local_storage['uri_hex'] + return '{}/manga/getMangaImage?identification={}'.format(self.domain, idx) + + def book_meta(self) -> dict: + # todo meta + pass + + def chapter_for_json(self): + return 
self._make_url(self.chapter) + + +main = InMangaCom diff --git a/manga-py-stable_1.x/manga_py/providers/isekaiscan_com.py b/manga-py-stable_1.x/manga_py/providers/isekaiscan_com.py new file mode 100644 index 0000000..f12eea4 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/isekaiscan_com.py @@ -0,0 +1,12 @@ +from .rawdevart_com import RawDevArtCom + + +class ISekaiScanCom(RawDevArtCom): + _chapter_selector = r'/chapter-(\d+(?:-[\w\-]+)?)' + + def get_files(self): + parser = self.html_fromstring(self.chapter) + return self._images_helper(parser, '.page-break img.wp-manga-chapter-img', 'data-src') + + +main = ISekaiScanCom diff --git a/manga-py-stable_1.x/manga_py/providers/japscan_com.py b/manga-py-stable_1.x/manga_py/providers/japscan_com.py new file mode 100644 index 0000000..9d0fc22 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/japscan_com.py @@ -0,0 +1,32 @@ +from .gomanga_co import GoMangaCo + + +class JapScanCom(GoMangaCo): + _name_re = r'\.(?:com|cc|to)/[^/]+/([^/]+)/' + _content_str = '{}/manga/{}/' + _chapters_selector = '#chapters_list .chapters_list a' + + def get_archive_name(self) -> str: + idx = self.chapter_id, self.get_chapter_index() + return self.normal_arc_name({'vol': idx}) + + def get_chapter_index(self) -> str: + selector = r'\.(?:com|cc|to)/[^/]+/[^/]+/(\d+)/' + url = self.chapter + return self.re.search(selector, url).group(1) + + def get_files(self): + n = self.http().normalize_uri + parser = self.html_fromstring(self.chapter) + + base_url = self.base_url(parser) + images = self._images_helper(parser, '#pages option', 'data-img') + + return [n(base_url+i) for i in images] + + def base_url(self, parser): + base_url = parser.cssselect('#image')[0].get('data-src') + return self.re.search(r'(.+/)\w+\.\w+', base_url).group(1) + + +main = JapScanCom diff --git a/manga-py-stable_1.x/manga_py/providers/jurnalu_ru.py b/manga-py-stable_1.x/manga_py/providers/jurnalu_ru.py new file mode 100644 index 0000000..9b55923 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/jurnalu_ru.py @@ -0,0 +1,50 @@ +from manga_py.provider import Provider +from .helpers.std import Std + + +class JurnaluRu(Provider, Std): + + def get_chapter_index(self) -> str: + return str(self.chapter_id) + + def get_main_content(self): + name = self._get_name(r'(online-reading/[^/]+/[^/]+)') + url = self.html_fromstring( + '{}/{}'.format(self.domain, name), + '.MagList .MagListLine > a', + 0 + ).get('href') + return self.http_get(self.domain + url) + + def get_manga_name(self) -> str: + return self._get_name(r'/online-reading/[^/]+/([^/]+)') + + def get_chapters(self): + name = self.re.search(r'(online-reading/[^/]+/[^/]+)', self.get_url()) + if not name: + return [] + items = self.document_fromstring(self.content, 'select.magSelection option') + url = '{}/{}/'.format(self.domain, name.group(1)) + return [url + i.get('value') for i in items] + + @staticmethod + def __get_file(parser): + image = parser.cssselect('a[rel="shadowbox"]') + return image[0].get('href') + + def get_files(self): + chapter = self.chapter + page = self.html_fromstring(chapter, '.ForRead', 0) + pages = page.cssselect('.navigation')[0].cssselect('select.M option + option') + images = [self.__get_file(page)] + for i in pages: + uri = '{}/{}'.format(chapter, i.get('value')) + parser = self.html_fromstring(uri, '.ForRead', 0) + images.append(self.__get_file(parser)) + return images + + def get_cover(self): + return self._cover_from_content('.ops > div > img') + + +main = JurnaluRu diff --git 
a/manga-py-stable_1.x/manga_py/providers/kissmanga_com.py b/manga-py-stable_1.x/manga_py/providers/kissmanga_com.py new file mode 100644 index 0000000..7dc17a3 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/kissmanga_com.py @@ -0,0 +1,87 @@ +from sys import stderr + +from manga_py.crypt import KissMangaComCrypt +from manga_py.provider import Provider +from .helpers.std import Std + + +class KissMangaCom(Provider, Std): + __local_data = { + 'iv': b'a5e8e2e9c2721be0a84ad660c472c1f3', + 'key': b'mshsdf832nsdbash20asdm', + } + + def get_archive_name(self) -> str: + return '{:0>3}-{}'.format( + self.chapter_id + 1, + self.get_chapter_index() + ) + + def get_chapter_index(self) -> str: + name = self.re.search(r'/Manga/[^/]+/(.+)\?id=(\d+)', self.chapter) + return '-'.join(name.groups()) + + def get_main_content(self): + return self._get_content('{}/Manga/{}') + + def get_manga_name(self) -> str: + return self._get_name('/Manga/([^/]+)') + + def get_chapters(self): + chapters = self._elements('.listing td a') + if not len(chapters): + self.log('Chapters not found', file=stderr) + return chapters + + def prepare_cookies(self): + self._params['rename_pages'] = True + self.cf_protect(self.get_url()) + self._storage['cookies']['rco_quality'] = 'hq' + if not self._params['cf-protect']: + self.log('CloudFlare protect fail!', file=stderr) + + def __decrypt_images(self, crypt, key, hexes): + images = [] + for i in hexes: + try: + img = crypt.decrypt(self.__local_data['iv'], key, i) + images.append(img.decode('utf-8', errors='ignore').replace('\x10', '').replace('\x0f', '')) + + except Exception as e: + pass + + return images + + def __check_key(self, crypt, content): + # if need change key + need = self.re.search(r'\["([^"]+)"\].\+chko.?=.?chko', content) + key = self.__local_data['key'] + if need: + # need last group + key += crypt.decode_escape(need.group(1)) + else: + # if need change key + need = self.re.findall(r'\["([^"]+)"\].*?chko.*?=.*?chko', content) + if need: + key = crypt.decode_escape(need[-1]) + return key + + def get_files(self): + crypt = KissMangaComCrypt() + content = self.http_get(self.chapter) + key = self.__check_key(crypt, content) + hexes = self.re.findall(r'lstImages.push\(wrapKA\(["\']([^"\']+?)["\']\)', content) + if not hexes: + self.log('Images not found!', file=stderr) + return [] + return self.__decrypt_images(crypt, key, hexes) + + def get_cover(self): + return self._cover_from_content('.rightBox .barContent img') + + def book_meta(self) -> dict: + # todo meta + pass + + +main = KissMangaCom diff --git a/manga-py-stable_1.x/manga_py/providers/komikcast_com.py b/manga-py-stable_1.x/manga_py/providers/komikcast_com.py new file mode 100644 index 0000000..877b281 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/komikcast_com.py @@ -0,0 +1,38 @@ +from manga_py.provider import Provider +from .helpers.std import Std + + +class KomikCastCom(Provider, Std): + _chapter_re = r'\.com/[^/]+-(\d+(?:-\d+)?)' + + def get_chapter_index(self) -> str: + re = self.re.compile('-chapter-(\d+(?:-\d+)?)') + return re.search(self.chapter).group(1) + + def get_main_content(self): + return self._get_content('{}/{}') + + def get_manga_name(self) -> str: + url = self.get_url() + if ~url.find('/chapter/'): + url = self.html_fromstring(url, '.allc a', 0).get('href') + self._params['url'] = self.http().normalize_uri(url) + return self.get_manga_name() + return self._get_name(r'\.com/([^/]+)') + + def get_chapters(self) -> list: + return self._elements('.mangainfo .leftoff a') + + 
def get_files(self) -> list: + parser = self.html_fromstring(self.chapter) + return self._images_helper(parser, '#readerarea img') + + def get_cover(self): + return self._cover_from_content('.topinfo img') + + def book_meta(self) -> dict: + # todo meta + pass + + +main = KomikCastCom diff --git a/manga-py-stable_1.x/manga_py/providers/komikid_com.py b/manga-py-stable_1.x/manga_py/providers/komikid_com.py new file mode 100644 index 0000000..a916a23 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/komikid_com.py @@ -0,0 +1,26 @@ +from .gomanga_co import GoMangaCo +from .helpers.std import Std + + +class KomikIdCom(GoMangaCo, Std): + _name_re = '/manga/([^/]+)' + _content_str = '{}/manga/{}' + _chapters_selector = '.chapter-title-rtl a' + + def get_chapter_index(self) -> str: + re = r'/manga/[^/]+/(\d+)(?:[^\d](\d+))?' + idx = self.re.search(re, self.chapter) + return self._join_groups(idx.groups()) + + def get_files(self): + parser = self.html_fromstring(self.chapter) + return self._images_helper(parser, '#all img[data-src]', 'data-src') + + def get_cover(self) -> str: + return self._cover_from_content('.boxed img') + + def prepare_cookies(self): + pass + + +main = KomikIdCom diff --git a/manga-py-stable_1.x/manga_py/providers/kumanga_com.py b/manga-py-stable_1.x/manga_py/providers/kumanga_com.py new file mode 100644 index 0000000..a319d21 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/kumanga_com.py @@ -0,0 +1,66 @@ +from math import ceil + +from manga_py.provider import Provider +from .helpers.std import Std + + +class KuMangaCom(Provider, Std): + def get_chapter_index(self) -> str: + return str(self.chapter_id) + + def get_main_content(self): + url = self.re.search(r'(.+\.com/manga/\d+)', self.get_url()) + return self.http_get('%s/' % url.group(1)) + + def get_manga_name(self) -> str: + selector = r'pagination\(\d+,\'(.+)\',\'pagination\'' + parser = self.re.search(selector, self.content) + return parser.group(1).strip() + + def _chapters(self, parser): + items = parser.cssselect('.table h4.title > a') + chapters = [] + for i in items: + c = '{}/{}'.format(self.domain, i.get('href')) + chapters.append(c.replace('/c/', '/leer/')) + return chapters + + def _url_helper(self): + idx = self.re.search(r'\.com/manga/(\d+)', self.get_url()) + return '{}/manga/{}/p/%d/{}'.format( + self.domain, + idx.group(1), + self.manga_name + ) + + def get_chapters(self): + selector = r'\'pagination\',\d+,(\d+),(\d+)' + pages = self.re.search(selector, self.content).groups() + pages = ceil(float(pages[0]) / float(pages[1])) + chapters = [] + url_path = self._url_helper() + for i in range(int(pages) - 1): + parser = self.html_fromstring(url_path % (i + 1)) + chapters += self._chapters(parser) + return chapters + + def _get_real_url(self, url): + location = self.http().requests(url=url, method='head') + return location.headers.get('Location', url) + + def get_files(self): + r = self.http().get_redirect_url + selector = r'(\[\{"npage".+\}\])' + content = self.http_get(self.chapter) + items = self.json.loads(self.re.search(selector, content).group(1)) + return [r(i.get('imgURL')) for i in items] + + def get_cover(self) -> str: + return self._cover_from_content('.container img.img-responsive') + + def book_meta(self) -> dict: + # todo meta + pass + + +main = KuMangaCom diff --git a/manga-py-stable_1.x/manga_py/providers/lector_kirishimafansub_com.py b/manga-py-stable_1.x/manga_py/providers/lector_kirishimafansub_com.py new file mode 100644 index 0000000..2cef828 --- /dev/null +++
b/manga-py-stable_1.x/manga_py/providers/lector_kirishimafansub_com.py @@ -0,0 +1,9 @@ +from .gomanga_co import GoMangaCo + + +class LectorKirishimaFanSubCom(GoMangaCo): + _name_re = '/(?:reader/)?(?:series|read)/([^/]+)/' + _content_str = '{}/lector/series/{}/' + + +main = LectorKirishimaFanSubCom diff --git a/manga-py-stable_1.x/manga_py/providers/lector_ytnofan_com.py b/manga-py-stable_1.x/manga_py/providers/lector_ytnofan_com.py new file mode 100644 index 0000000..5284987 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/lector_ytnofan_com.py @@ -0,0 +1,9 @@ +from .gomanga_co import GoMangaCo + + +class LectorYTNoFanCom(GoMangaCo): + _name_re = '/(?:series|read)/([^/]+)/' + _content_str = '{}/series/{}/' + + +main = LectorYTNoFanCom diff --git a/manga-py-stable_1.x/manga_py/providers/leitor_net.py b/manga-py-stable_1.x/manga_py/providers/leitor_net.py new file mode 100644 index 0000000..e5ce252 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/leitor_net.py @@ -0,0 +1,81 @@ +from manga_py.provider import Provider +from .helpers.std import Std + + +class LeitorNet(Provider, Std): + __idx = None + + def get_archive_name(self) -> str: + ch = self.chapter + return self.normal_arc_name({ + 'vol': ch['number'].split('.'), + 'ch': [ch['id_chapter'], ch['id_release']], + }) + + def get_chapter_index(self) -> str: + return '-'.join(self.chapter['number'].split('.')) + + def get_main_content(self): + idx = self.html_fromstring(self.get_url(), '[data-id-serie]', 0) + self.__idx = idx.get('data-id-serie') + return b'0' + + def get_manga_name(self) -> str: + return self._get_name(r'/manga/([^/]+)') + + @staticmethod + def __morph_chapters(items): + result = [] + for item in items: # items loop + for r in item['releases']: # items.releases loop + release = item['releases'][r] + result.append({ + 'number': item['number'], + 'id_chapter': item['id_chapter'], + 'id_serie': item['id_serie'], + 'id_release': release['id_release'], + 'link': release['link'], + }) + return result + + def get_chapters(self): + url = '{}/series/chapters_list.json?page={}&id_serie={}' + items = [] + for i in range(1, 100): + content = self.json.loads(self.http_get(url.format( + self.domain, i, self.__idx + ), headers={'x-requested-with': 'XMLHttpRequest'})) + chapters = content.get('chapters', False) + if not chapters: + break + items += chapters + return self.__morph_chapters(items) + + def get_files(self): + content = self.http_get(self.domain + self.chapter['link']) + token = self.re.search(r'token=([^&]+)', content).group(1) + url = '{}/leitor/pages.json?key={}&id_release={}' + images = self.json.loads(self.http_get(url.format( + self.domain, + token, self.chapter['id_release'] + ), headers={'x-requested-with': 'XMLHttpRequest'})) + return images.get('images', {}) + + def get_cover(self) -> str: + url = '{}/manga/{}/{}'.format( + self.domain, + self.manga_name, + self.__idx + ) + image = self.html_fromstring(url, '.cover-image') + if image and len(image): + return self.parse_background(image[0]) + + def book_meta(self) -> dict: + pass + + def chapter_for_json(self): + return self.domain + self.chapter['link'] + + +main = LeitorNet diff --git a/manga-py-stable_1.x/manga_py/providers/leomanga_com.py b/manga-py-stable_1.x/manga_py/providers/leomanga_com.py new file mode 100644 index 0000000..3e80be2 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/leomanga_com.py @@ -0,0 +1,50 @@ +from manga_py.provider import Provider +from .helpers.std import Std + + +class LeoMangaCom(Provider, Std): + + def 
get_chapter_index(self) -> str: + url = self.chapter + idx = self.re.search(r'/manga/[^/]+/capitulo-(\d+)/([^/]+)/', url).groups() + return '{1}-{0}'.format(*idx) + + def get_main_content(self): + return self._get_content('{}/manga/{}') + + def get_manga_name(self) -> str: + return self._get_name('/manga/([^/]+)') + + def _get_first_href(self, parser): + n = self.http().normalize_uri + url = n(parser[0].get('href')) + select0 = self.html_fromstring(url, '.list-group .cap-option') + if select0: + return n(select0[0].get('href')) + return None + + def get_chapters(self): + chapter = self.document_fromstring(self.content, '.caps-list a') + if chapter: + url = self._get_first_href(chapter) + if url: + selector = '.viewcap-info select.form-control' + parser = self.html_fromstring(url) + options = self._first_select_options(parser, selector) + return [i.get('value') for i in options[::-1]] + return [] + + def get_files(self): + n = self.http().normalize_uri + items = self.html_fromstring(self.chapter, '.vertical .cap-images') + return [n(i.get('src')) for i in items] + + def get_cover(self) -> str: + return self._cover_from_content('.cover img', 'data-original') + + def book_meta(self) -> dict: + # todo meta + pass + + +main = LeoMangaCom diff --git a/manga-py-stable_1.x/manga_py/providers/leviatanscans_com.py b/manga-py-stable_1.x/manga_py/providers/leviatanscans_com.py new file mode 100644 index 0000000..424fcee --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/leviatanscans_com.py @@ -0,0 +1,29 @@ +from manga_py.provider import Provider +from .helpers.std import Std + + +class LeviatanScansCom(Provider, Std): + + def get_chapter_index(self) -> str: + return self.re.search(r'.+/(\d+/\d+)', self.chapter).group(1).replace('/', '-') + + def get_main_content(self): + return self.http_get(self.get_url()) + + def get_manga_name(self) -> str: + return self.text_content(self.content, '.text-highlight') + + def get_chapters(self): + return self._elements('.list a.item-author') + + def get_files(self): + content = self.http_get(self.chapter) + images = self.re.search(r'chapterPages\s*=\s*(\[.+?\])', content).group(1) + return self.json.loads(images) + + def get_cover(self) -> str: + image = self._elements('.media-comic-card .media-content')[0] + return self.parse_background(image) + + +main = LeviatanScansCom diff --git a/manga-py-stable_1.x/manga_py/providers/lhscans_com.py b/manga-py-stable_1.x/manga_py/providers/lhscans_com.py new file mode 100644 index 0000000..9f778ce --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/lhscans_com.py @@ -0,0 +1,30 @@ +from manga_py.provider import Provider +from .helpers.std import Std + + +class LhScansCom(Provider, Std): + def get_chapter_index(self) -> str: + re = self.re.compile(r'-chapter-(\d+(?:\.\d+)?)') + return re.search(self.chapter).group(1).replace('.', '-') + + def get_main_content(self): + return self._get_content('{}/manga-{}.html') + + def get_manga_name(self) -> str: + return self._get_name(r'(?:read|manga)-(.+?)(?:-chapter-.+)?.html') + + def get_chapters(self): + return self._elements('a.chapter') + + def get_files(self): + parser = self.html_fromstring(self.chapter) + return self._images_helper(parser, '.chapter-content .chapter-img') + + def get_cover(self) -> str: + return self._cover_from_content('.thumbnail img') + + def book_meta(self) -> dict: + pass + + +main = LhScansCom diff --git a/manga-py-stable_1.x/manga_py/providers/lhtranslation_com.py b/manga-py-stable_1.x/manga_py/providers/lhtranslation_com.py new file mode 
100644 index 0000000..59197fa --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/lhtranslation_com.py @@ -0,0 +1,24 @@ +from time import sleep + +from requests import get + +from .gomanga_co import GoMangaCo +from .helpers.std import Std + + +class LHTranslationCom(GoMangaCo, Std): + _name_re = r'/(?:truyen|manga)-([^/]+)\.html' + _content_str = '{}/manga-{}.html' + _chapters_selector = '#tab-chapper td > a.chapter,#list-chapters a.chapter' + + def get_chapter_index(self) -> str: + idx = self.re.search(r'-chapter-(.+?)\.html', self.chapter) + return idx.group(1).replace('.', '-') + + def get_files(self): + content = self.http_get(self.chapter) + parser = self.document_fromstring(content, 'article#content,.chapter-content', 0) + return self._images_helper(parser, 'img.chapter-img', 'data-original') + + +main = LHTranslationCom diff --git a/manga-py-stable_1.x/manga_py/providers/lolibooru_moe.py b/manga-py-stable_1.x/manga_py/providers/lolibooru_moe.py new file mode 100644 index 0000000..b72e3b7 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/lolibooru_moe.py @@ -0,0 +1,52 @@ +from .danbooru_donmai_us import DanbooruDonmaiUs +from .helpers.std import Std + + +class LoliBooruMoe(DanbooruDonmaiUs, Std): + _archive_prefix = 'lolibooru_' + + def get_manga_name(self) -> str: + if ~self.get_url().find(r'tags='): + self._is_tag = True + self._manga_name = self._get_name(r'[\?&]tags=([^&]+)') + else: + self._manga_name = self._get_name(r'/post/show/(\d+)') + return self._archive_prefix + self._manga_name + + def get_chapters(self): # pragma: no cover + if self._is_tag: + pages = self._elements('#paginator .pagination > a') + if pages: + count = self.re.search(r'\bpage=(\d+)', pages[-2].get('href')).group(1) + max_page = int(count) + if max_page > 1001: + self.log('1000 pages maximum!') + max_page = 1000 + return range(1, max_page)[::-1] + return [1] + + def _tag_images(self): # pragma: no cover + url = '{}/post?tags={}&page={}'.format( + self.domain, + self._manga_name, + self.chapter, + ) + parser = self.html_fromstring(url) + return self._images_helper(parser, '#post-list-posts a.directlink', 'href') + + def _post_image(self, url): # pragma: no cover + if isinstance(url, str): + parser = self.html_fromstring(url) + else: + parser = url + + full_size = parser.cssselect('.status-notice a.highres-show') + if full_size: + return [full_size[0].get('href')] + return [parser.cssselect('#image')[0].get('src')] + + def chapter_for_json(self): + return self.get_url() + + +main = LoliBooruMoe diff --git a/manga-py-stable_1.x/manga_py/providers/lolivault_net.py b/manga-py-stable_1.x/manga_py/providers/lolivault_net.py new file mode 100644 index 0000000..cb1610c --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/lolivault_net.py @@ -0,0 +1,10 @@ +from .gomanga_co import GoMangaCo + + +class LoliVaultNet(GoMangaCo): + _name_re = '/online/[^/]+/([^/]+)/' + _content_str = '{}/online/series/{}/' + _chapters_selector = '.list .group .element .title a' + + +main = LoliVaultNet diff --git a/manga-py-stable_1.x/manga_py/providers/luscious_net.py b/manga-py-stable_1.x/manga_py/providers/luscious_net.py new file mode 100644 index 0000000..fde1a7b --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/luscious_net.py @@ -0,0 +1,45 @@ +from manga_py.provider import Provider +from .helpers.std import Std + + +class LusciousNet(Provider, Std): + + def get_archive_name(self) -> str: + return 'archive' + + def get_chapter_index(self) -> str: + return '0' + + def get_main_content(self): + name = 
self._get_name('/albums?/([^/]+)/') + return self.http_get('{}/albums/{}/'.format(self.domain, name)) + + def get_manga_name(self) -> str: + return self._get_name('/albums?/([^/]+)_\d+/') + + def get_chapters(self): + return [b'0'] + + def get_files(self): + items = self._elements('#album_meta_ds .item > a') + n = self.http().normalize_uri + images = [] + for i in items: + content = self.http_get(n(i.get('href')), headers={'x-requested-with': 'XMLHttpRequest'}) + image = self.document_fromstring(content, '#single_picture') + if image: + images.append(n(image[0].get('src'))) + return images + + def get_cover(self) -> str: + return self._cover_from_content('.album_cover_item img') + + def book_meta(self) -> dict: + # todo meta + pass + + def chapter_for_json(self): + return self.get_url() + + +main = LusciousNet diff --git a/manga-py-stable_1.x/manga_py/providers/mang_as.py b/manga-py-stable_1.x/manga_py/providers/mang_as.py new file mode 100644 index 0000000..7b46894 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/mang_as.py @@ -0,0 +1,43 @@ +from sys import stderr + +from manga_py.provider import Provider +from .helpers.std import Std + + +class MangAs(Provider, Std): + + def get_chapter_index(self) -> str: + idx = self.re.search('/manga/[^/]+/([^/]+)', self.chapter).group(1) + return idx.replace('.', '-') + + def get_main_content(self): + return self._get_content('{}/manga/{}') + + def get_manga_name(self) -> str: + return self._get_name('/manga/([^/]+)') + + def get_chapters(self): + return self._elements('.chapter-title-rtl > a') + + def get_files(self): + content = self.http_get(self.chapter) + self.http().referer = self.chapter + items = self.re.search(r'var\s+pages\s*=\s*(\[.+\])', content) + if not items: + self.log('Images not found!', file=stderr) + return [] + n = self.http().normalize_uri + items = self.json.loads(items.group(1)) + return [n(i.get('page_image')) for i in items] + + def prepare_cookies(self): + self._base_cookies(self.get_url()) + + def get_cover(self) -> str: + return self._cover_from_content('.boxed > img.img-responsive') + + def book_meta(self) -> dict: + pass + + +main = MangAs diff --git a/manga-py-stable_1.x/manga_py/providers/manga_ae.py b/manga-py-stable_1.x/manga_py/providers/manga_ae.py new file mode 100644 index 0000000..efd8240 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/manga_ae.py @@ -0,0 +1,38 @@ +from manga_py.provider import Provider +from .helpers.std import Std + + +class MangaAe(Provider, Std): + + def get_chapter_index(self) -> str: + return self.re.search(r'\.ae/[^/]+/(\d+)', self.chapter).group(1) + + def get_main_content(self): + return self._get_content('{}/{}/') + + def get_manga_name(self) -> str: + return self._get_name(r'\.ae/([^/]+)') + + def get_chapters(self): + return self._elements('li > a.chapter') + + def get_files(self): + img_selector = '#showchaptercontainer img' + parser = self.html_fromstring(self.chapter) + pages = parser.cssselect('#morepages a + a') + images = self._images_helper(parser, img_selector) + if pages: + for i in pages: + parser = self.html_fromstring(i.get('href')) + images += self._images_helper(parser, img_selector) + return images + + def get_cover(self) -> str: + return self._cover_from_content('img.manga-cover') + + def book_meta(self) -> dict: + # todo meta + pass + + +main = MangaAe diff --git a/manga-py-stable_1.x/manga_py/providers/manga_fox_com.py b/manga-py-stable_1.x/manga_py/providers/manga_fox_com.py new file mode 100644 index 0000000..fb7c384 --- /dev/null +++ 
b/manga-py-stable_1.x/manga_py/providers/manga_fox_com.py @@ -0,0 +1,32 @@ +from manga_py.provider import Provider +from .helpers.std import Std + + +class MangaFoxCom(Provider, Std): + + def get_chapter_index(self) -> str: + re = self.re.compile(r'/chapter-(\d+(?:-\d+)?)') + return re.search(self.chapter).group(1).replace('.', '-') + + def get_main_content(self): + return self._get_content('{}/{}') + + def get_manga_name(self) -> str: + return self._get_name(r'\.(?:com|io)/([^/]+)') + + def get_chapters(self): + return self._elements('.list_chapter a') + + def get_files(self): + parser = self.html_fromstring(self.chapter) + return self._images_helper(parser, '#list_chapter img') + + def get_cover(self) -> str: + return self._cover_from_content('.info_image img') + + def book_meta(self) -> dict: + # todo meta + pass + + +main = MangaFoxCom diff --git a/manga-py-stable_1.x/manga_py/providers/manga_mexat_com.py b/manga-py-stable_1.x/manga_py/providers/manga_mexat_com.py new file mode 100644 index 0000000..6d98880 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/manga_mexat_com.py @@ -0,0 +1,43 @@ +from manga_py.provider import Provider +from .helpers.std import Std + + +class MangaMexatCom(Provider, Std): + + def get_chapter_index(self) -> str: + return self.chapter[1].replace('.', '-') + + def get_main_content(self): + return self._get_content('{}/category/{}/') + + def get_manga_name(self) -> str: + return self._get_name('/category/([^/]+)') + + def get_chapters(self): + items = self._elements('.content .entry td + td > a') + return [(i.get('href', i.text_content().strip())) for i in items] + + def _get_img(self, parser): + return self._images_helper(parser, '.pic > a > img') + + def get_files(self): + parser = self.html_fromstring(self.chapter) + pages = self._first_select_options(parser, '#manga_pid', True) + images = self._get_img(parser) + for p in pages: + url = self.chapter + '?pid=' + p.get('value') + parser = self.html_fromstring(url) + images += self._get_img(parser) + return images + + def get_cover(self) -> str: + return self._cover_from_content('.archive-meta img') + + def book_meta(self) -> dict: + pass + + def chapter_for_json(self): + return self.chapter[0] + + +main = MangaMexatCom diff --git a/manga-py-stable_1.x/manga_py/providers/manga_online_biz.py b/manga-py-stable_1.x/manga_py/providers/manga_online_biz.py new file mode 100644 index 0000000..e79a5f5 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/manga_online_biz.py @@ -0,0 +1,48 @@ +from manga_py.provider import Provider +from .helpers.std import Std, Http2 + + +# Archive downloading example. 
Without images +class MangaOnlineBiz(Provider, Std): + chapter_url = '' + _idx = 0 + + def get_chapter_index(self) -> str: + url = self._storage['chapters'][self._idx] + idx = self.re.search(r'/download/[^/]+/.+?_(\d+)_(\d+)', url).groups() + return '{}-{}'.format(*idx) + + def get_main_content(self): + return self._get_content('{}/{}.html') + + def get_manga_name(self) -> str: + return self._get_name(r'\.biz/([^/]+)(?:/|\.html)') + + def _after_download(self, idx, _path): + self._idx = idx + 1 + + def loop_chapters(self): + http2 = Http2(self) + http2.download_archives(self._storage['chapters']) + http2.after_download = self._after_download + + def get_chapters(self): + s, c = r'MangaChapter\((.+)\);', self.content + items = self.json.loads(self.re.search(s, c).group(1)) + return [i.get('downloadUrl') for i in items] + + def get_files(self): + return [] + + def get_cover(self): + return self._cover_from_content('.item > .image > img') + + def book_meta(self) -> dict: + # todo meta + pass + + def chapter_for_json(self): + return self.chapter_url + + +main = MangaOnlineBiz diff --git a/manga-py-stable_1.x/manga_py/providers/manga_online_com_ua.py b/manga-py-stable_1.x/manga_py/providers/manga_online_com_ua.py new file mode 100644 index 0000000..511da35 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/manga_online_com_ua.py @@ -0,0 +1,71 @@ +from manga_py.provider import Provider +from .helpers.std import Std + + +class MangaOnlineCom(Provider, Std): + __local_storage = None + + def __init_storage(self): + if not self.__local_storage: + self.__local_storage = {} + + def get_chapter_index(self) -> str: + self.__init_storage() + idx_reg = r'/\d+.+-(\d+).+?-(\d+).*?html' + idx = self.re.search(idx_reg, self.chapter).groups() + if not idx: + idx_reg = r'/\d+.+-(\d+).+?html' + idx = (self.re.search(idx_reg, self.chapter).group(1), 0) + return '{:0>3}-{:0>3}'.format(*idx) + + def get_main_content(self): + return ['0'] + + def get_manga_name(self) -> str: + self.__init_storage() + if not self.__local_storage.get('chapters', False): + self.__local_storage['chapters'] = self.get_chapters() + if len(self.__local_storage['chapters']): + return self.re.search(r'/manga/(.+)/.+\.html', self.__local_storage['chapters'][0]).group(1) + raise AttributeError() + + def _get_chapters_cmanga(self): + s = '#dle-content > div > a[href*="/manga/"]' + return self.html_fromstring(self.get_url(), s)[::-1] + + def _get_chapters_manga(self): + s = '.fullstory_main select.selectmanga option' + items = self.html_fromstring(self.get_url(), s) + return [i.get('value') for i in items[::-1]] + + def get_chapters(self): + self.__init_storage() + if self.re.search('/cmanga/', self.get_url()): + return self._get_chapters_cmanga() + if self.re.search(r'/manga/[^/]+/\d+-', self.get_url()): + return self._get_chapters_manga() + return [] + + @staticmethod + def _get_pages_count(parser): + _len = len(parser.cssselect('#pages_all a')) + return _len + 1 if _len else 0 + + def get_files(self): + chapter = self.chapter + parser = self.html_fromstring(chapter, '.main_body', 0) + pages = self._get_pages_count(parser) + images = [] + idx = self.re.search(r'/manga/[^/]+/(\d+)', chapter).group(1) + for n in range(pages): + url = '{}/engine/ajax/sof_fullstory.php?id={}&page={}'.format(self.domain, idx, n + 1) + parser = self.html_fromstring(url)[0] + images += self._images_helper(parser, 'img') + return images + + def book_meta(self) -> dict: + # todo meta + pass + + +main = MangaOnlineCom diff --git 
a/manga-py-stable_1.x/manga_py/providers/manga_room_com.py b/manga-py-stable_1.x/manga_py/providers/manga_room_com.py new file mode 100644 index 0000000..f69bcec --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/manga_room_com.py @@ -0,0 +1,45 @@ +from manga_py.provider import Provider +from .helpers.std import Std + + +class MangaRoomCom(Provider, Std): + + def get_archive_name(self) -> str: # fixme! #49 + idx = self.get_chapter_index().split('-') + if len(idx) > 1: + return '{}vol_{:0>3}'.format(*idx) + return 'vol_{:0>3}'.format(idx[0]) + + def get_chapter_index(self) -> str: + re = self.re.search(r'version-(\d+)', self.chapter) + version = '' + if re: + version = '-' + re.group(1) + re = self.re.search(r'chapter-(\d+)', self.chapter) + return '{}{}'.format(version, re.group(1)) + + def get_main_content(self): + return self._get_content('{}/manga/{}') + + def get_manga_name(self) -> str: + return self._get_name('/manga/([^/]+)') + + def get_chapters(self): + # : downloaded all versions + return ['{}/manga/{}'.format( + self.domain, i.get('href') + ) for i in self._elements('.chapter-list a')] + + def get_files(self): + parser = self.html_fromstring(self.chapter) + return self._images_helper(parser, '#main_div .lazy_image_page') + + def get_cover(self) -> str: + return self._cover_from_content('img#manga_thumps') + + def book_meta(self) -> dict: + # todo meta + pass + + +main = MangaRoomCom diff --git a/manga-py-stable_1.x/manga_py/providers/manga_sh.py b/manga-py-stable_1.x/manga_py/providers/manga_sh.py new file mode 100644 index 0000000..2939edd --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/manga_sh.py @@ -0,0 +1,56 @@ +from manga_py.provider import Provider +from .helpers.std import Std + + +class MangaSh(Provider, Std): + _api_url = 'https://api.manga.sh/api/v1/' + _cdn_url = 'https://cdn.manga.sh/' + + def get_chapter_index(self) -> str: + chapter = self.chapter + _ch = chapter.get('ChapterNumberAbsolute', self.chapter_id) + _vol = chapter.get('VolumeNumber', 0) + _ch_v = chapter.get('ChapterNumberVolume', '') + if _ch_v: + _ch_v = '_' + _ch_v + return '{}-{}{}'.format(_vol, _ch, _ch_v) + + def get_main_content(self): + idx = self._get_name(r'/comics/(\d+)') + url = '{}series_chapters?query=SeriesId.Id:{}&order=asc&sortby=TimeUploaded&limit=0&offset=0' + content = self.http_get(url.format(self._api_url, idx)) + return self.json.loads(content) + + def get_manga_name(self) -> str: + content = self.content.get('response')[0] + return content.get('SeriesId').get('Name') + + def get_chapters(self): + return list(self.content.get('response', [])) + + def _url_helper(self, chapter): + return '{}series_chapters/{}'.format( + self._api_url, + chapter.get('Hash') + ) + + def get_files(self): + url = self._url_helper(self.chapter) + items = self.json.loads(self.http_get(url)) + items = items.get('response', [{}])[0].get('SeriesChaptersFiles', {}) + return [self._cdn_url + i.get('Name') for i in items] + + def get_cover(self) -> str: + content = self.content.get('response')[0] + content = content.get('SeriesId').get('CoverImage') + return '{}/covers/{}'.format(self._cdn_url, content) + + def book_meta(self) -> dict: + # todo meta + pass + + def chapter_for_json(self): + return self._url_helper(self.chapter) + + +main = MangaSh diff --git a/manga-py-stable_1.x/manga_py/providers/manga_tr_com.py b/manga-py-stable_1.x/manga_py/providers/manga_tr_com.py new file mode 100644 index 0000000..9fd0d49 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/manga_tr_com.py @@ 
-0,0 +1,44 @@ +from manga_py.provider import Provider +from .helpers.std import Std + + +class MangaTrCom(Provider, Std): + + def get_chapter_index(self) -> str: + chapter = self.chapter + idx = self.re.search('-chapter-(.+)\.html', chapter).group(1) + return '-'.join(idx.split('.')) + + def get_main_content(self): + return self._get_content('{}/manga-{}.html') + + def get_manga_name(self) -> str: + url = self.get_url() + re = r'\d-read-(.+)-chapter-' + if ~url.find('/manga-'): + re = r'/manga-(.+)\.html' + return self.re.search(re, url).group(1) + + def get_chapters(self): + return self._elements('#results td.left a') + + def get_files(self): + img_selector = 'img.chapter-img' + parser = self.html_fromstring(self.chapter) + pages = self._first_select_options(parser, '.chapter-content select') + images = self._images_helper(parser, img_selector) + n = self.http().normalize_uri + for i in pages: + parser = self.html_fromstring(n(i)) + images += self._images_helper(parser, img_selector) + return images + + def get_cover(self) -> str: + return self._cover_from_content('img.thumbnail') + + def book_meta(self) -> dict: + # todo meta + pass + + +main = MangaTrCom diff --git a/manga-py-stable_1.x/manga_py/providers/manga_tube_me.py b/manga-py-stable_1.x/manga_py/providers/manga_tube_me.py new file mode 100644 index 0000000..724fffc --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/manga_tube_me.py @@ -0,0 +1,44 @@ +from manga_py.provider import Provider +from .helpers.std import Std + + +class MangaTubeMe(Provider, Std): + + def get_chapter_index(self) -> str: + txt = self.chapter[0].strip() + idx = self.re.search(r'.*?(\d+(?:\.\d+)?)', txt) + if not idx: + return str(self.chapter_id) + return idx.group(1).replace('.', '-') + + def get_main_content(self): + return self._get_content('{}/series/{}/') + + def get_manga_name(self) -> str: + return self._get_name('/series/([^/]+)/') + + def get_chapters(self): + items = self._elements('#chapter li > a') + return [(i.text_content(), i.get('href')) for i in items] + + def get_files(self): + n = self.http().normalize_uri + content = self.http_get(n(self.chapter[1])) + img_path = self.re.search(r'img_path[\'"]?:\s[\'"](.+)[\'"]', content) + img_path = n(img_path.group(1)) + images = self.re.search(r'pages[\'"]?:\s(\[\{.+\}\])', content) + images = self.json.loads(images.group(1)) + return ['{}{}'.format(img_path, i.get('file_name')) for i in images] + + def get_cover(self): + return self._cover_from_content('.owl-carousel img', 'data-original') + + def book_meta(self) -> dict: + # todo meta + pass + + def chapter_for_json(self): + return self.chapter[1] + + +main = MangaTubeMe diff --git a/manga-py-stable_1.x/manga_py/providers/mangaarabteam_com.py b/manga-py-stable_1.x/manga_py/providers/mangaarabteam_com.py new file mode 100644 index 0000000..94907a2 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/mangaarabteam_com.py @@ -0,0 +1,9 @@ +from ._3asq_org import ThreeAsqOrg + + +class MangaArabTeamCom(ThreeAsqOrg): + + def get_chapter_index(self) -> str: + return self.chapter.split('/')[-1] + +main = MangaArabTeamCom diff --git a/manga-py-stable_1.x/manga_py/providers/mangabat_com.py b/manga-py-stable_1.x/manga_py/providers/mangabat_com.py new file mode 100644 index 0000000..c1e9e4b --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/mangabat_com.py @@ -0,0 +1,12 @@ +from .manganelo_com import MangaNeloCom + + +class MangaBatCom(MangaNeloCom): + def get_main_content(self): + return self.http_get(self.get_url()) + + def 
get_manga_name(self) -> str: + return self.text_content(self.content, 'h1.entry-title') + + +main = MangaBatCom diff --git a/manga-py-stable_1.x/manga_py/providers/mangabb_co.py b/manga-py-stable_1.x/manga_py/providers/mangabb_co.py new file mode 100644 index 0000000..5decc02 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/mangabb_co.py @@ -0,0 +1,65 @@ +from manga_py.provider import Provider +from .helpers.std import Std + + +class MangabbCo(Provider, Std): + + def get_chapter_index(self) -> str: + chapter = self.chapter + idx = chapter.rfind('/chapter-') + return chapter[1 + idx:] + + def get_main_content(self): + return self._get_content('{}/manga/{}') + + def get_manga_name(self) -> str: + return self._get_name(r'\.co/(?:manga/)?([^/]+)') + + def get_chapters(self): + content = self.document_fromstring(self.content, '#chapters a') + if not content: + return [] + selector = '#asset_1 select.chapter_select > option' + result = self.html_fromstring(content, selector) + return [i.get('value') for i in result[::-1]] + + @staticmethod + def __get_img(parser): + return parser.cssselect('#manga_viewer > a > img')[0].get('src') + + @staticmethod + def _img_lifehack1(img, pages_list, images): + n = 1 + for i in pages_list: + n += 1 + images.append('{}{}{}'.format(img[0], n, img[1])) + + def _img_lifehack2(self, pages_list, images): + for page in pages_list: + parser = self.html_fromstring(page) + images.append(self.__get_img(parser)) + + def get_files(self): + parser = self.html_fromstring(self.chapter, '#body', 0) + result = parser.cssselect('#asset_2 select.page_select option + option') + pages_list = [i.get('value') for i in result] + _first_image = self.__get_img(parser) + images = [_first_image] + + img = self.re.search(r'(.+/)\d(\.\w+)', _first_image) + if img: # livehack + self._img_lifehack1(img.groups(), pages_list, images) + else: + self._img_lifehack2(pages_list, images) + + return images + + def get_cover(self): + return self._cover_from_content('#series_image') + + def book_meta(self) -> dict: + # todo meta + pass + + +main = MangabbCo diff --git a/manga-py-stable_1.x/manga_py/providers/mangabox_me.py b/manga-py-stable_1.x/manga_py/providers/mangabox_me.py new file mode 100644 index 0000000..c1e8e1d --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/mangabox_me.py @@ -0,0 +1,34 @@ +from manga_py.provider import Provider +from .helpers.std import Std + + +class MangaBoxMe(Provider, Std): + def get_chapter_index(self) -> str: + return self.re.search(r'/episodes/(\d+)', self.chapter).group(1) + + def get_main_content(self): + idx = self._get_name(r'/reader/(\d+)/episodes/') + return self.http_get('{}/reader/{}/episodes/'.format(self.domain, idx)) + + def get_manga_name(self) -> str: + selector = 'meta[property="og:title"]' + title = self.document_fromstring(self.content, selector, 0) + return title.get('content').strip() + + def get_chapters(self): + selector = '.episodes_list .episodes_item > a' + return self._elements(selector) + + def get_files(self): + items = self.html_fromstring(self.chapter, 'ul.slides li > img') + return [i.get('src') for i in items] + + def get_cover(self): + return self._cover_from_content('.episodes_img_main', 'data-src') + + def book_meta(self) -> dict: + # todo meta + pass + + +main = MangaBoxMe diff --git a/manga-py-stable_1.x/manga_py/providers/mangacanblog_com.py b/manga-py-stable_1.x/manga_py/providers/mangacanblog_com.py new file mode 100644 index 0000000..41c8481 --- /dev/null +++ 
b/manga-py-stable_1.x/manga_py/providers/mangacanblog_com.py @@ -0,0 +1,74 @@ +from urllib.parse import unquote_plus + +from manga_py.provider import Provider +from .helpers.std import Std + + +class MangaCanBlogCom(Provider, Std): + _home_link = None + + def get_archive_name(self) -> str: + ch = self.chapter + idx = self.re.search(r'/.+/.+?(?:-indonesia-)(.+)\.html', ch) + if not idx: + idx = self.re.search(r'/.+/(.+)\.html', ch) + idx = idx.group(1) + if ~idx.find('-terbaru'): + idx = idx[:idx.find('-terbaru')] + return self.normal_arc_name({'vol': [self.chapter_id, idx]}) + + def get_chapter_index(self) -> str: + return str(self.chapter_id) + + def get_main_content(self): + return self.http_get(self._home_link) + + @staticmethod + def _clear_name(a): + name = a.text_content() + name = unquote_plus(name.split('|')[0].strip()) + if ~name.find(' Indonesia'): + name = name[:name.find(' Indonesia')] + return name + + def get_manga_name(self) -> str: + url = self.get_url() + selector = '.navbar a[href*=".html"]' + content = self.http_get(url) + a = self.document_fromstring(content) + is_chapter = a.cssselect(selector) + if len(is_chapter) < 1: + selector = '#latestchapters h1' + a = a.cssselect(selector) + self._home_link = url + else: + a = is_chapter[0] + self._home_link = a.get('href') + return self._clear_name(a) + + def get_chapters(self): + items = self._elements('a.chaptersrec') + result = [] + for i in items: + url = i.get('href') + _ = url.find('-terbaru-1') + if _: + url = url[:_] + '-terbaru.html' + result.append(url) + return result + + def get_files(self): + content = self.http_get(self.chapter) + items = self._elements('#imgholder .picture', content) + return [i.get('src') for i in items] + + def get_cover(self) -> str: + # return self._cover_from_content('.cover img') + pass + + def book_meta(self) -> dict: + # todo meta + pass + + +main = MangaCanBlogCom diff --git a/manga-py-stable_1.x/manga_py/providers/mangachan_me.py b/manga-py-stable_1.x/manga_py/providers/mangachan_me.py new file mode 100644 index 0000000..e27b716 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/mangachan_me.py @@ -0,0 +1,55 @@ +from manga_py.provider import Provider +from .helpers.std import Std + + +class MangaChanMe(Provider, Std): + _full_name_selector = r'/(?:online|manga|related)/(\d+-.+\.html)' + _idx_selector = r'/(?:online|manga|related)/(\d+)-' + + def get_chapter_index(self) -> str: + name = self.chapter + idx = self.re.search(r'_v(\d+)_ch(\d+)', name).groups() + return '{}-{}'.format(*idx) + + def get_main_content(self): + pass + + def _online_(self, url): + if self.re.search(r'/online/\d+', url): + content = self.http_get(url) + url = self.re.search(r'content_id.+?(/manga/.+\.html)', content).group(1) + return url + + def get_manga_name(self) -> str: + _name_selector = r'/(?:online|manga|related)/\d+-(.+)\.html' + url = self._online_(self.get_url()) + return self.re.search(_name_selector, url).group(1) + + def get_chapters(self): + url = self._online_(self.get_url()) + url = '{}/manga/{}'.format( + self.domain, + self.re.search(self._full_name_selector, url).group(1) + ) + return self.html_fromstring(url, '.table_cha .manga a') + + def get_files(self): + content = self.http_get(self.chapter) + items = self.re.search(r'"?fullimg"?\s?:\s?(\[.+\])', content).group(1) + images = self.json.loads(items.replace('",]', '"]')) # patch + return images + + def get_cover(self): + selector = r'\.me/[^/]+/(\d+-.+\.html)' + url = self._get_name(selector) + url = '{}/manga/{}'.format(self.domain, 
url) + img = self._elements('#cover', self.http_get(url)) + if img and len(img): + return img[0].get('src') + + def book_meta(self) -> dict: + # todo meta + pass + + +main = MangaChanMe diff --git a/manga-py-stable_1.x/manga_py/providers/mangachan_me_download.py b/manga-py-stable_1.x/manga_py/providers/mangachan_me_download.py new file mode 100644 index 0000000..3b90b4e --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/mangachan_me_download.py @@ -0,0 +1,44 @@ +# from manga_py.fs import dirname, path_join, get_temp_path, rename +from manga_py.provider import Provider +from .helpers.std import Std, Http2 + + +class MangaChanMe(Provider, Std): + def get_chapter_index(self) -> str: + return str(self.chapter_id) + + def get_main_content(self): + pass + + def get_manga_name(self) -> str: + name = r'\.me/[^/]+/\d+-(.+)\.html' + return self._get_name(name) + + def loop_chapters(self): + items = self._storage['chapters'][::-1] + n = self.http().normalize_uri + Http2(self).download_archives([n(i) for i in items]) + + def get_chapters(self): + selector = r'\.me/[^/]+/(\d+-.+\.html)' + url = self._get_name(selector) + url = '{}/download/{}'.format(self.domain, url) + return self.html_fromstring(url, 'table#download_table tr td + td > a') + + def get_files(self): + return [] + + def get_cover(self): + selector = r'\.me/[^/]+/(\d+-.+\.html)' + url = self._get_name(selector) + url = '{}/manga/{}'.format(self.domain, url) + img = self._elements('#cover', self.http_get(url)) + if img and len(img): + return img[0].get('src') + + def book_meta(self) -> dict: + # todo meta + pass + + +main = MangaChanMe diff --git a/manga-py-stable_1.x/manga_py/providers/mangaclub_ru.py b/manga-py-stable_1.x/manga_py/providers/mangaclub_ru.py new file mode 100644 index 0000000..aa5baba --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/mangaclub_ru.py @@ -0,0 +1,40 @@ +from manga_py.provider import Provider +from .helpers.std import Std + + +class MangaClubRu(Provider, Std): + local_storage = None + + def get_archive_name(self) -> str: + idx = self.get_chapter_index().split('-') + return self.normal_arc_name({'vol': idx[0], 'ch': idx[1]}) + + def get_chapter_index(self) -> str: + idx = self.re.search(r'/manga/view/[^/]+/v(\d+)-c(\d+).html') + return '{}-{}'.format(*idx.groups()) + + def get_main_content(self): + if not self.local_storage: + self.get_manga_name() + return self.http_get('{}/{}.html'.format(self.domain, self.local_storage[0])) + + def get_manga_name(self) -> str: + selector = r'\.ru(?:/manga/view)?/(?:(\d+-.+)/(.+)\.html)' + html = self.re.search(selector, self.get_url()) + self.local_storage = html.groups() + return self.local_storage[1] + + def get_chapters(self): + selector = '.manga-ch-list-item > a[href^="http"]' + return self.document_fromstring(self.content, selector) + + def get_files(self): + result = self.html_fromstring(self.chapter, '.manga-lines-page a.manga-lines') + return [i.get('data-i') for i in result] + + def book_meta(self) -> dict: + # todo meta + pass + + +main = MangaClubRu diff --git a/manga-py-stable_1.x/manga_py/providers/mangadeep_com.py b/manga-py-stable_1.x/manga_py/providers/mangadeep_com.py new file mode 100644 index 0000000..d828cb0 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/mangadeep_com.py @@ -0,0 +1,42 @@ +from .helpers.std import Std +from .mangaonline_today import MangaOnlineToday + + +class MangaDeepCom(MangaOnlineToday, Std): + + def get_chapter_index(self) -> str: + idx = self.re.search(r'\.com/[^/]+/([^/]+)', self.chapter) + return 
idx.group(1) + + def document_fromstring(self, body, selector: str = None, idx: int = None): + if ~body.find(' str: + return self._get_name(r'\.com/([^/]+)') + + def get_chapters(self): + selector = 'ul.lst a.lst' + items = self.document_fromstring(self.content, selector) + pages = self.document_fromstring(self.content, '.pgg li > a') + if pages: + idx = self.re.search(r'-list/(\d+)', pages[-1].get('href')) + for i in range(1, int(idx.group(1))): + content = self.http_get('{}/{}/chapter-list/{}/'.format( + self.domain, + self.manga_name, + i + 1 + )) + items += self.document_fromstring(content, selector) + return items + + def get_cover(self) -> str: + return self._cover_from_content('img.cvr') + + def book_meta(self) -> dict: + # todo meta + pass + + +main = MangaDeepCom diff --git a/manga-py-stable_1.x/manga_py/providers/mangadex_com.py b/manga-py-stable_1.x/manga_py/providers/mangadex_com.py new file mode 100644 index 0000000..f8e17d2 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/mangadex_com.py @@ -0,0 +1,133 @@ +from manga_py.provider import Provider +from .helpers.std import Std + + +class MangaDexCom(Provider, Std): + _links_on_page = 100 + _home_url = '' + + def get_archive_name(self) -> str: + vol = self.chapter['vol'] + if len(vol) == 0: + vol = '0' + return self.normal_arc_name({ + 'vol': vol, + 'ch': [*self.chapter['ch'].split('.'), self.chapter['lng']], + }) + + def get_chapter_index(self) -> str: + fmt = '{}-{}' + if len(self.chapter['lng']) > 0: + fmt += '-{}' + return fmt.format( + self.chapter['ch'].replace('.', '-'), + self.chapter['vol'], + self.chapter['lng'], + ) + + def get_main_content(self): + url = self.get_url() + if url.find('/title/') < 0: + url = self.html_fromstring(url, 'a.manga-link', 0) + url = self.http().normalize_uri(url.get('href')) + self._home_url = self.re.search(r'(.+/title/\d+/[^/])', url).group(1) + return self.http_get(self._home_url) + + def get_manga_name(self) -> str: + url = self.get_url() + if ~url.find('/title/'): + name = self.html_fromstring(url, '.card-header', 0).text_content() + else: + name = self.html_fromstring(url, '.manga-link', 0).get('title') + return name.strip() + + def _all_langs(self, items): + languages = [] + for i in items: + languages.append(i['flag'] + '\t--- ' + i['lng']) + return list(set(languages)) + + def _filter_langs(self, chapters, lng): + if len(lng) < 1: + return chapters + lng = lng.split(' ') + result = [] + for i in chapters: + if i['flag'] == '' or i['flag'] in lng: + result.append(i) + return result + + def get_chapters(self): + parser = self.document_fromstring(self.content) + # https://mangadex.org/manga/153/detective-conan + pages = parser.cssselect('.pagination li.paging a') + items = self._get_chapters_links(parser) + if pages: + pages = self.re.search(r'.+/(\d+)', pages[0].get('href')).group(1) + for i in range(2, int(pages) + 1): + _parser = self.html_fromstring('{}/chapters/{}/'.format( + self._home_url, i + )) + items += self._get_chapters_links(_parser) + chapters = self._parse_chapters(items) + lng = self.quest( + [], + 'Available languages:\n{}\n\nPlease, select your lang (empty for all, space for delimiter lang):'.format( + '\n'.join(self._all_langs(chapters)) + )) + return self._filter_langs(chapters, lng) + + def _get_chapters_links(self, parser): + return parser.cssselect('div.chapter-row[data-chapter]') + + def get_files(self): + idx = self.re.search(r'/chapter/(\d+)', self.chapter['link']).group(1) + try: + data = self.json.loads(self.http_get('{}/api/chapter/{}'.format( + 
self.domain, idx + ))) + n = self.http().normalize_uri + items = [] + for item in data.get('page_array', []): + items.append('{}{}/{}'.format( + n(data.get('server', '/data/')), data.get('hash'), item + )) + return items + except Exception as e: + return [] + + def get_cover(self) -> str: + return self._cover_from_content('.card-body .rounded') + + def prepare_cookies(self): + self._storage['cookies']['mangadex_h_toggle'] = '1' + + def _parse_chapters(self, items): + n = self.http().normalize_uri + result = [] + for tr in items: + ch = tr.cssselect('a[href*="/chapter/"]')[0] + lng = tr.cssselect('span.flag') + _ch = { + 'ch': tr.get('data-chapter'), + 'vol': tr.get('data-volume'), + 'link': n(ch.get('href')), + } + if lng: + _ch['lng'] = lng[0].attrib['title'] + _ch['flag'] = lng[0].attrib['class'].replace('rounded flag flag-', '') + else: + _ch['lng'] = '' + _ch['flag'] = '' + result.append(_ch) + return result + + def book_meta(self) -> dict: + # todo meta + pass + + def chapter_for_json(self): + return self.chapter['link'] + + +main = MangaDexCom diff --git a/manga-py-stable_1.x/manga_py/providers/mangadex_info.py b/manga-py-stable_1.x/manga_py/providers/mangadex_info.py new file mode 100644 index 0000000..c69d3c7 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/mangadex_info.py @@ -0,0 +1,33 @@ +from manga_py.provider import Provider +from .helpers.std import Std + + +class MangaDexCom(Provider, Std): + _content = None + + def get_chapter_index(self) -> str: + return self.re.search(r'-chapter-(\d+(?:\.\d+)?)', self.chapter).group(1).replace('.', '-') + + def get_main_content(self): + if self._content is None: + self._content = self.http_get(self.get_url()) + return self._content + + def get_manga_name(self) -> str: + return self.text_content(self.content, '.info-title') + + def get_chapters(self): + return self._elements('.widget .table.table-striped td > a') + + def get_files(self): + parser = self.html_fromstring(self.chapter + '/0') + return self._images_helper(parser, '#view-chapter img') + + def get_cover(self) -> str: + return self._cover_from_content('.img-thumbnail') + + def prepare_cookies(self): + self.http().allow_send_referer = False + + +main = MangaDexCom diff --git a/manga-py-stable_1.x/manga_py/providers/mangadex_org.py b/manga-py-stable_1.x/manga_py/providers/mangadex_org.py new file mode 100644 index 0000000..16bde11 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/mangadex_org.py @@ -0,0 +1,80 @@ +from manga_py.provider import Provider +from .helpers.std import Std + + +class MangaDexOrg(Provider, Std): + __content = None + __chapters = None + + def get_archive_name(self) -> str: + return self.normal_arc_name({ + 'vol': self.chapter['volume'], + 'ch': self.chapter['chapter'], + }) + + def get_chapter_index(self) -> str: + return self.chapter_for_json() + + def manga_idx(self): + return self.re.search(r'/manga/(\d+)', self.get_url()).group(1) + + def get_main_content(self): + if self.__content is None: + content = self.http_get('https://mangadex.org/api/?id={}&type=manga'.format(self.manga_idx())) + self.__content = self.json.loads(content) + return self.__content + + def get_manga_name(self) -> str: + return self.content['manga']['title'] + + @property + def _chapters(self): + chapters = [] + for idx in self.content['chapter']: + ch = self.content['chapter'][idx] # type: dict + ch.update({ + 'key': idx, + }) + chapters.append(ch) + return chapters + + def get_chapters(self): + if self.__chapters is None: + languages = self.quest( + [], + 
'Available languages:\n{}\n\nPlease, select your lang (empty for all, space for delimiter lang):'.format( + '\n'.join(self.languages()) + )).split(' ') + self.__chapters = self.filter_chapters(languages) + return self.__chapters + + def languages(self) -> list: + languages = [] + for lang in self._chapters: + if lang['lang_code'] not in languages: + languages.append(lang['lang_code']) + return languages + + def filter_chapters(self, languages: list) -> list: + if len(languages) == 0 or languages[0] == '': + return self._chapters + return [chapter for chapter in self._chapters if chapter['lang_code'] in languages] + + def get_files(self): + content = self.json.loads(self.http_get('{}/api/?id={}&server=null&type=chapter'.format( + self.domain, + self.chapter['key'] + ))) + return ['{}{}/{}'.format(content['server'], content['hash'], img) for img in content['page_array']] + + def get_cover(self) -> str: + return '{}{}'.format( + self.domain, + self.content['manga']['cover_url'], + ) + + def chapter_for_json(self) -> str: + return '{}-{}'.format(self.chapter['volume'], self.chapter['chapter']) + + +main = MangaDexOrg diff --git a/manga-py-stable_1.x/manga_py/providers/mangaeden_com.py b/manga-py-stable_1.x/manga_py/providers/mangaeden_com.py new file mode 100644 index 0000000..eb79aec --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/mangaeden_com.py @@ -0,0 +1,50 @@ +from manga_py.provider import Provider +from .helpers.std import Std + + +class MangaEdenCom(Provider, Std): + uriRegex = r'/([^/]+)/[^/]+-manga/([^/]+)/?' + apiUri = '{}/api/{}/{}/' # (domain, chapter|manga, + __lang = 'en' + __cdn_url = 'https://cdn.mangaeden.com/mangasimg/' + + def get_chapter_index(self) -> str: + return str(self.chapter[0]).replace('.', '-') + + def get_main_content(self): + return self.http_get('{domain}/{lang}/{lang}-manga/{name}/'.format( + domain=self.domain, + lang=self.__lang, + name=self.manga_name, + )) + + def get_manga_name(self) -> str: + re = self.re.search(self.uriRegex, self.get_url()) + self.__lang = re.group(1) + return re.group(2) + + def get_chapters(self): # issue #61 + manga_idx = self.re.search(r'.manga_id2\s?=\s?"(.+?)";', self.content).group(1) + return self.json.loads(self.http_get(self.apiUri.format( + self.domain, + 'manga', + manga_idx, + ))).get('chapters', []) + + def get_files(self): + items = self.json.loads(self.http_get(self.apiUri.format( + self.domain, + 'chapter', + self.chapter[3] + ))).get('images', []) + return [self.__cdn_url + i[1] for i in items] + + def get_cover(self) -> str: + return self._cover_from_content('#rightContent .info img') + + def book_meta(self) -> dict: + # todo meta + pass + + +main = MangaEdenCom diff --git a/manga-py-stable_1.x/manga_py/providers/mangafans_us.py b/manga-py-stable_1.x/manga_py/providers/mangafans_us.py new file mode 100644 index 0000000..4266855 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/mangafans_us.py @@ -0,0 +1,16 @@ +from .manganelo_com import MangaNeloCom + + +class MangaFansUs(MangaNeloCom): + chapter_re = r'[/-]chap(?:ter)?[_-](\d+(?:\.\d+)?(?:-v\d)?)' + + def get_chapter(self): + return '%s/0' % self.chapter + + def get_manga_name(self) -> str: + _ = self.re.search(r'(/(?:read-)?manga/)([^/]+)', self.get_url()) + self._prefix = _.group(1) + return _.group(2) + + +main = MangaFansUs diff --git a/manga-py-stable_1.x/manga_py/providers/mangaforall_com.py b/manga-py-stable_1.x/manga_py/providers/mangaforall_com.py new file mode 100644 index 0000000..1816e72 --- /dev/null +++ 
b/manga-py-stable_1.x/manga_py/providers/mangaforall_com.py @@ -0,0 +1,40 @@ +from manga_py.provider import Provider +from .helpers.std import Std + + +class MangaForAllCom(Provider, Std): + + def get_archive_name(self) -> str: + idx = self.get_chapter_index().split('-') + return 'vol_{:0>3}-{}_{}'.format( + *self._idx_to_x2(idx), + self.chapter_id + ) + + def get_chapter_index(self) -> str: + re = self.re.compile(r'-(\d+(?:\.\d+)?)-') + ch = self.chapter + return '-'.join(re.search(ch).group(1).split('.')) + + def get_main_content(self): + return self._get_content('{}/m/{}') + + def get_manga_name(self) -> str: + return self._get_name('/m/([^/]+)') + + def get_chapters(self): + return self._elements('.Chapters ul.list-unstyled > li a') + + def get_files(self): + parser = self.html_fromstring(self.chapter) + return self._images_helper(parser, '.container ul.list-unstyled > li img') + + def get_cover(self) -> str: + return self._cover_from_content('meta[property="og:image"]', 'content') + + def book_meta(self) -> dict: + # todo meta + pass + + +main = MangaForAllCom diff --git a/manga-py-stable_1.x/manga_py/providers/mangafreak_net_download.py b/manga-py-stable_1.x/manga_py/providers/mangafreak_net_download.py new file mode 100644 index 0000000..ff131fa --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/mangafreak_net_download.py @@ -0,0 +1,44 @@ +from manga_py.provider import Provider +from .helpers.std import Std, Http2 + + +class MangaFreakNet(Provider, Std): + + def get_archive_name(self): + return self.chapter[0] + + def get_chapter_index(self) -> str: + return self.re.search(r'.+_(\d+)', self.chapter[1]).group(1) + + def get_main_content(self): + return self.http_get(self.get_url()) + + def get_manga_name(self) -> str: + return self._get_name('/Manga/([^?/#]+)') + + def get_chapters(self): + items = self._elements('.manga_series_list td a[download]') + return [(i.get('download'), i.get('href')) for i in items] + + def loop_chapters(self): + items = self._storage['chapters'][::-1] + Http2(self).download_archives([i[1] for i in items]) + + def get_files(self): + pass + + def prepare_cookies(self): + self.cf_protect(self.get_url()) + + def get_cover(self) -> str: + return self._cover_from_content('.manga_series_image img') + + def book_meta(self) -> dict: + # todo meta + pass + + def chapter_for_json(self): + return self.chapter[1] + + +main = MangaFreakNet diff --git a/manga-py-stable_1.x/manga_py/providers/mangafull_org.py b/manga-py-stable_1.x/manga_py/providers/mangafull_org.py new file mode 100644 index 0000000..2b29fd6 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/mangafull_org.py @@ -0,0 +1,28 @@ +from manga_py.provider import Provider +from .helpers.std import Std + + +class MangaFullOrg(Provider, Std): + + def get_chapter_index(self) -> str: + re = self.re.compile('\w/chapter-(\d+(?:\.\d+)?)') + return re.search(self.chapter).group(1).replace('.', '-') + + def get_main_content(self): + return self._get_content('{}/manga/{}') + + def get_manga_name(self) -> str: + return self._get_name('/manga/([^/]+)') + + def get_chapters(self): + return self._elements('.content .chapter-list a') + + def get_files(self): + parser = self.html_fromstring(self.chapter + '/0') + return self._images_helper(parser, '.each-page > img') + + def get_cover(self) -> str: + return self._cover_from_content('.cover-detail > img') + + +main = MangaFullOrg diff --git a/manga-py-stable_1.x/manga_py/providers/mangago_me.py b/manga-py-stable_1.x/manga_py/providers/mangago_me.py new file mode 
100644 index 0000000..5c90b55 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/mangago_me.py @@ -0,0 +1,79 @@ +from manga_py.crypt import mangago_me +from manga_py.fs import rename, unlink +from manga_py.provider import Provider +from .helpers.std import Std + + +class MangaGoMe(Provider, Std): + _enc_images = None + _crypt = None + + def get_archive_name(self) -> str: + idx = self.get_chapter_index().split('-') + tp = self.re.search('/(\w{1,4})/[^/]*?\d+', self.chapter) + idx = [self.chapter_id, idx[-1]] # do not touch this! + if tp: + idx.append(tp.group(1)) + return self.normal_arc_name({'vol': idx}) + + def get_chapter_index(self) -> str: + selector = r'/\w{1,4}/[^/]*?(\d+)(?:[^\d]+(\d+))?' + idx = self.re.search(selector, self.chapter).groups() + if idx[1] is not None: + fmt = '{}-{}' + return fmt.format(*idx) + return idx[0] + + def get_main_content(self): + return self._get_content(self.get_url()) + + def get_manga_name(self) -> str: + return self._get_name(r'/read-manga/([^/]+)/') + + def get_chapters(self): + content = self._elements('#information') + if not content: + return [] + chapters = content[0].cssselect('#chapter_table a.chico') + raws = content[0].cssselect('#raws_table a.chicor') + return chapters + raws + + def prepare_cookies(self): + self._crypt = mangago_me.MangaGoMe() + self.cf_protect(self.domain) + + def get_files(self): + self._enc_images = {} + content = self.http(True, { + 'referer': self.chapter, + 'cookies': self.http().cookies, + 'user_agent': self.http().user_agent + }).get(self.chapter) + re = self.re.search(r"imgsrcs\s*=\s*['\"](.+)['\"]", content) + if not re: + return [] + items = self._crypt.decrypt(re.group(1)) + return items.split(',') + + def before_file_save(self, url, idx): + if ~url.find('/cspiclink/'): + self._enc_images[idx] = url + return url + + def after_file_save(self, _path: str, idx: int): + url = self._enc_images.get(idx, None) + if url is not None: + _dst = _path[:_path.rfind('.')] + '_' + _path[_path.rfind('.'):] + self._crypt.puzzle(_path, _dst, url) + unlink(_path) + rename(_dst, _path) + + def get_cover(self): + return self._cover_from_content('#information .cover img') + + def book_meta(self) -> dict: + # todo meta + pass + + +main = MangaGoMe diff --git a/manga-py-stable_1.x/manga_py/providers/mangahasu_se.py b/manga-py-stable_1.x/manga_py/providers/mangahasu_se.py new file mode 100644 index 0000000..021368c --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/mangahasu_se.py @@ -0,0 +1,41 @@ +from manga_py.provider import Provider +from .helpers.std import Std + + +class MangaHasuSe(Provider, Std): + + def get_chapter_index(self) -> str: + re = self.re.compile('chapter-+(\d+)(?:-+(\d+))?') + idx = re.search(self.chapter).groups() + if idx[1] is not None: + return '{}-{}'.format(*idx) + return idx[0] + + def get_main_content(self): + url = self.get_url() + test = self.re.search(r'\.\w{2,5}/[^/]+-p\d+.html', url) + if not test: + self.cf_protect(self.get_url()) + url = self.html_fromstring(url, 'a.itemcrumb.active', 0).get('href') + return self.http_get(url) + + def get_manga_name(self) -> str: + return self._get_name(r'\.\w{2,5}/([^/]+)(?:-p\d+.html|/)') + + def get_chapters(self): + return self._elements('.list-chapter .name a') + + def get_files(self): + if not self._params.get('cf-protect'): + self.cf_protect(self.chapter) + parser = self.html_fromstring(self.chapter) + return self._images_helper(parser, '.img > img') + + def get_cover(self) -> str: + return self._cover_from_content('.info-img > img') + + def 
book_meta(self) -> dict: + pass + + +main = MangaHasuSe diff --git a/manga-py-stable_1.x/manga_py/providers/mangaheaven_club.py b/manga-py-stable_1.x/manga_py/providers/mangaheaven_club.py new file mode 100644 index 0000000..2b048ff --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/mangaheaven_club.py @@ -0,0 +1,27 @@ +from manga_py.provider import Provider +from .helpers.std import Std + + +class MangaHeavenClub(Provider, Std): + def get_chapter_index(self) -> str: + ch = self.re.search(r'-chapter-(\d+(?:\.\d+)?)', self.chapter) + return ch.group(1).replace('.', '-') + + def get_main_content(self): + return self._get_content('{}/read-manga/{}') + + def get_manga_name(self) -> str: + return self._get_name(r'/read-manga/([^/]+)') + + def get_chapters(self): + return self._elements('.chapter > a') + + def get_files(self): + parser = self.html_fromstring('%s/0' % self.chapter) + return self._images_helper(parser, '.page-chapter > img') + + def get_cover(self) -> str: + return self._cover_from_content('.detail-info img') + + +main = MangaHeavenClub diff --git a/manga-py-stable_1.x/manga_py/providers/mangaheaven_xyz.py b/manga-py-stable_1.x/manga_py/providers/mangaheaven_xyz.py new file mode 100644 index 0000000..342c625 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/mangaheaven_xyz.py @@ -0,0 +1,27 @@ +from manga_py.provider import Provider +from .helpers.std import Std + + +class MangaHeavenXyz(Provider, Std): + def get_chapter_index(self) -> str: + ch = self.re.search(r'/chapter-(\d+(?:\.\d+)?)', self.chapter) + return ch.group(1).replace('.', '-') + + def get_main_content(self): + return self._get_content('{}/manga/{}') + + def get_manga_name(self) -> str: + return self._get_name(r'/manga/([^/]+)') + + def get_chapters(self): + return self._elements('.table-scroll > .table td > a') + + def get_files(self): + parser = self.html_fromstring('%s/0' % self.chapter) + return self._images_helper(parser, '.manga-container > img') + + def get_cover(self) -> str: + return self._cover_from_content('.__image > img') + + +main = MangaHeavenXyz diff --git a/manga-py-stable_1.x/manga_py/providers/mangahere_cc.py b/manga-py-stable_1.x/manga_py/providers/mangahere_cc.py new file mode 100644 index 0000000..020eaaf --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/mangahere_cc.py @@ -0,0 +1,54 @@ +from manga_py.provider import Provider +from .helpers.std import Std +from manga_py.crypt.base_lib import BaseLib + + +class MangaHereCc(Provider, Std): + + def get_chapter_index(self) -> str: + chapter = self.chapter + selector = r'/manga/[^/]+/[^\d]+(\d+)/[^\d]+(\d+)' + idx = self.re.search(selector, chapter) + if idx: + return '-'.join(idx.groups()) + selector = r'/manga/[^/]+/[^\d]+(\d+)' + return self.re.search(selector, chapter).group(1) + + def get_main_content(self): + return self._get_content('{}/manga/{}') + + def get_manga_name(self) -> str: + return self._get_name('/manga/([^/]+)') + + def get_chapters(self): + return self._elements('.detail-main-list a') + + def get_files(self): + n = self.http().normalize_uri + content = self.http_get(self.chapter) + parser = self.document_fromstring(content) + pages = parser.cssselect('.pager-list-left span span + a')[0].get('data-page') + chapter_id = self.re.search(r'chapterid\s*=\s*(\d+)', content).group(1) + skip = 0 + images = [] + url = self.re.search(r'(.+/)', self.chapter).group(1) + for page in range(1, int(pages) + 1): + if skip > 0: + skip -= 1 + continue + js = 
self.http_get('{}chapterfun.ashx?cid={}&page={}&key={}'.format(url, chapter_id, page, '')) + result = BaseLib.exec_js('m = ' + self.re.search(r'eval\((.+)\)', js).group(1), 'm') + img = BaseLib.exec_js(result, 'd') + skip = len(img) - 1 + images += img + return [n(i) for i in images] + + def get_cover(self): + return self._cover_from_content('.detail-info-cover-img') + + def prepare_cookies(self): + self._base_cookies() + self.http().cookies['isAdult'] = '1' + + +main = MangaHereCc diff --git a/manga-py-stable_1.x/manga_py/providers/mangahi_net.py b/manga-py-stable_1.x/manga_py/providers/mangahi_net.py new file mode 100644 index 0000000..b536279 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/mangahi_net.py @@ -0,0 +1,8 @@ +from .zmanga_net import ZMangaNet + + +class MangaHiNet(ZMangaNet): + _type = 'chapter' + + +main = MangaHiNet diff --git a/manga-py-stable_1.x/manga_py/providers/mangahome_com.py b/manga-py-stable_1.x/manga_py/providers/mangahome_com.py new file mode 100644 index 0000000..6a1b7f1 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/mangahome_com.py @@ -0,0 +1,42 @@ +from manga_py.provider import Provider +from .helpers.std import Std + + +class MangaHomeCom(Provider, Std): + + def get_chapter_index(self) -> str: + selector = r'/manga/[^/]+/[^\d]+(\d+)(?:\.(\d+))?' + idx = self.re.search(selector, self.chapter).groups() + return self._join_groups(idx) + + def get_main_content(self): + return self._get_content('{}/manga/{}') + + def get_manga_name(self) -> str: + return self._get_name('/manga/([^/]+)') + + def get_chapters(self): + return self._elements('.detail-chlist a') + + def get_files(self): + n = self.http().normalize_uri + img_selector = 'img#image' + _url = n(self.chapter) + parser = self.html_fromstring(_url) + p_selector = '.mangaread-top .mangaread-pagenav select' + pages = self._first_select_options(parser, p_selector) + images = self._images_helper(parser, img_selector) + for i in pages: + parser = self.html_fromstring(n(i.get('value'))) + images += self._images_helper(parser, img_selector) + return images + + def get_cover(self): + return self._cover_from_content('.detail-cover') + + def book_meta(self) -> dict: + # todo meta + pass + + +main = MangaHomeCom diff --git a/manga-py-stable_1.x/manga_py/providers/mangahub_io.py b/manga-py-stable_1.x/manga_py/providers/mangahub_io.py new file mode 100644 index 0000000..e6462d7 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/mangahub_io.py @@ -0,0 +1,34 @@ +from manga_py.provider import Provider +from .helpers.std import Std + + +class MangaHubIo(Provider, Std): + + def get_chapter_index(self) -> str: + chapter = self.chapter + return self.re.search(r'/chapter/[^/]+/\w+-([^/]+)', chapter).group(1) + + def get_main_content(self): + return self._get_content('{}/manga/{}') + + def get_manga_name(self) -> str: + return self._get_name('/(?:manga|chapter)/([^/]+)') + + def get_chapters(self): + return self._elements('.list-group .list-group-item > a') + + def get_files(self): + content = self.http_get(self.chapter) + items = self._elements('#mangareader img[src*="/file/"]', content) + n = self.http().normalize_uri + return [n(i.get('src')) for i in items] + + def get_cover(self) -> str: + return self._cover_from_content('.row > div > img.img-responsive') + + def book_meta(self) -> dict: + # todo meta + pass + + +main = MangaHubIo diff --git a/manga-py-stable_1.x/manga_py/providers/mangahub_ru.py b/manga-py-stable_1.x/manga_py/providers/mangahub_ru.py new file mode 100644 index 
0000000..0288cf7 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/mangahub_ru.py @@ -0,0 +1,39 @@ +import html + +from manga_py.provider import Provider +from .helpers.std import Std + + +class MangaHubRu(Provider, Std): + + def get_chapter_index(self) -> str: + idx = self.re.search(r'/read/[^/]+/[^\d]+(\d+)/(\d+)/', self.chapter).groups() + return '{}-{}'.format(*idx) + + def get_main_content(self): + return self._get_content('{}/{}') + + def get_manga_name(self) -> str: + return self._get_name(r'\.ru/([^/]+)/?') + + def get_chapters(self): + return self._elements('.b-catalog-list__name a[href^="/"]') + + def get_files(self): + parser = self.html_fromstring(self.chapter, '.b-main-container .b-reader__full') + if not parser: + return [] + result = parser[0].get('data-js-scans') + result = self.json.loads(html.unescape(result.replace(r'\/', '/'))) + n = self.http().normalize_uri + return [n(i['src']) for i in result] + + def get_cover(self): + return self._cover_from_content('.manga-section-image__img img') + + def book_meta(self) -> dict: + # todo meta + pass + + +main = MangaHubRu diff --git a/manga-py-stable_1.x/manga_py/providers/mangaid_me.py b/manga-py-stable_1.x/manga_py/providers/mangaid_me.py new file mode 100644 index 0000000..c21709a --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/mangaid_me.py @@ -0,0 +1,26 @@ +from .gomanga_co import GoMangaCo +from .helpers.std import Std + + +class KomikIdCom(GoMangaCo, Std): + _name_re = '/manga/([^/]+)' + _content_str = '{}/manga/{}/' + _chapters_selector = '.animeinfo ul > li span a:not([rel])' + + def get_chapter_index(self) -> str: + re = r'/[^/]+\.\w+/[\w-]+?-(\d+)(?:[^\d](\d+))?' + idx = self.re.search(re, self.chapter) + return self._join_groups(idx.groups()) + + def get_files(self): + parser = self.html_fromstring(self.chapter) + return self._images_helper(parser, '#readerarea img') + + def get_cover(self) -> str: + return self._cover_from_content('.attachment-post-thumbnail') + + def prepare_cookies(self): + pass + + +main = KomikIdCom diff --git a/manga-py-stable_1.x/manga_py/providers/mangaindo_web_id.py b/manga-py-stable_1.x/manga_py/providers/mangaindo_web_id.py new file mode 100644 index 0000000..caa5717 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/mangaindo_web_id.py @@ -0,0 +1,40 @@ +from manga_py.provider import Provider +from .helpers.std import Std + + +class MangaIndoWebId(Provider, Std): + + def get_chapter_index(self) -> str: + selector = r'-chapter-([^/]+)' + return self.re.search(selector, self.chapter).group(1) + + def get_main_content(self): + return self._get_content('{}/{}/') + + def get_manga_name(self) -> str: + url = self.get_url() + pos = url.find('-chapter-') + if pos > 0: + item = self.html_fromstring(self.get_url(), 'article[id^="post-"]', 0) + item = self.re.search(r'category-([^\s]+)', item.get('class')).group(1) + return item + return self.re.search(r'\.id/([^/]+)', url).group(1) + + def get_chapters(self): + return self._elements('.lcp_catlist li > a') + + def get_files(self): + r = self.http().get_redirect_url + params = self.chapter, '.entry-content img.aligncenter' + items = self.html_fromstring(*params) + return [r(i.get('src')) for i in items] + + def get_cover(self) -> str: + return self._cover_from_content('#m-cover > img') + + def book_meta(self) -> dict: + # todo meta + pass + + +main = MangaIndoWebId diff --git a/manga-py-stable_1.x/manga_py/providers/mangainn_net.py b/manga-py-stable_1.x/manga_py/providers/mangainn_net.py new file mode 100644 index 
0000000..c25dbab --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/mangainn_net.py @@ -0,0 +1,36 @@ +from manga_py.provider import Provider +from .helpers.std import Std + + +class MangaInnNet(Provider, Std): + + def get_chapter_index(self) -> str: + chapter = self.chapter + idx = self.re.search(r'\.net/[^/]+/([^/]+)', chapter).group(1).split('.') + return '{}-{}'.format(*self._idx_to_x2(idx)) + + def get_main_content(self): + return self._get_content('{}/{}') + + def get_manga_name(self) -> str: + name = self.re.search(r'\.net/([^/]+)', self.get_url()) + return name.group(1) + + def get_chapters(self): + return self.document_fromstring(self.content, '#chapter_list a[href]') + + def get_files(self): + content = self.http_get(self.chapter) + images = self.re.search(r'var\s+images\s*=\s*(\[\{.+?\}\])', content).group(1) + images = self.json.loads(images) + return [i.get('url') for i in images] + + def get_cover(self): + pass # TODO + + def book_meta(self) -> dict: + # todo meta + pass + + +main = MangaInnNet diff --git a/manga-py-stable_1.x/manga_py/providers/mangajinnofansub_com.py b/manga-py-stable_1.x/manga_py/providers/mangajinnofansub_com.py new file mode 100644 index 0000000..48a99de --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/mangajinnofansub_com.py @@ -0,0 +1,9 @@ +from .gomanga_co import GoMangaCo + + +class MangaJinnoFansubCom(GoMangaCo): + _name_re = '/lector/[^/]+/([^/]+)' + _content_str = '{}/lector/{}' + + +main = MangaJinnoFansubCom diff --git a/manga-py-stable_1.x/manga_py/providers/mangakakalot_com.py b/manga-py-stable_1.x/manga_py/providers/mangakakalot_com.py new file mode 100644 index 0000000..8f626d8 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/mangakakalot_com.py @@ -0,0 +1,29 @@ +from manga_py.provider import Provider +from .helpers.std import Std + + +class MangaKakalotCom(Provider, Std): + + def get_chapter_index(self) -> str: + re = self.re.search('/chapter_([^/]+)', self.chapter) + return re.group(1).replace('.', '-', 2) + + def get_main_content(self): + return self._get_content('{}/manga/{}') + + def get_manga_name(self) -> str: + return self._get_name('/(?:manga|chapter)/([^/]+)/?') + + def get_chapters(self): + return self._elements('.chapter-list span a') + + def get_files(self): + result = self.html_fromstring(self.chapter, '#vungdoc img') + return [i.get('src') for i in result] + + def book_meta(self) -> dict: + # todo meta + pass + + +main = MangaKakalotCom diff --git a/manga-py-stable_1.x/manga_py/providers/mangakatana_com.py b/manga-py-stable_1.x/manga_py/providers/mangakatana_com.py new file mode 100644 index 0000000..eb84de1 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/mangakatana_com.py @@ -0,0 +1,48 @@ +from manga_py.provider import Provider +from .helpers.std import Std + + +class MangaKatanaCom(Provider, Std): + name_re = r'/manga/([^/]+)' + + def get_chapter_index(self) -> str: + re = self.re.compile(r'/manga/.+?\d/c(\d+(?:\.\d)?(?:-v\d)?)') + return re.search(self.chapter).group(1).replace('.', '-') + + def get_main_content(self): + return self.http_get('{}/manga/{}'.format( + self.domain, self._name() + )) + + def get_manga_name(self) -> str: + name = self._name() + return name[:name.rindex('.')] + + def get_chapters(self): + return self._elements('.chapters .chapter a') + + def get_files(self): + content = self.http_get(self.chapter) + items = self.re.search( + r'\w\s?=\s?(\[[\'"]\d.+\d[\'"]).?\]\s?;', + content + ).group(1).replace("'", '"') + ']' + images = [] + for img in 
self.json.loads(items): + uri = '' + for c in img.split(' '): + uri += chr(int(c)) + images.append(uri) + return images + + def get_cover(self) -> str: + return self._cover_from_content('.cover img') + + def book_meta(self) -> dict: + pass + + def _name(self): + return self.re.search(self.name_re, self.get_url()).group(1) + + +main = MangaKatanaCom diff --git a/manga-py-stable_1.x/manga_py/providers/mangaku_web_id.py b/manga-py-stable_1.x/manga_py/providers/mangaku_web_id.py new file mode 100644 index 0000000..bd0fc99 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/mangaku_web_id.py @@ -0,0 +1,39 @@ +from manga_py.provider import Provider +from .helpers.std import Std + + +class MangakuWebId(Provider, Std): + + def get_archive_name(self) -> str: + ch = self.chapter + return self.normal_arc_name({'vol': [ + self.chapter_id, + self.re.search(':[^/]+/([^/]+)', ch).group(1) + ]}) + + def get_chapter_index(self) -> str: + return str(self.chapter_id) + + def get_main_content(self): + return self.http_get(self.get_url()) + + def get_manga_name(self) -> str: + return self._get_name(r'\.in/([^/]+)') + + def get_chapters(self): + return self._elements('div[style] a[target]') + + def get_files(self): + content = self.http_get(self.chapter) + items = self._elements('.entry .separator > a > img', content) + return [i.get('src') for i in items] + + def get_cover(self) -> str: + return self._cover_from_content('span > small img') + + def book_meta(self) -> dict: + # todo meta + pass + + +main = MangakuWebId diff --git a/manga-py-stable_1.x/manga_py/providers/mangalib_me.py b/manga-py-stable_1.x/manga_py/providers/mangalib_me.py new file mode 100644 index 0000000..63d83ae --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/mangalib_me.py @@ -0,0 +1,40 @@ +from manga_py.provider import Provider +from .helpers.std import Std + + +class MangaLibMe(Provider, Std): + + def get_chapter_index(self) -> str: + selector = r'\.me/[^/]+/[^\d]+(\d+)/[^\d]+([^/]+)' + idx = self.re.search(selector, self.chapter).groups() + return '-'.join(idx) + + def get_main_content(self): + return self._get_content('{}/{}') + + def get_manga_name(self) -> str: + return self._get_name(r'\.me/([^/]+)') + + def get_chapters(self): + return self._elements('.chapters-list .chapter-item__name a') + + def get_files(self): + content = self.http_get(self.chapter) + base_url = self.re.search(r'\bimgUrl: *[\'"]([^\'"]+)', content).group(1) + images = self.re.search(r'\bpages: *(\[\{.+\}\])', content).group(1) + images = self.json.loads(images) + imgs = ['https://img2.mangalib.me{}{}'.format( + base_url, + i.get('page_image'), + ) for i in images] + return imgs + + def get_cover(self): + return self._cover_from_content('img.manga__cover') + + def book_meta(self) -> dict: + # todo meta + pass + + +main = MangaLibMe diff --git a/manga-py-stable_1.x/manga_py/providers/mangalife_us.py b/manga-py-stable_1.x/manga_py/providers/mangalife_us.py new file mode 100644 index 0000000..21eb7a9 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/mangalife_us.py @@ -0,0 +1,52 @@ +from manga_py.provider import Provider +from .helpers.std import Std + + +class MangaLifeUs(Provider, Std): + img_selector = '.image-container .CurImage' + + def get_chapter_index(self) -> str: + selector = r'-chapter-(\d+).+-index-(\d+)' + chapter = self.re.search(selector, self.chapter) + if chapter is None: # http://mangalife.us/manga/Ubau-Mono-Ubawareru-Mono #51 + selector = r'-chapter-(\d+(?:\.\d+)?)' + chapter = self.re.search(selector, 
self.chapter).group(1).split('.') + return '-'.join(chapter) + return '{}-{}'.format( + 1 if chapter[1] is None else chapter[1], # todo: maybe 0 ? + chapter[0] + ) + + def get_main_content(self): + return self._get_content('{}/manga/{}') + + def get_manga_name(self) -> str: + uri = self.get_url() + test = uri.find('.us/read-online/') > 0 + if test: + uri = self.html_fromstring(uri, 'a.list-link', 0).get('href') + return self.re.search(r'(?:\.us)?/manga/([^/]+)', uri).group(1) + + def get_chapters(self): + return self._elements('.chapter-list a.list-group-item') + + def get_files(self): + url = self.chapter + parser = self.html_fromstring(url, '.mainWrapper', 0) + pages = parser.cssselect('select.PageSelect')[0].cssselect('option + option') + images = self._images_helper(parser, self.img_selector) + for page in pages: + page_url = self.re.sub(r'(.+page-)\d+(.+)', r'\1{}\2', url) + parser = self.html_fromstring(page_url.format(page.get('value'))) + images += self._images_helper(parser, self.img_selector) + return images + + def get_cover(self) -> str: + return self._cover_from_content('.leftImage img') + + def book_meta(self) -> dict: + # todo meta + pass + + +main = MangaLifeUs diff --git a/manga-py-stable_1.x/manga_py/providers/mangamew_com.py b/manga-py-stable_1.x/manga_py/providers/mangamew_com.py new file mode 100644 index 0000000..468cfc6 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/mangamew_com.py @@ -0,0 +1,37 @@ +from manga_py.provider import Provider +from .helpers.std import Std + + +class MangaMewCom(Provider, Std): + _type = 'manga' + + def get_chapter_index(self) -> str: + re = r'%s/[^/]+/.+?-(\d+(?:-\d+)?)-\d+' % self._type + return self.re.search(re, self.chapter).group(1) + + def get_main_content(self): + url = self.get_url() + if url.find('/' + self._type + '/') == -1: # not found + a = self.html_fromstring(url, 'h1.name a', 0) + url = a.get('href') + return self.http_get(url) + + def get_manga_name(self) -> str: + content = self.http_get(self.get_url()) + return self.text_content(content, 'h1.name a,h1.title') + + def get_chapters(self): + return self._elements('.chapter .item a')[::-1] + + def get_files(self): + parser = self.html_fromstring(self.chapter) + return self._images_helper(parser, '#content .item > img') + + def get_cover(self) -> str: + return self._cover_from_content('.images img') + + def book_meta(self) -> dict: + pass + + +main = MangaMewCom diff --git a/manga-py-stable_1.x/manga_py/providers/mangamew_com_vn.py b/manga-py-stable_1.x/manga_py/providers/mangamew_com_vn.py new file mode 100644 index 0000000..8589aa1 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/mangamew_com_vn.py @@ -0,0 +1,8 @@ +from .mangamew_com import MangaMewCom + + +class MangaMewComVn(MangaMewCom): + _type = 'truyen' + + +main = MangaMewComVn diff --git a/manga-py-stable_1.x/manga_py/providers/manganelo_com.py b/manga-py-stable_1.x/manga_py/providers/manganelo_com.py new file mode 100644 index 0000000..0458d69 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/manganelo_com.py @@ -0,0 +1,36 @@ +from manga_py.provider import Provider +from .helpers.std import Std + + +class MangaNeloCom(Provider, Std): + chapter_re = r'[/-]chap(?:ter)?[_-](\d+(?:\.\d+)?)' + _prefix = '/manga/' + + def get_chapter_index(self) -> str: + return self.re.search(self.chapter_re, self.get_chapter())\ + .group(1).replace('.', '-') + + def get_main_content(self): + return self._get_content('{}%s{}' % self._prefix) + + def get_manga_name(self) -> str: + return 
self._get_name('/(?:manga|chapter)/([^/]+)') + + def get_chapters(self): + return self._elements('.chapter-list a') + + def get_files(self): + parser = self.html_fromstring(self.get_chapter()) + images = self._images_helper(parser, '#vungdoc img') + if not len(images): + images = self._images_helper(parser, '.vung_doc img,.vung-doc img') + return images + + def get_cover(self) -> str: + return self._cover_from_content('.manga-info-pic img') + + def get_chapter(self): + return self.chapter + + +main = MangaNeloCom diff --git a/manga-py-stable_1.x/manga_py/providers/mangaon_net.py b/manga-py-stable_1.x/manga_py/providers/mangaon_net.py new file mode 100644 index 0000000..eb00e7b --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/mangaon_net.py @@ -0,0 +1,56 @@ +from manga_py.provider import Provider +from .helpers.std import Std + + +class MangaOnNet(Provider, Std): + __has_ch = False + + def get_archive_name(self) -> str: + idx = self.get_chapter_index().split('-') + if self.__has_ch: + var = {'vol': idx[0], 'ch': idx[1]} + else: + var = {'vol': idx} + return self.normal_arc_name(var) + + def get_chapter_index(self) -> str: + selector = r'(?:vol-?(\d+))?(?:-ch-?(\d+))' + ch = self.chapter + re = self.re.search(selector, ch) + if re: + self.__has_ch = True + re = re.groups() + return '{}-{}'.format( + 0 if not re[0] else re[0], + re[1] + ) + selector = r'.+-(\d+)' + re = self.re.search(selector, ch) + return '0-{}'.format(re.group(1)) + + def get_main_content(self): + url = '{}/manga-info/{}'.format(self.domain, self.manga_name) + return self.http_get(url) + + def get_manga_name(self) -> str: + url = self.get_url() + if ~url.find('read-online'): + url = self.html_fromstring(url, '.back-info a', 0).get('href') + return self.re.search(r'/manga-info/([^/]+)', url).group(1) + + def get_chapters(self): + return self._elements('.list-chapter li > a') + + def get_files(self): + items = self.html_fromstring(self.chapter, '#list-img img') + return [i.get('src') for i in items] + + def get_cover(self) -> str: + return self._cover_from_content('.cover img') + + def book_meta(self) -> dict: + # todo meta + pass + + +main = MangaOnNet diff --git a/manga-py-stable_1.x/manga_py/providers/mangaonline_com_br.py b/manga-py-stable_1.x/manga_py/providers/mangaonline_com_br.py new file mode 100644 index 0000000..bb375cc --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/mangaonline_com_br.py @@ -0,0 +1,53 @@ +from manga_py.provider import Provider +from .helpers.std import Std + + +class MangaOnlineComBr(Provider, Std): + + def get_chapter_index(self) -> str: + selector = r'\.br/[^/]+/[^/]+/([^/]+)' + return self.re.search(selector, self.chapter).group(1) + + def get_main_content(self): + return self._get_content('{}/{}/') + + def get_manga_name(self) -> str: + return self._get_name(r'\.br/([^/]+)') + + def get_chapters(self): + return self._elements('#volumes-capitulos span > a') + + @staticmethod + def _get_pages_count(parser): + pages = parser.cssselect('select.pagina-capitulo') + if pages: + return len(pages[0].cssselect('option + option')) + return 0 + + def get_files(self): + img_selector = '#imgPadraoVisualizacao img' + url = '{}/capitulo.php?act=getImg&anime={}&capitulo={}&src={}&view=1' + params = ( + self.domain, + self.manga_name, + self.get_chapter_index() + ) + parser = self.html_fromstring(url.format(*params, 1)) + images = self._images_helper(parser, img_selector) + pages = self._get_pages_count(parser) + if pages: + for i in range(int(pages / 2)): + parser = 
self.html_fromstring(url.format(*params, ((i + 1) * 2 + 1))) + images += self._images_helper(parser, img_selector) + + return images + + def get_cover(self) -> str: + return self._cover_from_content('.image > img') + + def book_meta(self) -> dict: + # todo meta + pass + + +main = MangaOnlineComBr diff --git a/manga-py-stable_1.x/manga_py/providers/mangaonline_today.py b/manga-py-stable_1.x/manga_py/providers/mangaonline_today.py new file mode 100644 index 0000000..3dd6a46 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/mangaonline_today.py @@ -0,0 +1,49 @@ +from manga_py.provider import Provider +from .helpers.std import Std + + +class MangaOnlineToday(Provider, Std): + _img_selector = '#sct_content img' + + def get_chapter_index(self) -> str: + idx = self.re.search(r'\.today/[^/]+/([^/]+)', self.chapter) + return idx.group(1).replace('.', '-') + + def get_main_content(self): + return self._get_content('{}/{}/') + + def get_manga_name(self) -> str: + return self._get_name(r'\.today/([^/]+)') + + def get_chapters(self): + return self._elements('ul.chp_lst a') + + def _pages_helper(self, options): + images = [] + chapter = self.chapter + for n in range(1, int(options)): + content = self.html_fromstring('{}{}/'.format(chapter, n * 2 + 1)) + img = content.cssselect(self._img_selector) + for i in img: + images.append(i.get('src')) + return images + + def get_files(self): + images = [] + content = self.html_fromstring(self.chapter) + img = content.cssselect(self._img_selector) + if img: + images = [i.get('src') for i in img] + + options = len(content.cssselect('.cbo_wpm_pag')[0].cssselect('option')) / 2 + .5 + return images + self._pages_helper(options) + + def get_cover(self): + pass # TODO + + def book_meta(self) -> dict: + # todo meta + pass + + +main = MangaOnlineToday diff --git a/manga-py-stable_1.x/manga_py/providers/mangaonlinehere_com.py b/manga-py-stable_1.x/manga_py/providers/mangaonlinehere_com.py new file mode 100644 index 0000000..621b3da --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/mangaonlinehere_com.py @@ -0,0 +1,46 @@ +from manga_py.provider import Provider +from .helpers.std import Std + + +class MangaOnlineHereCom(Provider, Std): + __local_storage = None + + def get_chapter_index(self) -> str: + selector = r'/read-online/[^/]+?(\d+)(?:.(\d+))?' 
+ idx = self.re.search(selector, self.chapter).groups() + return '-'.join([ + idx[0], + '0' if idx[1] is None else idx[1] + ]) + + def get_main_content(self): + return self._get_content('{}/manga-info/{}') + + def get_manga_name(self) -> str: + if not self.__local_storage.get('name', None): + url = self.get_url() + if self.re.search(r'/read-online/', url): + url = self.html_fromstring(url, '.back-info a', 0).get('href') + name = self.re.search(r'/manga-info/([^/]+)', url).group(1) + self.__local_storage['name'] = name + return self.__local_storage['name'] + + def get_chapters(self): + return self._elements('.list-chapter a') + + def prepare_cookies(self): + self.__local_storage = {} + + def get_files(self): + items = self.html_fromstring(self.chapter, '#list-img img') + return [i.get('src') for i in items] + + def get_cover(self): + return self._cover_from_content('.image-info img') + + def book_meta(self) -> dict: + # todo meta + pass + + +main = MangaOnlineHereCom diff --git a/manga-py-stable_1.x/manga_py/providers/mangapanda_com.py b/manga-py-stable_1.x/manga_py/providers/mangapanda_com.py new file mode 100644 index 0000000..d485f61 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/mangapanda_com.py @@ -0,0 +1,44 @@ +from manga_py.provider import Provider +from .helpers.std import Std + + +class MangaPandaCom(Provider, Std): + + def get_chapter_index(self) -> str: + idx = self.re.search(r'\.com/[^/]+/([^/]+)', self.chapter) + return idx.group(1) + + def get_main_content(self): + return self._get_content('{}/{}') + + def get_manga_name(self) -> str: + return self._get_name(r'\.com/([^/]+)') + + def get_chapters(self): + return self._elements('#listing a') + + def get_files(self): + img_selector = '#imgholder img' + url = self.http().normalize_uri(self.chapter) + + parser = self.html_fromstring(url, '#container', 0) + count_pages = self._first_select_options(parser, '#selectpage') + images = self._images_helper(parser, img_selector) + + n = 1 + while n < len(count_pages): + parser = self.html_fromstring('{}/{}'.format(url, 1 + n)) + images += self._images_helper(parser, img_selector) + n += 1 + + return images + + def get_cover(self): + return self._cover_from_content('#mangaimg img') + + def book_meta(self) -> dict: + # todo meta + pass + + +main = MangaPandaCom diff --git a/manga-py-stable_1.x/manga_py/providers/mangapark_me.py b/manga-py-stable_1.x/manga_py/providers/mangapark_me.py new file mode 100644 index 0000000..d86f8f1 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/mangapark_me.py @@ -0,0 +1,38 @@ +from manga_py.provider import Provider +from .helpers.std import Std + + +class MangaParkMe(Provider, Std): + + def get_chapter_index(self) -> str: + selector = r'/manga/[^/]+/s.+?(?:/v(\d+))?/c(\d+[^/]*)' + idx = self.re.search(selector, self.chapter).groups() + if idx[0] is None: + return '0-' + idx[1] + return '-'.join(idx) + + def get_main_content(self): + return self._get_content('{}/manga/{}') + + def get_manga_name(self) -> str: + return self._get_name('/manga/([^/]+)') + + def get_chapters(self): + return self._elements('#list a.ch') + + def get_files(self): + content = self.http_get(self.chapter) + data = self.re.search(r'var\simages\s?=\s?(\[.+\])', content) + if not data: + return [] + return self.json.loads(data.group(1)) + + def get_cover(self): + return self._cover_from_content('.cover img') + + def book_meta(self) -> dict: + # todo meta + pass + + +main = MangaParkMe diff --git a/manga-py-stable_1.x/manga_py/providers/mangapark_org.py 
b/manga-py-stable_1.x/manga_py/providers/mangapark_org.py new file mode 100644 index 0000000..a8d9068 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/mangapark_org.py @@ -0,0 +1,72 @@ +from manga_py.provider import Provider +from .helpers.std import Std + + +class MangaParkOrg(Provider, Std): + __url = None + + def get_chapter_index(self) -> str: + return self.chapter[0].replace('.', '-') + + def get_main_content(self): + return self.http_get(self.__url) + + def get_manga_name(self) -> str: + title = self.html_fromstring(self.get_url(), 'h3 > a, h4 > a', 0) + self.__url = self.http().normalize_uri(title.get('href')) + return title.text_content().strip() + + def _print_variants(self, variants): + self.log('Please, select lang. (empty for all langs)') + for n, i in enumerate(variants): + lng = i.cssselect('.card-header a')[0].text_content() + self.log('\n%d: ' % (n + 1) + lng, end='') + + def _answer(self, max_digit): + while True: + answer = self.quest([], 'Answer (digit): ') + if len(answer) > 0 and (int(answer) > max_digit or int(answer) <= 0): + self.log('Wrong answer! Try one more.') + else: + return answer + + def get_chapters(self): + # multi-lang! + variants = self._elements('div.card') + answer = '1' + if len(variants) > 1: + self._print_variants(variants) + answer = self._answer(len(variants)) + if len(variants) > 1 and not len(answer): + parser = self.document_fromstring(self.content) + else: + parser = variants[int(answer) - 1] + items = parser.cssselect('.card-body i + a') + result = [] + re = self.re.compile(r'[Cc]h\.(\d+(?:\.\d+)?)') + n = self.http().normalize_uri + for i in items: + text = i.text_content() + result.append(( + re.search(text).group(1), + n(i.get('href')), + )) + return result + + def get_files(self): + re = self.re.compile(r'images\s*=\s*(\[.+\]);') + content = self.http_get(self.chapter[1]) + items = self.json.loads(re.search(content).group(1)) + return items + + def get_cover(self) -> str: + return self._cover_from_content('.order-0 > img') + + def book_meta(self) -> dict: + pass + + def chapter_for_json(self) -> str: + return self.chapter[1] + + +main = MangaParkOrg diff --git a/manga-py-stable_1.x/manga_py/providers/mangareader_net.py b/manga-py-stable_1.x/manga_py/providers/mangareader_net.py new file mode 100644 index 0000000..372dbe6 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/mangareader_net.py @@ -0,0 +1,41 @@ +from manga_py.provider import Provider +from .helpers.std import Std + + +class MangaReaderNet(Provider, Std): + + def get_chapter_index(self) -> str: + chapter = self.chapter + return self.re.search(r'\.net/[^/]+/([^/]+)', chapter).group(1) + + def get_main_content(self): + return self._get_content('{}/{}') + + def get_manga_name(self) -> str: + return self._get_name(r'\.net/([^/]+)') + + def get_chapters(self): + return self._elements('#listing a')[::-1] + + @staticmethod + def _get_img(parser): + return [i.get('src') for i in parser.cssselect('#img')] + + def get_files(self): + parser = self.html_fromstring(self.chapter) + pages = self._first_select_options(parser, 'select#pageMenu') + images = self._get_img(parser) + for i in pages: + parser = self.html_fromstring(self.domain + i.get('value')) + images += self._get_img(parser) + return images + + def get_cover(self): + return self._cover_from_content('#mangaimg img') + + def book_meta(self) -> dict: + # todo meta + pass + + +main = MangaReaderNet diff --git a/manga-py-stable_1.x/manga_py/providers/mangareader_site.py 
b/manga-py-stable_1.x/manga_py/providers/mangareader_site.py new file mode 100644 index 0000000..ecabba6 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/mangareader_site.py @@ -0,0 +1,29 @@ +from .mangahub_io import MangaHubIo + + +class MangaReaderSite(MangaHubIo): + __manga_id = None + __graphql = 'https://api2.mangahub.io/graphql?query={}' + + def get_chapter_index(self) -> str: + return self.chapter_for_json().replace('.', '-') + + def get_chapters(self): + chapters = self.json.loads(self.http_get(self.__graphql.format( + '{manga(x:mr01,slug:"%s"){chapters{id,number,title,slug,date}}}' % self.manga_name + ))) + return chapters.get('data', {}).get('manga', {}).get('chapters', [])[::-1] + + def get_files(self): + data = self.http_get(self.__graphql.format( + '{chapter(x:mr01,slug:"%s",number:%d){pages}}' % (self.manga_name, self.chapter['number']) + )) + images = self.json.loads(data) + pages = self.json.loads(images.get('data', {}).get('chapter', {}).get('pages', '{}')) + return ['https://cdn.mangahub.io/file/imghub/%s' % pages[i] for i in pages] + + def chapter_for_json(self) -> str: + return str(self.chapter['number']) + + +main = MangaReaderSite diff --git a/manga-py-stable_1.x/manga_py/providers/mangareader_xyz.py b/manga-py-stable_1.x/manga_py/providers/mangareader_xyz.py new file mode 100644 index 0000000..28a96ab --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/mangareader_xyz.py @@ -0,0 +1,35 @@ +from manga_py.provider import Provider +from .helpers.std import Std + + +class MangaReaderXyz(Provider, Std): + + def get_chapter_index(self) -> str: + re = self.re.compile(r'/chapter-(\d+(?:\.\d+)?)') + return re.search(self.chapter).group(1).replace('.', '-') + + def get_main_content(self): + return self._get_content('{}/manga/{}') + + def get_manga_name(self) -> str: + if ~self.get_url().find('/manga/'): + re = '/manga/([^/]+)' + else: + re = '/([^/]+)/chapter-' + return self._get_name(re) + + def get_chapters(self): + return self._elements('.table td > a,.chapter-list div.row a') + + def get_files(self): + parser = self.html_fromstring(self.chapter + '/0') + return self._images_helper(parser, '#view-chapter img,#vungdoc img') + + def get_cover(self) -> str: + return self._cover_from_content('img.img-thumbnail,.manga-info-pic > img') + + def book_meta(self) -> dict: + pass + + +main = MangaReaderXyz diff --git a/manga-py-stable_1.x/manga_py/providers/mangarock_com.py b/manga-py-stable_1.x/manga_py/providers/mangarock_com.py new file mode 100644 index 0000000..0aa47de --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/mangarock_com.py @@ -0,0 +1,79 @@ +from manga_py.crypt import MangaRockComCrypt +from manga_py.fs import rename, unlink, basename +from manga_py.provider import Provider +from .helpers.std import Std + +# api example: +""" +curl 'https://api.mangarockhd.com/query/web401/manga_detail?country=Japan' --compressed --data '{"oids":{"mrs-serie-100226981":0},"sections":["basic_info","summary","artworks","sub_genres","social_stats","author","character","publisher","scanlator","other_fact","chapters","related_series","same_author","feature_collections"]}' +""" + + +class MangaRockCom(Provider, Std): + crypt = None + __content = '' + __api_uri = 'https://api.mangarockhd.com/query/' + + def get_chapter_index(self) -> str: + return str(self.chapter_id) + + def get_main_content(self): + name = self._get_name(r'/manga/([^/]+-\d+)') + return self.http_get('{}/manga/{}'.format( + self.domain, + name + )) + + def get_manga_name(self) -> str: + return 
self.text_content(self.content, 'h1') + + def get_chapters(self): + idx = self._get_name('/manga/([^/]+)') + url = '{}info?oid={}&last=0&country=Japan'.format(self.__api_uri, idx) + items = self.json.loads(self.http_get(url)) + return [(i.get('oid'),) for i in items.get('data', {}).get('chapters', [])][::-1] + + def __get_url(self): + return '{}pages?oid={}&country=Japan'.format(self.__api_uri, self.chapter[0]) + + def get_files(self): + items = self.json.loads(self.http_get(self.__get_url())) + return items.get('data') + + # decrypt + def after_file_save(self, _path, idx: int): + _path_wp = _path + 'wp' + with open(_path, 'rb') as file_r: + with open(_path_wp, 'wb') as file_w: + file_w.write(self.crypt.decrypt(file_r.read())) + unlink(_path) + rename(_path_wp, _path) + + def save_file(self, idx=None, callback=None, url=None, in_arc_name=None): + _path, idx, _url = self._save_file_params_helper(url, idx) + in_arc_name = basename(_path) + '.webp' + return super().save_file(idx, callback, _url, in_arc_name) + + def get_cover(self) -> str: + selector = 'div:not([class]) > div[class] > div[class] > div[class] > div[class] > img' + url = '{}{}'.format(self.domain, self._get_name('(/manga/[^/]+)')) + img = self._elements(selector, self.http_get(url)) + if img and len(img): + return img[0].get('src') + + def prepare_cookies(self): + # patch api version + v = self.re.compile(r'\bAJAX_MRAPI_VERSION\b\s*=\s*[\'"]?(web\d+)') + self.__api_uri += v.search(self.content).group(1) + '/' + + self.crypt = MangaRockComCrypt() + + def book_meta(self) -> dict: + # todo meta + pass + + def chapter_for_json(self): + return self.get_url() + + +main = MangaRockCom diff --git a/manga-py-stable_1.x/manga_py/providers/mangarussia_com.py b/manga-py-stable_1.x/manga_py/providers/mangarussia_com.py new file mode 100644 index 0000000..6a4de07 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/mangarussia_com.py @@ -0,0 +1,67 @@ +from urllib.parse import unquote, quote + +from manga_py.provider import Provider +from .helpers.std import Std + + +class MangaRussiaCom(Provider, Std): + + @staticmethod + def path_url(url): + return quote(unquote(url)).replace('%3A//', '://') + + def get_chapter_index(self) -> str: + chapter = self.chapter + result = self.re.search(r'\+(\d+)\+-\+(\d+)', chapter) + return '-'.join(result.groups()) + + def get_main_content(self): + url = '{}/manga/{}.html'.format(self.domain, quote(self.manga_name)) + self._storage['referer'] = self.path_url(self.get_url()) + return self.http_get(url) + + def __name(self, url): + return self.re.search(r'/manga/(.+)\.html', url).group(1) + + def get_manga_name(self) -> str: + url = self.get_url() + if self.re.search('/manga/', url): + name = self.__name(url) + else: + url = self.html_fromstring(url, '.sitemaplist .red', 0).get('href') + name = self.__name(url) + return unquote(name) + + def get_chapters(self): + return self._elements('.chapterlist .col1 a') + + def _get_img(self, parser): + img = parser.cssselect('img#comicpic')[0] + urls = [img.get('src')] + onload = img.get('onload') + if onload and ~onload.find('(\''): + urls.append(self.re.search('\(\'(.+)\'\)', onload).group(1)) + return urls + + def get_files(self): + parser = self.html_fromstring(self.chapter) + result = parser.cssselect('select#page option + option') + images = self._get_img(parser) + for n, i in enumerate(result): + if n and n % 2: + parser = self.html_fromstring(i.get('value')) + images += self._get_img(parser) + return images + + def get_cover(self):
self._cover_from_content('.bookfrontpage > a > img') + + def before_download_chapter(self): + self._storage['referer'] = self.path_url(self.chapter) + + def book_meta(self) -> dict: + # todo meta + pass + + +main = MangaRussiaCom diff --git a/manga-py-stable_1.x/manga_py/providers/mangasaurus_com.py b/manga-py-stable_1.x/manga_py/providers/mangasaurus_com.py new file mode 100644 index 0000000..b6503c1 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/mangasaurus_com.py @@ -0,0 +1,55 @@ +from manga_py.provider import Provider +from .helpers.std import Std + + +class MangaSaurusCom(Provider, Std): + + def get_chapter_index(self) -> str: + return str(self.chapter_id) + + def get_main_content(self): + return self.http_get('{}/manga/{}/{}'.format( + self.domain, + *self.manga_name.split('_') + )) + + def get_manga_name(self) -> str: + url = self.get_url() + if ~url.find('/view/'): + url = self.html_fromstring(url, '#m_reader_bottom + div > a', 0).get('href') + result = self.re.search(r'/manga/(\d+)/([^/]+)', url).groups() + return '{1}_{0}'.format(*result) + + def get_chapters(self): + return self._elements('.table--chapters td > a')[::-1] + + def __files_helper(self): + content = self.http_get(self.chapter) + _path = self.document_fromstring(content, '#imageZone-next > img', 0).get('src') + path = self.re.search('(http.+?/original)/', _path).group(1) + '/{}/{}-{}{}' + parser = self.re.search(r'ImageReader\.setImages.+?(\{.+\})', content) + return path, parser + + def get_files(self): + path, parser = self.__files_helper() + if not parser: + return [] + images = [] + o = self.json.loads(parser.group(1)) + for i in o: + n = o.get(i) + _ = n.get('original', {}).get('file', '') + idx = _.find('.') + src = path.format(_[:idx], self.manga_name, n['id'], _[idx:]) + images.append(src) + return images + + def get_cover(self): + self._cover_from_content('.gallery-info__cover img') + + def book_meta(self) -> dict: + # todo meta + pass + + +main = MangaSaurusCom diff --git a/manga-py-stable_1.x/manga_py/providers/mangaseeonline_us.py b/manga-py-stable_1.x/manga_py/providers/mangaseeonline_us.py new file mode 100644 index 0000000..5728c1d --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/mangaseeonline_us.py @@ -0,0 +1,8 @@ +from .mangalife_us import MangaLifeUs + + +class MangaSeeOnlineUs(MangaLifeUs): + img_selector = '.image-container-manga .CurImage' + + +main = MangaSeeOnlineUs diff --git a/manga-py-stable_1.x/manga_py/providers/mangashiro_net.py b/manga-py-stable_1.x/manga_py/providers/mangashiro_net.py new file mode 100644 index 0000000..e4aae78 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/mangashiro_net.py @@ -0,0 +1,46 @@ +from manga_py.provider import Provider +from .helpers.std import Std + + +class MangaShiroNet(Provider, Std): + alter_re_name = r'\.net/([^/]+)-\d+' + chapter_re = r'\.net/[^/]+-(\d+(?:-\d+)?)' + chapters_selector = 'span.leftoff > a' + + def get_chapter_index(self) -> str: + chapter = self.chapter + return self.re.search(self.chapter_re, chapter).group(1) + + def get_main_content(self): + return self._get_content('{}/manga/{}/') + + def get_manga_name(self) -> str: + url = self.get_url() + if ~url.find('/manga/'): + re = '/manga/([^/]+)' + else: + re = self.alter_re_name + return self.re.search(re, url).group(1) + + def get_chapters(self): + return self._elements(self.chapters_selector) + + def get_files(self): + url = self.chapter + parser = self.html_fromstring(url) + items = parser.cssselect('#readerarea a[imageanchor]') + attr = 'href' + if 
not items: + items = parser.cssselect('#readerarea img[id]') + attr = 'src' + return [i.get(attr) for i in items] + + def get_cover(self) -> str: + return self._cover_from_content('img.attachment-post-thumbnail') + + def book_meta(self) -> dict: + # todo meta + pass + + +main = MangaShiroNet diff --git a/manga-py-stable_1.x/manga_py/providers/mangasupa_com.py b/manga-py-stable_1.x/manga_py/providers/mangasupa_com.py new file mode 100644 index 0000000..af569c7 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/mangasupa_com.py @@ -0,0 +1,33 @@ +from manga_py.provider import Provider +from .helpers.std import Std + + +class MangaSupaCom(Provider, Std): + + def get_chapter_index(self) -> str: + idx = self.re.search('/chapter_([^/]+)', self.chapter) + return '-'.join(idx.group(1).split('.')) + + def get_main_content(self): + return self._get_content('{}/manga/{}') + + def get_manga_name(self) -> str: + selector = r'\.com/(?:manga|chapter)/([^/]+)' + return self._get_name(selector) + + def get_chapters(self): + return self._elements('.chapter-list .row a') + + def get_files(self): + parser = self.html_fromstring(self.chapter) + return self._images_helper(parser, '.vung_doc img') + + def get_cover(self): + return self._cover_from_content('.info_image img') + + def book_meta(self) -> dict: + # todo meta + pass + + +main = MangaSupaCom diff --git a/manga-py-stable_1.x/manga_py/providers/mangasushi_net.py b/manga-py-stable_1.x/manga_py/providers/mangasushi_net.py new file mode 100644 index 0000000..66cf764 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/mangasushi_net.py @@ -0,0 +1,18 @@ +from .rawdevart_com import RawDevArtCom + + +class MangaSushiNet(RawDevArtCom): + _chapter_selector = r'/chapter-(\d+(?:-\d+)?)' + + def get_chapter_index(self) -> str: + idx = self.re.search(self._chapter_selector, self.chapter) + return idx.group(1) + + def get_files(self): + chapter = self.chapter.replace('p/1/', '').replace('?style=paged', '') + '?style=list' + parser = self.html_fromstring(chapter) + _class = '.page-break img.wp-manga-chapter-img' + return self._images_helper(parser, _class, 'data-src') + + +main = MangaSushiNet diff --git a/manga-py-stable_1.x/manga_py/providers/mangatail_com.py b/manga-py-stable_1.x/manga_py/providers/mangatail_com.py new file mode 100644 index 0000000..e33c436 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/mangatail_com.py @@ -0,0 +1,90 @@ +from manga_py.provider import Provider +from .helpers.std import Std + + +class MangaTailCom(Provider, Std): + __local_storage = None + + def get_archive_name(self) -> str: + return self.normal_arc_name([ + self.chapter_id, + *self._parse_ch(self.chapter[0]).split('.') + ]) + + def get_chapter_index(self) -> str: # Oh ... 
+ pass + + def _parse_ch(self, chapter): + _re = r'.+?[^\d](\d+(?:\.\d+)?)' + if ~chapter.find('-fixed'): + _re = r'.+?[^\d](\d+(?:\.\d+)?).+fixed' + re = self.re.search(_re, chapter, self.re.I) + if re: + return re.group(1) + return chapter + + def get_main_content(self): + url = self.get_url() + if self.__local_storage: + url = self.__local_storage + return self.http_get(url) + + def get_manga_name(self) -> str: + selector = '.main-content-inner .page-header' + header = self.html_fromstring(self.get_url(), selector, 0) + link = header.cssselect('a.active + a') + if link: + link = self.http().normalize_uri(link.get('href')) + self.__local_storage = link + header = self.html_fromstring(link, selector, 0) + return header.text_content().strip().replace('/', '_') # http://www.mangasail.com/content/12-prince-manga + + def _fix_chapters(self, items): + # http://www.mangasail.com/content/go-toubun-no-hanayome-30-%E2%80%93-fixed + # I wanted to sleep. Maybe fix it someday. + found = [] + result = [] + n = self.http().normalize_uri + for i in items: + name, url = i.text_content().strip(), i.get('href') + _name = self._parse_ch(name) + if ~url.find('-fixed'): + found.append(_name) + for i in items: + name = i.text_content().strip() + _name = self._parse_ch(name) + url = i.get('href') + if _name not in found or ~url.find('-fixed'): + result.append((name, n(url))) + return result + + def get_chapters(self): + items = self.document_fromstring(self.content, '.chlist td a') + items = self._fix_chapters(items) + return sorted(items, key=lambda n: float(self._parse_ch(n[0])), reverse=True) + + def prepare_cookies(self): + self._base_cookies() + + def get_files(self): + url = self.chapter[1] + items = self.html_fromstring('{}{}'.format(url, '?page=all'), '#images img') + n = self.http().normalize_uri + return [n(i.get('src')) for i in items] + + def get_cover(self) -> str: # TODO + cover = self.document_fromstring(self.content, 'iframe.authcache-esi-assembly', 0) + cover = self.json.loads(cover.text_content().strip()).get('field') + key = cover.keys()[0] + cover = self.document_fromstring(cover.get(key), '.field-type-image img', 0) + return self.http().normalize_uri(cover.get('src')) + + def book_meta(self) -> dict: + # todo meta + pass + + def chapter_for_json(self): + return self.chapter[1] + + +main = MangaTailCom diff --git a/manga-py-stable_1.x/manga_py/providers/mangatown_com.py b/manga-py-stable_1.x/manga_py/providers/mangatown_com.py new file mode 100644 index 0000000..0c3d784 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/mangatown_com.py @@ -0,0 +1,57 @@ +import urllib3 + +from manga_py.provider import Provider +from .helpers.std import Std + + +class MangaTownCom(Provider, Std): + + def get_archive_name(self) -> str: + idx = self.re.search('/manga/[^/]+(?:/v(\d+))?/c([^/]+)', self.chapter).groups() + if idx[0]: + var = {'vol': idx[0], 'ch': idx[1].split('.')} + else: + var = {'vol': '0', 'ch': idx[1].split('.')} + return self.normal_arc_name(var) + + def get_chapter_index(self) -> str: + idx = self.re.search('/manga/[^/]+(?:/v\d+)?/c([^/]+)', self.chapter) + return idx.group(1).replace('.', '-') + + def get_main_content(self): + return self._get_content('{}/manga/{}/') + + def get_manga_name(self) -> str: + return self._get_name('/manga/([^/]+)/?') + + def get_chapters(self): + return self.document_fromstring(self.content, '.chapter_list a') + + def prepare_cookies(self): + self._storage['domain_uri'] = self.domain.replace('//m.', '//') + 
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning) + self._http_kwargs['verify'] = False + + def get_files(self): + img_selector = 'img#image' + url = self.http().normalize_uri(self.chapter) + parser = self.html_fromstring(url) + pages = self._first_select_options(parser, '.page_select') + images = self._images_helper(parser, img_selector) + + for i in pages: + url = self.http().normalize_uri(i.get('value')) + img = self.html_fromstring(url) + images += self._images_helper(img, img_selector) + + return images + + def get_cover(self): + return self._cover_from_content('.detail_info > img') + + def book_meta(self) -> dict: + # todo meta + pass + + +main = MangaTownCom diff --git a/manga-py-stable_1.x/manga_py/providers/mangatrue_com.py b/manga-py-stable_1.x/manga_py/providers/mangatrue_com.py new file mode 100644 index 0000000..90e4040 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/mangatrue_com.py @@ -0,0 +1,15 @@ +from manga_py.providers.rawdevart_com import RawDevArtCom +from .helpers.std import Std + + +class MangaTrueCom(RawDevArtCom, Std): + def get_chapter_index(self) -> str: + re = self.re.compile(r'/manga/[^/]+/[^\d]*((?:\d+-?)+\d*)') + idx = re.search(self.chapter).group(1).split('-') + return '-'.join(idx[:int(len(idx) / 2 + .5)]) + + def get_cover(self) -> str: + return self._cover_from_content('.summary_image img.img-responsive', 'data-src') + + +main = MangaTrueCom diff --git a/manga-py-stable_1.x/manga_py/providers/mangawindow_net.py b/manga-py-stable_1.x/manga_py/providers/mangawindow_net.py new file mode 100644 index 0000000..9d0d827 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/mangawindow_net.py @@ -0,0 +1,49 @@ +from manga_py.provider import Provider +from .helpers.std import Std + + +class MangaWindowNet(Provider, Std): + __url = None + + def get_chapter_index(self) -> str: + return self.chapter[0].replace('.', '-') + + def get_main_content(self): + return self.http_get(self.__url) + + def get_manga_name(self) -> str: + title = self.html_fromstring(self.get_url(), '.item-title > a, .nav-title > a', 0) + self.__url = self.http().normalize_uri(title.get('href')) + return title.text_content().strip() + + def get_chapters(self): + items = self._elements('.chapter-list a.chapt') + result = [] + re = self.re.compile(r'[Cc]h\.(\d+(?:\.\d+)?)') + n = self.http().normalize_uri + for i in items: + text = i.cssselect('b')[0].text_content() + if 'deleted' not in text.casefold(): + result.append(( + re.search(text).group(1), + n(i.get('href')), + )) + return result + + def get_files(self): + re = self.re.compile(r'images\s*=\s*({.+});') + content = self.http_get(self.chapter[1]) + items = self.json.loads(re.search(content).group(1)) + return [items[i] for i in sorted(items, key=lambda i: int(i))] + + def get_cover(self) -> str: + return self._cover_from_content('.attr-cover > img') + + def book_meta(self) -> dict: + pass + + def chapter_for_json(self) -> str: + return self.chapter[1] + + +main = MangaWindowNet diff --git a/manga-py-stable_1.x/manga_py/providers/mangax_net.py b/manga-py-stable_1.x/manga_py/providers/mangax_net.py new file mode 100644 index 0000000..b342912 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/mangax_net.py @@ -0,0 +1,40 @@ +from urllib.parse import unquote_plus + +from manga_py.provider import Provider +from .helpers.std import Std + + +class MangaXNet(Provider, Std): + __name = None + + def get_chapter_index(self) -> str: + re = self.re.compile(r'\.\w+/\w/[^/]+/([^/]+)') + return 
re.search(self.chapter).group(1).replace('.', '-') + + def get_main_content(self): + url = '{}/m/{}'.format( + self.domain, + self.__name, + ) + return self.http_get(url) + + def get_manga_name(self) -> str: + self.__name = self._get_name(r'\.\w+/\w/([^/]+)') + return unquote_plus(self.__name) + + def get_chapters(self): + return self._elements('.chlist li a') + + def get_files(self): + ch = self.re.sub(r'(\.\w+)/\w/', r'\1/f/', self.chapter) + parser = self.html_fromstring(ch) + return self._images_helper(parser, 'img.center-block') + + def get_cover(self) -> str: + return self._cover_from_content('.thumbnail > img') + + def book_meta(self) -> dict: + pass + + +main = MangaXNet diff --git a/manga-py-stable_1.x/manga_py/providers/mangazuki_me.py b/manga-py-stable_1.x/manga_py/providers/mangazuki_me.py new file mode 100644 index 0000000..e4a0b38 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/mangazuki_me.py @@ -0,0 +1,43 @@ +from manga_py.provider import Provider +from .helpers.std import Std + + +class MangaZukiMe(Provider, Std): + + def get_chapter_index(self) -> str: + try: + re = self.re.compile(r'/manga/[^/]+/.+?(\d+(?:-\d+)?)[\?/]') + return re.search(self.chapter).group(1) + except AttributeError: + # mangazuki.online + re = self.re.compile(r'/manga/[^/]+/.+?(\d+(?:-\d+)?)$') + return re.search(self.chapter).group(1) + + def get_main_content(self): + return self._get_content('{}/manga/{}') + + def get_manga_name(self) -> str: + return self._get_name('/manga/([^/]+)') + + def get_chapters(self): + chapters = [] + n = self.http().normalize_uri + re = self.re.compile(r'(.+?)(?:\?style=list)?(?:/)?$') + for ch in self._elements('.wp-manga-chapter > a'): + href = re.search(ch.get('href')).group(1) + chapters.append(n(href) + '?style=list') + return chapters + + def get_files(self): + parser = self.html_fromstring(self.chapter) + return self._images_helper(parser, 'img.wp-manga-chapter-img') + + def get_cover(self) -> str: + image = self._cover_from_content('.summary_image > a > img', 'data-src') + if len(image) < 1: + # mangazuki.online + image = self._cover_from_content('.summary_image > a > img') + return image + + +main = MangaZukiMe diff --git a/manga-py-stable_1.x/manga_py/providers/manhuagui_com.py b/manga-py-stable_1.x/manga_py/providers/manhuagui_com.py new file mode 100644 index 0000000..0d38924 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/manhuagui_com.py @@ -0,0 +1,94 @@ +import random + +from manga_py.crypt import ManhuaGuiComCrypt +from manga_py.provider import Provider +from .helpers.std import Std + + +class ManhuaGuiCom(Provider, Std): + servers = [ + 'i.hamreus.com:8080', + 'us.hamreus.com:8080', + 'dx.hamreus.com:8080', + 'eu.hamreus.com:8080', + 'lt.hamreus.com:8080', + ] + + def _get_ch_idx(self): + chapter = self.chapter + return self.re.search(r'/comic/[^/]+/(\d+)', chapter.get('href')).group(1) + + def get_archive_name(self) -> str: + return super().get_archive_name() + '-' + self._get_ch_idx() + + def get_chapter_index(self) -> str: + chapter = self.chapter + span = chapter.cssselect('span') + idx = self._get_ch_idx() + if span: + span = span[0].text_content() + i = self.re.search(r'(\d+)', span).group(1) + return '{}-{}'.format(i, idx) + return '0-{}'.format(idx) + + def get_main_content(self): + _ = self._get_name(r'/comic/(\d+)') + return self.http_get('{}/comic/{}/'.format(self.domain, _)) + + def get_manga_name(self) -> str: + url = self.get_url() + selector = 'h1' + if self.re.search(r'/comic/\d+/\d+\.html', url): + selector = 'h1 > 
a' + return self.html_fromstring(url, selector, 0).text_content() + + def get_chapters(self): + parser = self.document_fromstring(self.content) + chapters = parser.cssselect('.chapter-list li > a') + if not len(chapters): + code = parser.cssselect('#__VIEWSTATE')[0].get('value') + manhuagui = ManhuaGuiComCrypt() + js = manhuagui.decrypt('LZString.decompressFromBase64("' + code + '")', '') + chapters = self.document_fromstring(js, '.chapter-list li > a') + return chapters + + def parse_files_data(self, data): + images = [] + md5 = data.get('sl', {}).get('md5', '') + cid = data.get('cid', '') + for i in data.get('files', []): + prior = 3 + ln = len(self.servers) + server = int(random.random() * (ln + prior)) + server = 0 if server < prior else server - prior + images.append('http://{}{}{}?cid={}&md5={}'.format( + self.servers[server], + data.get('path', ''), + i, cid, md5 + )) + return images + + def get_files(self): + url = self.chapter + self._storage['referer'] = url + content = self.http_get(url) + js = self.re.search(r'\](\(function\(.+\))\s?<', content) + if not js: + return [] + manhuagui = ManhuaGuiComCrypt() + data = manhuagui.decrypt(js.group(1), '') + data = self.re.search(r'\(({.+})\)', data) + if not data: + return [] + data = self.json.loads(data.group(1)) + return self.parse_files_data(data) + + def get_cover(self): + return self._cover_from_content('.hcover img') + + def book_meta(self) -> dict: + # todo meta + pass + + +main = ManhuaGuiCom diff --git a/manga-py-stable_1.x/manga_py/providers/manhuatai_com.py b/manga-py-stable_1.x/manga_py/providers/manhuatai_com.py new file mode 100644 index 0000000..98d96d9 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/manhuatai_com.py @@ -0,0 +1,78 @@ +from random import randrange + +from manga_py.provider import Provider +from .helpers.std import Std + + +class ManhuaTaiCom(Provider, Std): + servers = [ + 'http://mhpic.mh51.com', + 'http://mhpic.manhualang.com', + 'http://mhpic.jumanhua.com', + 'http://mhpic.yyhao.com', + ] + + def get_archive_name(self) -> str: + idx = self.get_chapter_index() + return self.normal_arc_name({'vol': [ + self.chapter_id, idx + ]}) + + def get_chapter_index(self) -> str: + ch = self.chapter + return self.re.search(r'/([^/]+)\.html', ch).group(1) + + def get_main_content(self): + return self._get_content('{}/{}/') + + def get_manga_name(self) -> str: + return self._get_name(r'\.com/([^/]+)') + + def get_chapters(self): + topics = self._elements('[id^=topic]') + items = [] + for i in topics[::-1]: + items += i.cssselect('a') + return items + + @staticmethod + def _decode_img_path(page_id, img_path): + result = '' + pid = int(page_id) % 10 + for i in img_path: + result += chr(ord(i) - pid) + return result + + def get_server(self): + idx = randrange(0, len(self.servers)) + return self.servers[idx] + + def get_files(self): + content = self.http_get(self.chapter) + pageid = self.re.search(r'pageid:\s*(\d+)', content).group(1) + imgpath = self.re.search(r'imgpath:\s*[\'"](.+?)[\'"]', content).group(1) + startimg = self.re.search(r'startimg:\s*(\d+)', content).group(1) + totalimg = self.re.search(r'totalimg:\s*(\d+)', content).group(1) + comic_size = self.re.search(r'comic_size:\s*[\'"](.+?)[\'"]', content).group(1) + + imgpath = self._decode_img_path(pageid, imgpath) + + items = [] + for i in range(int(startimg), int(totalimg) + 1): + items.append('{}/comic/{}{}.jpg{}'.format( + self.get_server(), + imgpath, + i, + comic_size + )) + return items + + def get_cover(self) -> str: + return 
self._cover_from_content('.comic-cover > img') + + def book_meta(self) -> dict: + # todo meta + pass + + +main = ManhuaTaiCom diff --git a/manga-py-stable_1.x/manga_py/providers/manhwa_co.py b/manga-py-stable_1.x/manga_py/providers/manhwa_co.py new file mode 100644 index 0000000..f41dc02 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/manhwa_co.py @@ -0,0 +1,32 @@ +from manga_py.provider import Provider +from .helpers.std import Std + + +class ManhwaCo(Provider, Std): + + def get_chapter_index(self) -> str: + chapter = self.chapter + return self.re.search(r'\.co/[^/]+/([^/]+)', chapter).group(1) + + def get_main_content(self): + return self._get_content('{}/{}') + + def get_manga_name(self) -> str: + return self._get_name(r'\.co/([^/]+)') + + def get_chapters(self): + return self._elements('.list-group .list-group-item') + + def get_files(self): + parser = self.html_fromstring(self.chapter) # _images_helper expects a parsed document, not the raw HTML string + return self._images_helper(parser, 'img.img-fluid') + + def get_cover(self) -> str: + return self._cover_from_content('.row > div > img.img-responsive') + + def book_meta(self) -> dict: + # todo meta + pass + + +main = ManhwaCo diff --git a/manga-py-stable_1.x/manga_py/providers/manhwahentai_com.py b/manga-py-stable_1.x/manga_py/providers/manhwahentai_com.py new file mode 100644 index 0000000..55f7004 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/manhwahentai_com.py @@ -0,0 +1,30 @@ +from manga_py.provider import Provider +from .helpers.std import Std + + +class ManhwaHentaiCom(Provider, Std): + + def get_chapter_index(self) -> str: + return self.re.search(r'/manhwa/[^/]+/[\w-]+-(\d+(?:-\d+)?)', self.chapter).group(1) + + def get_main_content(self): + return self._get_content('{}/manhwa/{}') + + def get_manga_name(self) -> str: + return self._get_name(r'./manhwa/([^/]+)') + + def get_chapters(self): + return self._elements('.version-chap > li > a') + + def get_files(self): + parser = self.html_fromstring(self.chapter) + return self._images_helper(parser, '.reading-content .wp-manga-chapter-img') + + def get_cover(self) -> str: + return self._cover_from_content('.summary_image img.img-responsive') + + def book_meta(self) -> dict: + pass + + +main = ManhwaHentaiCom diff --git a/manga-py-stable_1.x/manga_py/providers/merakiscans_com.py b/manga-py-stable_1.x/manga_py/providers/merakiscans_com.py new file mode 100644 index 0000000..051b82a --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/merakiscans_com.py @@ -0,0 +1,49 @@ +from manga_py.provider import Provider +from .helpers.std import Std + + +class MerakiScansCom(Provider, Std): + _content_url = '{}/manga/{}/' + + def get_chapter_index(self) -> str: + re = self.re.compile(r'/manga/[^/]+/(\d+(?:\.\d+)?)') + idx = re.search(self.chapter).group(1) + return idx.replace('.', '-') + + def _home_url(self): + return self._content_url.format(self.domain, self.manga_name) + + def get_main_content(self): + return self.http_get(self._home_url()) + + def get_manga_name(self) -> str: + return self._get_name('com/manga/([^/]+)') + + def get_chapters(self): + selector = '.clickable-chapter' + items = self._elements(selector) + return [i.get('data-href') for i in items] + + def get_files(self): + content = self.http_get(self.chapter) + slug = self.re.search(r'manga_slug\s*=\s*[\'"](.+)[\'"]', content).group(1) + chapter = self.re.search(r'viewschapter\s*=\s*[\'"](.+)[\'"]', content).group(1) + images = self.re.search(r'images\s*=\s*(\[.+\])', content).group(1).replace('\'', '"') + images = self.json.loads(images) + + # SRC RULE: "/manga/" +
manga_slug + "/" + currentChapter + "/" + images[pageNum - 1]; + + return ['{}/manga/{}/{}/{}'.format(self.domain, slug, chapter, i) for i in images] + + def get_cover(self) -> str: + return self._cover_from_content('#cover_img') + + def book_meta(self) -> dict: + # todo meta + pass + + def prepare_cookies(self): + self.http().cookies['reading_type'] = 'long' + + +main = MerakiScansCom diff --git a/manga-py-stable_1.x/manga_py/providers/mintmanga_com.py b/manga-py-stable_1.x/manga_py/providers/mintmanga_com.py new file mode 100644 index 0000000..a8543ad --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/mintmanga_com.py @@ -0,0 +1,11 @@ +from .helpers.std import Std +from .readmanga_me import ReadmangaMe + + +class MintMangaCom(ReadmangaMe, Std): + + def get_manga_name(self): + return self._get_name(r'\.com/([^/]+)') + + +main = MintMangaCom diff --git a/manga-py-stable_1.x/manga_py/providers/mngcow_co.py b/manga-py-stable_1.x/manga_py/providers/mngcow_co.py new file mode 100644 index 0000000..64ff2fc --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/mngcow_co.py @@ -0,0 +1,20 @@ +from .authrone_com import AuthroneCom +from .helpers.std import Std + + +class MngCowCo(AuthroneCom, Std): + + def get_chapter_index(self) -> str: + return self.re.search( + r'\.co/[^/]+/([^/]+)', + self.chapter + ).group(1).replace('.', '-') + + def get_main_content(self): + return self._get_content('{}/{}/') + + def get_manga_name(self) -> str: + return self._get_name(r'\.co/([^/]+)') + + +main = MngCowCo diff --git a/manga-py-stable_1.x/manga_py/providers/mngdoom_com.py b/manga-py-stable_1.x/manga_py/providers/mngdoom_com.py new file mode 100644 index 0000000..cf174b8 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/mngdoom_com.py @@ -0,0 +1,17 @@ +from .funmanga_com import FunMangaCom + + +class MngDoomCom(FunMangaCom): + def get_files(self): + content = self.http_get(self.chapter) + items = self.re.search(r' images = (\[{[^;]+}\])', content) + if not items: + return [] + try: + images = self.json.loads(items.group(1)) + return [i['url'] for i in images] + except self.json.JSONDecodeError: + return [] + + +main = MngDoomCom diff --git a/manga-py-stable_1.x/manga_py/providers/mymangalist_org.py b/manga-py-stable_1.x/manga_py/providers/mymangalist_org.py new file mode 100644 index 0000000..969421c --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/mymangalist_org.py @@ -0,0 +1,42 @@ +from manga_py.provider import Provider +from .helpers.std import Std, Http2 + + +class MyMangaListOrg(Provider, Std): + + def get_chapter_index(self) -> str: + # re = self.re.compile(r'/chapter-[^/]+-(\d+)') + re = self.re.compile(r'/download/[^/]+?(\d+)') + return re.search(self.chapter).group(1) + + def get_main_content(self): + return self._get_content('{}/read-{}') + + def get_manga_name(self) -> str: + if ~self.get_url().find('/read'): + re = r'/read-([^/]+)' + else: + re = r'/chapter-([^/]+)-\d+' + return self._get_name(re) + + def get_chapters(self): + return self._elements('.chapter_info_download a') + + def loop_chapters(self): + http2 = Http2(self) + http2.download_archives(self._storage['chapters']) + + def get_files(self): + return [] + + def prepare_cookies(self): + self.cf_protect(self.get_url()) + + def get_cover(self) -> str: + return self._cover_from_content('img.manga_info_image') + + def book_meta(self) -> dict: + pass + + +main = MyMangaListOrg diff --git a/manga-py-stable_1.x/manga_py/providers/myreadingmanga_info.py 
b/manga-py-stable_1.x/manga_py/providers/myreadingmanga_info.py new file mode 100644 index 0000000..d7c8a07 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/myreadingmanga_info.py @@ -0,0 +1,40 @@ +from manga_py.provider import Provider +from .helpers.std import Std + + +class MyReadingMangaInfo(Provider, Std): + + def get_chapter_index(self, no_increment=False) -> str: + return str(self.chapter_id) + + def get_main_content(self): + return self._get_content('{}/{}/') + + def get_manga_name(self) -> str: + return self._get_name(r'\.info/([^/]+)') + + def get_chapters(self): + v = [self.get_url()] # current chapter + parser = self._elements('.pagination > a') + if not parser: + parser = self._elements('.entry-content p > a') + v += parser + return v[::-1] + + def prepare_cookies(self): + self.cf_protect(self.get_url()) + + def get_files(self): + selector = '.entry-content div img,.entry-content p img' + parser = self.html_fromstring(self.chapter) + return self._images_helper(parser, selector) + + def get_cover(self): + pass + + def book_meta(self) -> dict: + # todo meta + pass + + +main = MyReadingMangaInfo diff --git a/manga-py-stable_1.x/manga_py/providers/neumanga_tv.py b/manga-py-stable_1.x/manga_py/providers/neumanga_tv.py new file mode 100644 index 0000000..41268b1 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/neumanga_tv.py @@ -0,0 +1,39 @@ +from manga_py.provider import Provider +from .helpers.std import Std + + +class NeuMangaTv(Provider, Std): + + def get_chapter_index(self) -> str: + chapter = self.chapter + idx = self.re.search(r'/manga/[^/]+/(\d+(?:\+\d+))', chapter).group(1) + return '-'.join(idx.split('+')) + + def get_main_content(self): + return self._get_content('{}/manga/{}') + + def get_manga_name(self) -> str: + return self._get_name('/manga/([^/]+)') + + def get_chapters(self): + return self._elements('#scans .item-content a') + + def get_files(self): + img_selector = '.imagechap' + parser = self.html_fromstring(self.chapter) + pages = self._first_select_options(parser, '.readnav select.page') + images = self._images_helper(parser, img_selector) + for i in pages: + url = i.get('value').replace('//', '/').replace(':/', '://') + images += self._images_helper(self.html_fromstring(url), img_selector) + return images + + def get_cover(self) -> str: + return self._cover_from_content('.info img.imagemg') + + def book_meta(self) -> dict: + # todo meta + pass + + +main = NeuMangaTv diff --git a/manga-py-stable_1.x/manga_py/providers/nhentai_net.py b/manga-py-stable_1.x/manga_py/providers/nhentai_net.py new file mode 100644 index 0000000..d483c12 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/nhentai_net.py @@ -0,0 +1,27 @@ +from .hentaifox_com import HentaiFoxCom + + +class nHentaiNet(HentaiFoxCom): + _idx_re = r'/g/(\d+)' + _url_str = '{}/g/{}/' + _name_selector = '#info h1' + _cdn = 'https://i.nhentai.net/galleries/' + __ext = {'j': 'jpg', 'p': 'png', 'g': 'gif'} + + def get_files(self): + page = self._elements('#thumbnail-container a')[0] + n = self.http().normalize_uri + content = self.http_get(n(page.get('href'))) + imgs = self.re.search(r'gallery\s*:\s*(\{.+\}),', content) + imgs = self.json.loads(imgs.group(1)) + idx = imgs.get('media_id') + images = [] + for n, i in enumerate(imgs.get('images', {}).get('pages', [])): + images.append('{}{}/{}.{}'.format(self._cdn, idx, n + 1, self.__ext.get(i.get('t')))) + return images + + def get_cover(self) -> str: + return self._cover_from_content('#cover img', 'data-src') + + +main = nHentaiNet diff 
--git a/manga-py-stable_1.x/manga_py/providers/niadd_com.py b/manga-py-stable_1.x/manga_py/providers/niadd_com.py new file mode 100644 index 0000000..57aed44 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/niadd_com.py @@ -0,0 +1,38 @@ +from manga_py.provider import Provider +from .helpers.std import Std + + +class NiAddCom(Provider, Std): + + def get_chapter_index(self) -> str: + re = self.re.compile(r'/chapter/.*?_(\d+(?:_\d+)?)/') + return re.search(self.chapter).group(1).replace('_', '-') + + def get_main_content(self): + return self._get_content('{}/manga/{}') + + def get_manga_name(self) -> str: + return self._get_name('/manga/([^/]+)') + + def get_chapters(self): + return self._elements('.detail-chlist > a') + + def get_files(self): + img_selector = 'img.manga_pic' + url = self.chapter + '-10-{}.html' + parser = self.html_fromstring(url.format(1)) + pages = len(self._first_select_options(parser, '.sl-page')) + 1 + images = self._images_helper(parser, img_selector) + for p in range(2, pages): + parser = self.html_fromstring(url.format(p)) + images += self._images_helper(parser, img_selector) + return images + + def get_cover(self) -> str: + return self._cover_from_content('.manga-detailtop img') + + def book_meta(self) -> dict: + pass + + +main = NiAddCom diff --git a/manga-py-stable_1.x/manga_py/providers/nightow_net.py b/manga-py-stable_1.x/manga_py/providers/nightow_net.py new file mode 100644 index 0000000..d81f4f1 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/nightow_net.py @@ -0,0 +1,47 @@ +from urllib.parse import unquote_plus + +from manga_py.provider import Provider +from .helpers.std import Std + + +class NightowNet(Provider, Std): + _name_re = r'manga=(.+?)(?:&.+)?$' + + def get_chapter_index(self) -> str: + ch = unquote_plus(self.chapter) + idx = self.re.search(r'chapter=(?:.+?)\+(\d+(?:\.\d+)?)', ch) + if idx: + return '-'.join(idx.group(1).split('.')) + return self.re.search('chapter=(.+?)(?:&.+)?$', ch).group(1) + + def get_main_content(self): + name = self._get_name(self._name_re) + return self.http_get('{}/online/?manga={}'.format( + self.domain, + name + )) + + def get_manga_name(self) -> str: + return unquote_plus(self._get_name(self._name_re)) + + def get_chapters(self): + return self._elements('.selector .options a') + + def prepare_cookies(self): + self._storage['referer'] = self.domain + '/online/' + + def get_files(self): + content = self.http_get(self.chapter) + items = self.re.findall(r'imageArray\[\d+\]\s*=\s*[\'"](.+)[\'"];', content) + n = self.http().normalize_uri + return [n(i) for i in items] + + def get_cover(self) -> str: + pass + + def book_meta(self) -> dict: + # todo meta + pass + + +main = NightowNet diff --git a/manga-py-stable_1.x/manga_py/providers/nineanime_com.py b/manga-py-stable_1.x/manga_py/providers/nineanime_com.py new file mode 100644 index 0000000..9c03281 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/nineanime_com.py @@ -0,0 +1,37 @@ +from manga_py.provider import Provider +from .helpers.std import Std + + +class NineAnimeCom(Provider, Std): + _ch = None + + def get_chapter_index(self) -> str: + ch_result = self._ch.search(self.chapter) + if ch_result: + return ch_result.group(1).replace('_', '-') + return '000-' + self.chapter_id + + def get_main_content(self): + return self._get_content('{}/manga/{}.html?waring=1') + + def get_manga_name(self) -> str: + return self._get_name(r'/manga/(.+)\.html') + + def get_chapters(self): + return self._elements('.detail-chlist a') + + def get_files(self): + 
parser = self.html_fromstring(self.chapter.rstrip('/') + '-0-1.html') + return self._images_helper(parser, 'img.manga_pic') + + def get_cover(self) -> str: + return self._cover_from_content('img.detail-cover') + + def book_meta(self) -> dict: + pass + + def prepare_cookies(self): + self._ch = self.re.compile(r'/chapter/.*?(?:_((?:\d+)(?:_\d+)?))[^/]*?[/]') + + +main = NineAnimeCom diff --git a/manga-py-stable_1.x/manga_py/providers/ninemanga_com.py b/manga-py-stable_1.x/manga_py/providers/ninemanga_com.py new file mode 100644 index 0000000..3f5f27a --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/ninemanga_com.py @@ -0,0 +1,56 @@ +from .helpers.nine_manga import NineHelper +from .helpers.std import Std + + +class NineMangaCom(NineHelper, Std): + _local_storage = None + + def get_chapter_index(self) -> str: + return str(self.chapter_id) + + def get_main_content(self): + name = self.get_manga_name(False) + return self.http_get('{}/manga/{}.html?waring=1'.format(self.domain, name)) + + def get_manga_name(self, normalize=True) -> str: + if not self._local_storage: + name = self.re_name(self.get_url()) + if name: + self._local_storage = name.group(1) + else: + url = self.html_fromstring(self.get_url(), '.subgiude > li + li > a', 0).get('href') + self._local_storage = self.re_name(url).group(1) + return self.normalize_name(self._local_storage, normalize) + + def get_chapters(self): + result = self._elements('.chapterbox li a.chapter_list_a') + items = [] + for i in result: + u = self.re.search(r'(/chapter/.*/\d+)\.html', i.get('href')) + items.append('{}{}-10-1.html'.format(self.domain, u.group(1))) + return items + + def get_files_on_page(self, content): + parser = self.document_fromstring(content) + return self._images_helper(parser, 'img.manga_pic') + + def get_files(self): + content = self._get_page_content(self.chapter) + parser = self.document_fromstring(content) + pages = self._first_select_options(parser, '.changepage #page') + images = self.get_files_on_page(content) + for i in pages: + url = self.http().normalize_uri(i.get('value')) + content = self._get_page_content(url) + images += self.get_files_on_page(content) + return images + + def prepare_cookies(self): + self._base_cookies(self.get_url()) + + def book_meta(self) -> dict: + # todo meta + pass + + +main = NineMangaCom diff --git a/manga-py-stable_1.x/manga_py/providers/noranofansub_com.py b/manga-py-stable_1.x/manga_py/providers/noranofansub_com.py new file mode 100644 index 0000000..e607d62 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/noranofansub_com.py @@ -0,0 +1,16 @@ +from .gomanga_co import GoMangaCo + + +class NoraNoFansubCom(GoMangaCo): + _name_re = r'\.com/(?:lector/)?(?:series/|read/)?([^/]+)/' + _content_str = '{}/{}/' + _chapters_selector = '.entry-content td a[href]' + + def get_chapters(self): + return super().get_chapters()[::-1] + + def get_cover(self) -> str: + return self._cover_from_content('.entry-content img.size-full') + + +main = NoraNoFansubCom diff --git a/manga-py-stable_1.x/manga_py/providers/nozominofansub_com.py b/manga-py-stable_1.x/manga_py/providers/nozominofansub_com.py new file mode 100644 index 0000000..eabd1e9 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/nozominofansub_com.py @@ -0,0 +1,13 @@ +from .komikid_com import KomikIdCom + + +class NozomiNoFansubCom(KomikIdCom): + _content_str = '{}/public/manga/{}' + + def get_chapter_index(self) -> str: + re = self.re.compile(r'/manga/[^/]+/.+?(\d+(?:[^\d/]\d+)?)') + split = self.re.compile(r'[^\d+]') + return 
'-'.join(split.split(re.search(self.chapter).group(1))) + + +main = NozomiNoFansubCom diff --git a/manga-py-stable_1.x/manga_py/providers/nude_moon_me.py b/manga-py-stable_1.x/manga_py/providers/nude_moon_me.py new file mode 100644 index 0000000..eb2fb83 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/nude_moon_me.py @@ -0,0 +1,46 @@ +from manga_py.fs import basename +from manga_py.provider import Provider +from .helpers.std import Std + + +class NudeMoonMe(Provider, Std): + __content = None + __url = None + + def get_archive_name(self) -> str: + return 'archive' + + def get_chapter_index(self) -> str: + return '0' + + def get_main_content(self): + return self.__content + + def get_manga_name(self) -> str: + self.__url = self.get_url() + if not self.re.search(r'\d+-online--', self.__url): + _url = self.re.search(r'(.+?/\d+)--(.+\.html)', self.__url) + self.__url = '{}-online--{}'.format(*_url.groups()) + self.__content = self.http_get(self.__url + '?row') + name = self.document_fromstring(self.__content, 'meta[property="og:title"]', 0).get('content') + name = self.re.search(r'(.+?) [#/]', name) + return name.group(1) if name else basename(self.__url) + + def get_chapters(self): + return [b''] + + def get_files(self): + return [i.get('src') for i in self._elements('.square-red center > img')] + + def get_cover(self) -> str: + pass + + def book_meta(self) -> dict: + # todo meta + pass + + def chapter_for_json(self): + return self.__url + + +main = NudeMoonMe diff --git a/manga-py-stable_1.x/manga_py/providers/otakusmash_com.py b/manga-py-stable_1.x/manga_py/providers/otakusmash_com.py new file mode 100644 index 0000000..4ab359d --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/otakusmash_com.py @@ -0,0 +1,67 @@ +from manga_py.provider import Provider +from .helpers.std import Std + + +class OtakuSmashCom(Provider, Std): + selector = r'https?://[^/]+/(comics/|read-\w+/|reader/)?([^/]+)' + prefix = '/' + + def get_chapter_index(self) -> str: + selector = self.selector + '/([^/]+)' + idx = self.re.search(selector, self.chapter) + return '-'.join(idx.group(3).split('.')) + + def get_main_content(self): + return self.http_get(self._get_manga_url()) + + def get_manga_name(self) -> str: + result = self.re.search(self.selector, self.get_url()) + self.prefix = result.group(1) + return result.group(2) + + def get_chapters(self): + parser = self.document_fromstring(self.content) + items = self._first_select_options(parser, '.pager select[name="chapter"]', False) + url = self._get_manga_url() + return ['{}{}/'.format(url, i.get('value')) for i in items] + + def get_files(self): + chapter = self.chapter + parser = self.html_fromstring(chapter) + pages = self._first_select_options(parser, '.mid .pager select[name="page"]') + images = [] + _img = self._get_image(parser) + _img and images.append(_img) + self.log('Get pages... 
Please, wait') + for page in pages: + parser = self.html_fromstring('{}{}/'.format(chapter, page.get('value'))) + _img = self._get_image(parser) + _img and images.append(_img) + return images + + def _get_manga_url(self): + return '{}/{}{}/'.format(self.domain, self.prefix, self.manga_name) + + def _get_image(self, parser): + image = parser.cssselect('a > img.picture') + if not len(image): + return False + image = image[0].get('src') + if image[0] == '/': + return self.http().normalize_uri(image) + base_uri = parser.cssselect('base') + if len(base_uri): + base_uri = base_uri[0].get('href') + else: + base_uri = self.chapter + return base_uri + image + + def get_cover(self): + pass # TODO + + def book_meta(self) -> dict: + # todo meta + pass + + +main = OtakuSmashCom diff --git a/manga-py-stable_1.x/manga_py/providers/otscans_com.py b/manga-py-stable_1.x/manga_py/providers/otscans_com.py new file mode 100644 index 0000000..211fff8 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/otscans_com.py @@ -0,0 +1,10 @@ +from .gomanga_co import GoMangaCo + + +class OtScansCom(GoMangaCo): + _name_re = '/foolslide/[^/]+/([^/]+)/' + _content_str = '{}/foolslide/series/{}/' + _chapters_selector = '.list .group .element .title a' + + +main = OtScansCom diff --git a/manga-py-stable_1.x/manga_py/providers/pecintakomik_com.py b/manga-py-stable_1.x/manga_py/providers/pecintakomik_com.py new file mode 100644 index 0000000..26b5daa --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/pecintakomik_com.py @@ -0,0 +1,41 @@ +from manga_py.provider import Provider +from .helpers.std import Std + + +class PecintaKomikCom(Provider, Std): + + def get_chapter_index(self) -> str: + idx = self.chapter + idx = self.re.search('/manga/[^/]+/(\d+(?:,\d)?)', idx) + return '-'.join(idx.group(1).split(',')) + + def get_main_content(self): + return self._get_content('{}/{}/') + + def get_manga_name(self) -> str: + return self._get_name(r'\.\w{2,5}/([^/]+)') + + def get_chapters(self): + return self._elements('.post-cnt ul > li > a') + + def get_files(self): + url = self.chapter + '/full' + parser = self.html_fromstring(url) + items = parser.cssselect('td a .picture') + base = parser.cssselect('base[href]') + if base: + base = base[0].get('href') + else: + base = url + n = self.http().normalize_uri + return [n(i.get('src'), base) for i in items] + + def get_cover(self) -> str: + return self._cover_from_content('img.pecintakomik') + + def book_meta(self) -> dict: + # todo meta + pass + + +main = PecintaKomikCom diff --git a/manga-py-stable_1.x/manga_py/providers/pecintakomik_com_manga.py b/manga-py-stable_1.x/manga_py/providers/pecintakomik_com_manga.py new file mode 100644 index 0000000..367be6b --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/pecintakomik_com_manga.py @@ -0,0 +1,26 @@ +from .helpers.std import Std +from .pecintakomik_com import PecintaKomikCom + + +class PecintaKomikComManga(PecintaKomikCom, Std): + + def get_main_content(self): + return self._get_content('{}/manga/{}/') + + def get_manga_name(self) -> str: + return self._get_name(r'/manga/([^/]+)') + + def get_chapters(self): + parser = self.document_fromstring(self.content) + items = self._first_select_options(parser, 'select[name="chapter"]', False) + url = '{}/manga/{}/%s/full'.format( + self.domain, + self.manga_name + ) + return [url % i.get('value') for i in items] + + def get_cover(self) -> str: + pass + + +main = PecintaKomikComManga diff --git a/manga-py-stable_1.x/manga_py/providers/plus_comico_jp.py 
b/manga-py-stable_1.x/manga_py/providers/plus_comico_jp.py new file mode 100644 index 0000000..9532382 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/plus_comico_jp.py @@ -0,0 +1,103 @@ +from time import time +from urllib import parse + +from manga_py.crypt.puzzle import Puzzle +from manga_py.fs import rename +from manga_py.provider import Provider +from .helpers.std import Std + + +class PlusComicoJp(Provider, Std): + scrambles = [] + + def get_chapter_index(self) -> str: + return self.re.search('/store/\d+/(\d+)', self.chapter).group(1) + + def get_main_content(self): + content = self._storage.get('main_content', None) + if content: + return content + idx = self.re.search('/store/(\d+)', self.get_url()) + url = '{}/store/{}/'.format(self.domain, idx.group(1)) + return self.http_get(url) + + def get_manga_name(self) -> str: + return self.text_content(self.content, 'h1 > ._title') + + def get_chapters(self): + idx = self.re.search(r'/store/(\d+)', self.get_url()).group(1) + data = self.http_post('{}/store/api/getTitleArticles.nhn'.format( + self.domain + ), data={ + 'titleNo': idx + }) + json = self.json.loads(data) + items = [] + for i in json.get('result', {}).get('list', {}): + for m in i.get('articleList'): + if m.get('freeFlg') == 'Y': + items.append(m.get('articleDetailUrl')) + return items + + def get_files(self): + url = self.http().requests(self.chapter, method='head') + location = url.headers.get('location') + self.http().requests(location, method='head') + + location = parse.urlparse(location) + params = parse.parse_qs(location.query) + + ts = int(time()) + base_url = '{}://{}{}/diazepam_hybrid.php?param={}&ts={}&_={}&reqtype=0'.format( + location.scheme, + location.netloc, + self.re.search(r'(.+)/\w+\.php', location.path).group(1), + parse.quote_plus(params.get('param')[0]), + ts, + ts + 1305, + ) + + pages_url = base_url + '&mode=7&file=face.xml&callback=jQ12_34' + scramble_url = base_url + '&mode=8&file={:0>4}.xml' + file_url = base_url + '&mode=1&file={:0>4}_0000.bin' + + total_pages = self.re.search(r'TotalPage>(\d+)(.+?) 
str: + return self._cover_from_content('.cover img') + + def after_file_save(self, _path: str, idx: int): + _matrix = self.scrambles[idx].split(',') + div_num = 4 + matrix = {} + n = 0 + for i in _matrix: + matrix[int(i)] = n + n += 1 + p = Puzzle(div_num, div_num, matrix, 8) + p.need_copy_orig = True + p.de_scramble(_path, '{}.jpg'.format(_path)) + rename('{}.jpg'.format(_path), _path) + + def save_file(self, idx=None, callback=None, url=None, in_arc_name=None): + if in_arc_name is None: + in_arc_name = '{}_image.jpg'.format(idx) + super().save_file(idx, callback, url, in_arc_name) + + def book_meta(self) -> dict: + # todo meta + pass + + +main = PlusComicoJp diff --git a/manga-py-stable_1.x/manga_py/providers/plus_comico_jp_manga.py b/manga-py-stable_1.x/manga_py/providers/plus_comico_jp_manga.py new file mode 100644 index 0000000..788bd41 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/plus_comico_jp_manga.py @@ -0,0 +1,56 @@ +from manga_py.provider import Provider +from .helpers.std import Std + + +class PlusComicoJp(Provider, Std): + + def get_chapter_index(self) -> str: + return self.re.search('/manga/\d+/(\d+)', self.chapter).group(1) + + def get_main_content(self): + content = self._storage.get('main_content', None) + if content: + return content + idx = self.re.search('/manga/(\d+)', self.get_url()) + url = '{}/manga/{}/'.format(self.domain, idx.group(1)) + return self.http_get(url) + + def get_manga_name(self) -> str: + return self.text_content(self.content, '.stage__body h1') + + def get_chapters(self): + idx = self.re.search(r'/manga/(\d+)', self.get_url()).group(1) + data = self.http_post('{}/api/getArticleList.nhn'.format( + self.domain + ), data={ + 'titleNo': idx + }) + json = self.json.loads(data) + items = [] + for i in json.get('result', {}).get('list', {}): + if i.get('freeFlg', 'N') == 'Y': + items.append(i.get('articleDetailUrl')) + return items[::-1] + + def get_files(self): + content = self.http_get(self.chapter) + images = [i.get('src') for i in self.document_fromstring(content, '.comic-image > img')] + images += self.re.findall(r'\'(http://comicimg.comico.jp/onetimecontents/.+)\'', content) + return images + + def get_cover(self) -> str: + item = self.document_fromstring(self.content, '.stage div[class^="article-hero"]') + if item: + return self.parse_background(item[0]) + + def save_file(self, idx=None, callback=None, url=None, in_arc_name=None): + if in_arc_name is None: + in_arc_name = '{}_image.jpg'.format(idx) + super().save_file(idx, callback, url, in_arc_name) + + def book_meta(self) -> dict: + # todo meta + pass + + +main = PlusComicoJp diff --git a/manga-py-stable_1.x/manga_py/providers/porncomix_info.py b/manga-py-stable_1.x/manga_py/providers/porncomix_info.py new file mode 100644 index 0000000..86229a8 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/porncomix_info.py @@ -0,0 +1,42 @@ +from manga_py.provider import Provider +from .helpers.std import Std + + +class PornComixInfo(Provider, Std): + + def get_archive_name(self) -> str: + return 'archive' + + def get_chapter_index(self) -> str: + return '0' + + def get_main_content(self): + return self._get_content('{}/{}') + + def get_manga_name(self) -> str: + return self._get_name(r'\.info/([^/]+)') + + def get_chapters(self): + return [b''] + + def get_files(self): + items = self._elements('.gallery-item a > img') + images = [] + re = self.re.compile(r'(.+/images/.+\d)-\d+x\d+(\.\w+)') + for i in items: + g = re.search(i.get('data-lazy-src')).groups() + 
images.append('{}{}'.format(*g)) + return images + + def get_cover(self) -> str: + pass + + def book_meta(self) -> dict: + # todo meta + pass + + def chapter_for_json(self): + return self.get_url() + + +main = PornComixInfo diff --git a/manga-py-stable_1.x/manga_py/providers/psychoplay_co.py b/manga-py-stable_1.x/manga_py/providers/psychoplay_co.py new file mode 100644 index 0000000..e23d426 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/psychoplay_co.py @@ -0,0 +1,42 @@ +from manga_py.provider import Provider +from .helpers.std import Std + + +class PsychoPlayCo(Provider, Std): + + def get_chapter_index(self) -> str: + ch = self.chapter + idx = self.re.search('/read/[^/]+/([^/]+)', ch) + return idx.group(1) + + def get_main_content(self): + return self._get_content('{}/series/{}') + + def get_manga_name(self) -> str: + return self._get_name('/(?:series|read)/([^/]+)') + + def get_chapters(self): + _chapter = 'a.media-link' + items = self._elements(_chapter) + selector = '.pagination li:not([class]) a' + pages = self.document_fromstring(self.content, selector) + n = self.http().normalize_uri + for i in pages: # TODO! Warning! + items += self._elements(_chapter, self.http_get(n(i.get('href')))) + return items + + def get_files(self): # TODO! Warning! + parser = self.html_fromstring(self.chapter) + return self._images_helper(parser, '.img-responsive', 'data-src') + + def get_cover(self) -> str: + item = self._elements('.profile-cover-img') + if item: + return self.parse_background(item[0]) + + def book_meta(self) -> dict: + # todo meta + pass + + +main = PsychoPlayCo diff --git a/manga-py-stable_1.x/manga_py/providers/pururin_io.py b/manga-py-stable_1.x/manga_py/providers/pururin_io.py new file mode 100644 index 0000000..8bb6ebb --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/pururin_io.py @@ -0,0 +1,64 @@ +from manga_py.provider import Provider +from .helpers.std import Std + + +class PururinIo(Provider, Std): + + def get_archive_name(self) -> str: + return 'archive' + + def get_chapter_index(self) -> str: + return '0' + + def get_main_content(self): + url = self.get_url() + if ~url.find('/gallery/'): + re = r'/gallery/(\d+)/([^/]+)' + else: + re = r'/read/(\d+)/\d+/([^/]+)' + return self._get_content('{}/gallery/{}/{}'.format( + self.domain, + *self.re.search(re, url).groups() + )) + + def get_manga_name(self) -> str: + url = self.get_url() + if ~url.find('/gallery/'): + re = r'/gallery/\d+/([^/]+)' + else: + re = r'/read/\d+/\d+/([^/]+)' + return self._get_name(re) + + def get_chapters(self): + return [b''] + + def _images(self, content): + items = self.json.loads(content) + images = [] + for i in items: + url = items.get(i, {}).get('image', False) + url and images.append(url) + return images + + def get_files(self): + items = self._elements('.col-md-10 .well-pururin > div[class*="preview"] > a') + if items: + url = self.http().normalize_uri(items[0].get('href')) + content = self.http_get(url) + images = self.re.search(r'chapters\s*=\s*(\{.+\})\s*;', content) + if images: + return self._images(images.group(1)) + return [] + + def get_cover(self) -> str: + return self._cover_from_content('.cover > a > img') + + def book_meta(self) -> dict: + # todo meta + pass + + def chapter_for_json(self): + return self.get_url() + + +main = PururinIo diff --git a/manga-py-stable_1.x/manga_py/providers/puzzmos_com.py b/manga-py-stable_1.x/manga_py/providers/puzzmos_com.py new file mode 100644 index 0000000..d4e9f03 --- /dev/null +++ 
b/manga-py-stable_1.x/manga_py/providers/puzzmos_com.py @@ -0,0 +1,40 @@ +from manga_py.provider import Provider +from .helpers.std import Std + + +class PuzzmosCom(Provider, Std): + + def get_chapter_index(self) -> str: + chapter = self.chapter + idx = self.re.search('/manga/[^/]+/([^/]+)', chapter) + return '-'.join(idx.group(1).split('.')) + + def get_main_content(self): + return self._get_content('{}/manga/{}') + + def get_manga_name(self) -> str: + return self._get_name('/manga/([^/]+)') + + def get_chapters(self): + return self._elements('#bolumler td:first-child a') + + def get_files(self): + img_selector = '.chapter-content img.chapter-img' + url = self.chapter + parser = self.html_fromstring(url) + pages = parser.cssselect('.col-md-12 > .text-center > select option + option') + images = self._images_helper(parser, img_selector) + for i in pages: + parser = self.html_fromstring(i.get('value')) + images += self._images_helper(parser, img_selector) + return images + + def get_cover(self) -> str: + return self._cover_from_content('img.thumbnail.manga-cover') + + def book_meta(self) -> dict: + # todo meta + pass + + +main = PuzzmosCom diff --git a/manga-py-stable_1.x/manga_py/providers/pzykosis666hfansub_com.py b/manga-py-stable_1.x/manga_py/providers/pzykosis666hfansub_com.py new file mode 100644 index 0000000..c4cd630 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/pzykosis666hfansub_com.py @@ -0,0 +1,9 @@ +from .read_powermanga_org import ReadPowerMangaOrg + + +class Pzykosis666HFansubCom(ReadPowerMangaOrg): + _name_re = '/online/[^/]+/([^/]+)/' + _content_str = '{}/online/series/{}/' + + +main = Pzykosis666HFansubCom diff --git a/manga-py-stable_1.x/manga_py/providers/ravens_scans_com.py b/manga-py-stable_1.x/manga_py/providers/ravens_scans_com.py new file mode 100644 index 0000000..a7756ae --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/ravens_scans_com.py @@ -0,0 +1,24 @@ +from .gomanga_co import GoMangaCo +from .helpers.std import Std + + +class RavensScansCom(GoMangaCo, Std): + _name_re = '/(?:serie|read)/([^/]+)' + __api_url = '/lector/api/v2/comic?stub=' + + def get_main_content(self): + url = '{}{}{}'.format(self.domain, self.__api_url, self.manga_name) + return self.json.loads(self.http_get(url)).get('languages', []) + + def get_chapters(self): + items = [] + for i in self.content: + url = '{}{}{}&lang={}'.format(self.domain, self.__api_url, self.manga_name, i) + items += self.json.loads(self.http_get(url)).get('chapters', []) + return [i.get('href') for i in items[::-1]] # DON'T TOUCH THIS! 
+ + def get_cover(self) -> str: + return self.content.get('fullsized_thumb_url', None) + + main = RavensScansCom diff --git a/manga-py-stable_1.x/manga_py/providers/raw_senmanga_com.py b/manga-py-stable_1.x/manga_py/providers/raw_senmanga_com.py new file mode 100644 index 0000000..94cd462 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/raw_senmanga_com.py @@ -0,0 +1,15 @@ +from .senmanga_com import SenMangaCom + + +class RawSenmangaCom(SenMangaCom): + + def get_archive_name(self) -> str: + return self.get_chapter_index() + + def get_chapter_index(self): + ch = self.chapter + re = r'\.com/[^/]+/([^/]+)' + return self.re.search(re, ch).group(1) # return the matched chapter slug, not the Match object + + +main = RawSenmangaCom diff --git a/manga-py-stable_1.x/manga_py/providers/rawdevart_com.py b/manga-py-stable_1.x/manga_py/providers/rawdevart_com.py new file mode 100644 index 0000000..4330468 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/rawdevart_com.py @@ -0,0 +1,33 @@ +from manga_py.provider import Provider +from .helpers.std import Std + + +class RawDevArtCom(Provider, Std): + _chapter_selector = r'/chapter/[^\d]+(\d+(?:\.\d+)?)' + + def get_chapter_index(self) -> str: + idx = self.re.search(self._chapter_selector, self.chapter) + return '-'.join(idx.group(1).split('.')) + + def get_main_content(self): + return self._get_content('{}/manga/{}') + + def get_manga_name(self) -> str: + return self._get_name('/manga/([^/]+)') + + def get_chapters(self): + return self._elements('.wp-manga-chapter > a') + + def get_files(self): + parser = self.html_fromstring(self.chapter) + return self._images_helper(parser, '.page-break img.wp-manga-chapter-img') + + def get_cover(self) -> str: + return self._cover_from_content('.summary_image img.img-responsive') + + def book_meta(self) -> dict: + # todo meta + pass + + +main = RawDevArtCom diff --git a/manga-py-stable_1.x/manga_py/providers/rawlh_com.py b/manga-py-stable_1.x/manga_py/providers/rawlh_com.py new file mode 100644 index 0000000..0bc949f --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/rawlh_com.py @@ -0,0 +1,43 @@ +from manga_py.provider import Provider +from .helpers.std import Std + + +class RawLHCom(Provider, Std): + _root_uri = None + + def get_chapter_index(self) -> str: + re = self.re.compile(r'-chapter-(.+)\.html') + return re.search(self.chapter).group(1) + + def get_main_content(self): + content = self._storage.get('main_content', None) + if content is not None: + return content + return self.http_get(self._root_uri) + + def get_manga_name(self) -> str: + url = self.get_url() + if ~url.find('/read-'): + title = self.html_fromstring(url, '.navbar-brand.manga-name', 0) + self._root_uri = self.http().normalize_uri(title.get('href')) + else: + self._root_uri = url + title = self.document_fromstring(self.content, '.manga-info h1', 0) + return title.text_content().strip(' \t\r\n\0') + + def get_chapters(self): + return self._elements('#tab-chapper a.chapter') + + def get_files(self): + parser = self.html_fromstring(self.chapter) + return self._images_helper(parser, '.chapter-content img.chapter-img') + + def get_cover(self) -> str: + return self._cover_from_content('.info-cover img') + + def book_meta(self) -> dict: + # todo meta + pass + + +main = RawLHCom diff --git a/manga-py-stable_1.x/manga_py/providers/rawneko_com.py b/manga-py-stable_1.x/manga_py/providers/rawneko_com.py new file mode 100644 index 0000000..e962260 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/rawneko_com.py @@ -0,0 +1,9 @@ +from .helpers.std import Std +from .rawdevart_com import
RawDevArtCom + + +class RawNekoCom(RawDevArtCom, Std): + _chapter_selector = r'/chapter-(\d+(?:-\d+)?)' + + +main = RawNekoCom diff --git a/manga-py-stable_1.x/manga_py/providers/read_egscans_com.py b/manga-py-stable_1.x/manga_py/providers/read_egscans_com.py new file mode 100644 index 0000000..ac7d5ac --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/read_egscans_com.py @@ -0,0 +1,38 @@ +from manga_py.provider import Provider +from .helpers.std import Std + + +class ReadEgScansCom(Provider, Std): + + def get_chapter_index(self) -> str: + idx = self.re.search(r'/Chapter_(\d+)(.*)', self.chapter) + return self._join_groups(idx.groups()) + + def get_main_content(self): + return self._get_content('{}/{}') + + def get_manga_name(self) -> str: + return self._get_name(r'\.com/([^/]+)') + + def get_chapters(self): + parser = self.document_fromstring(self.content) + items = self._first_select_options(parser, 'select[name="chapter"]', False) + url = '%s/%s/{}' % (self.domain, self.manga_name) + return [url.format(i.get('value')) for i in items[::-1]] + + def get_files(self): + url = self.chapter + content = self.http_get(url) + items = self.re.findall(r'img_url\.push\s?\(\s?\'(.+)\'\s?\)', content) + domain = self.domain + return ['{}/{}'.format(domain, i) for i in items] + + def get_cover(self) -> str: + pass + + def book_meta(self) -> dict: + # todo meta + pass + + +main = ReadEgScansCom diff --git a/manga-py-stable_1.x/manga_py/providers/read_powermanga_org.py b/manga-py-stable_1.x/manga_py/providers/read_powermanga_org.py new file mode 100644 index 0000000..39e2f1a --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/read_powermanga_org.py @@ -0,0 +1,15 @@ +from .gomanga_co import GoMangaCo + + +class ReadPowerMangaOrg(GoMangaCo): + _name_re = '[^/]/[^/]+/([^/]+)' + _content_str = '{}/series/{}/' + + def get_chapter_index(self) -> str: + url = self.chapter + index_re = r'/rea\w+/[^/]+/(?:[^/]+/)?(\d+/\d+(?:/\d+)?)' + group = self.re.search(index_re, url).group(1) + return group.replace('/', '-') + + +main = ReadPowerMangaOrg diff --git a/manga-py-stable_1.x/manga_py/providers/read_yagami_me.py b/manga-py-stable_1.x/manga_py/providers/read_yagami_me.py new file mode 100644 index 0000000..4ddbb34 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/read_yagami_me.py @@ -0,0 +1,34 @@ +from .gomanga_co import GoMangaCo + + +class ReadYagamiMe(GoMangaCo): + _name_re = '/read/[^/]+/([^/]+)/' + _content_str = '{}/series/{}/' + _chapter_re = r'/rea\w+/[^/]+/(?:[^/]+/)?(\d+/\d+(?:/\d+)?)' + + def get_main_content(self): + return self.http_get(self.get_url()) + + def get_manga_name(self) -> str: + e = self._elements('h1.title')[0] + return e.text.strip() + + def get_files(self): + try: + return super().get_files() + except AttributeError: + # web manga + parser = self.document_fromstring(self._go_chapter_content) + return self._images_helper(parser, '.web_pictures > img') + + def _get_json_selector(self, content): + return r'pages\s*=\s*(\[.+?\])' + + def prepare_cookies(self): + response = self.http().requests(method='post', data={'adult': 'true'}, url=self.get_url()) + cookies = response.cookies.items() + for i in cookies: + self._storage['cookies'][i[0]] = i[1] + + +main = ReadYagamiMe diff --git a/manga-py-stable_1.x/manga_py/providers/readcomicbooksonline_org.py b/manga-py-stable_1.x/manga_py/providers/readcomicbooksonline_org.py new file mode 100644 index 0000000..35dd692 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/readcomicbooksonline_org.py @@ -0,0 +1,53 @@ 
+from manga_py.provider import Provider +from .helpers.std import Std + + +class ReadComicBooksOnlineOrg(Provider, Std): + _name_re = r'\.(?:org|net)/(?:reader/)?([^/]+)' + + def get_chapter_index(self) -> str: + idx = self.re.search(r'/reader/[^/]+/[^/]+_(\d+(?:[\./]\d+)?)', self.chapter) + if not idx: + idx = self.re.search(r'/reader/[^/]+_(\d+(?:[\./]\d+)?)', self.chapter) + return '-'.join(self.re.split(r'[/\.]', idx.group(1))) + + def get_main_content(self): + if ~self.get_url().find('/reader/'): + _url = self.html_fromstring(self.get_url(), 'td .verse a', 0).get('href') + self._params['url'] = _url + return self.http_get('{}/{}'.format(self.domain, self._get_name(self._name_re))) + + def get_manga_name(self) -> str: + return self._get_name(self._name_re) + + def get_chapters(self): + return self._elements('#chapterlist .chapter > a') + + def prepare_cookies(self): + self._storage['domain_uri'] = self.domain.replace('/www.', '/') + + def _get_image(self, parser): + src = parser.cssselect('#omv td > img') # selector assumed; mirrors the one used in get_files + if not src: + return None + return '{}/reader/{}'.format(self.domain, src[0].get('src')) + + def get_files(self): + parser = self.html_fromstring(self.chapter + '?q=fullchapter') + base = parser.cssselect('base') + if base is not None and len(base): + base = base[0].get('href') + else: + base = None + n = self.http().normalize_uri + return [n(i, base) for i in self._images_helper(parser, '#omv td > img')] + + def get_cover(self): + return self._cover_from_content('.pic > img.series') + + def book_meta(self) -> dict: + # todo meta + pass + + +main = ReadComicBooksOnlineOrg diff --git a/manga-py-stable_1.x/manga_py/providers/readcomicbooksonline_org_manga.py b/manga-py-stable_1.x/manga_py/providers/readcomicbooksonline_org_manga.py new file mode 100644 index 0000000..e94a8b2 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/readcomicbooksonline_org_manga.py @@ -0,0 +1,38 @@ +from manga_py.provider import Provider +from .helpers.std import Std + + +class ReadComicBooksOnlineOrg(Provider, Std): + + def get_chapter_index(self) -> str: + idx = self.re.search(r'/manga/[^/]+/[^/]+[-_](\d+(?:\.\d+)?)', self.chapter) + return idx.group(1).replace('.', '-') + + def get_main_content(self): + return self._get_content('{}/manga/{}') + + def get_manga_name(self) -> str: + return self._get_name(r'/manga/([^/]+)') + + def get_chapters(self): + return self._elements('#mangachapterlist .chapter > a') + + def _get_image(self, parser): + src = parser.cssselect('a > img.mangapic') + if not src: + return None + return '{}/reader/{}'.format(self.domain, src[0].get('src')) + + def get_files(self): + parser = self.html_fromstring(self.chapter) + return self._images_helper(parser, 'img.mangapic') + + def get_cover(self): + return self._cover_from_content('.field-item > a > img') + + def book_meta(self) -> dict: + # todo meta + pass + + +main = ReadComicBooksOnlineOrg diff --git a/manga-py-stable_1.x/manga_py/providers/readcomiconline_to.py b/manga-py-stable_1.x/manga_py/providers/readcomiconline_to.py new file mode 100644 index 0000000..fee5625 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/readcomiconline_to.py @@ -0,0 +1,35 @@ +from manga_py.provider import Provider +from .helpers.std import Std + + +class ReadComicOnlineTo(Provider, Std): + def get_archive_name(self) -> str: + chapter = self.re.search(r'id=(\d+)', self.chapter).group(1) + return self.normal_arc_name([self.chapter_id, chapter]) + + def get_chapter_index(self, no_increment=False) -> str: + return str(self.chapter_id) + +
def get_main_content(self): + return self._get_content(r'{}/Comic/{}') + + def get_manga_name(self) -> str: + return self._get_name(r'/Comic/([^/]+)') + + def get_chapters(self): + return self._elements('table.listing td > a') + + def prepare_cookies(self): + self.cf_protect(self.get_url()) + self._storage['cookies']['rco_quality'] = 'hq' + + def get_files(self): + content = self.http_get(self.chapter + '&readType=1') + items = self.re.findall(r'lstImages.push\("([^"]+)"\)', content) + return items + + def get_cover(self): + return self._cover_from_content('.rightBox .barContent img[width]') + + +main = ReadComicOnlineTo diff --git a/manga-py-stable_1.x/manga_py/providers/readcomicsonline_ru.py b/manga-py-stable_1.x/manga_py/providers/readcomicsonline_ru.py new file mode 100644 index 0000000..13bbcdd --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/readcomicsonline_ru.py @@ -0,0 +1,27 @@ +from manga_py.provider import Provider +from .helpers.std import Std + + +class ReadComicsOnlineRu(Provider, Std): + def get_chapter_index(self, no_increment=False) -> str: + chapter = self.re.search(r'/([\w\d_-]+)$', self.chapter).group(1) + return chapter.replace('.', '-').replace('_', '-') + + def get_main_content(self): + return self._get_content(r'{}/comic/{}') + + def get_manga_name(self) -> str: + return self._get_name(r'/comic/([^/]+)') + + def get_chapters(self): + return self._elements('.chapters .chapter-title-rtl a') + + def get_files(self): + parser = self.html_fromstring(self.chapter) + return self._images_helper(parser, '#all > img', 'data-src') + + def get_cover(self): + return self._cover_from_content('.boxed .img-responsive') + + +main = ReadComicsOnlineRu diff --git a/manga-py-stable_1.x/manga_py/providers/reader_championscans_com.py b/manga-py-stable_1.x/manga_py/providers/reader_championscans_com.py new file mode 100644 index 0000000..980374f --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/reader_championscans_com.py @@ -0,0 +1,13 @@ +from .read_powermanga_org import ReadPowerMangaOrg + + +class ReaderChampionScansCom(ReadPowerMangaOrg): + + def get_chapter_index(self): + idx = super().get_chapter_index().split('-') + if idx[0] == '0': + del idx[0] + return '-'.join(idx) + + +main = ReaderChampionScansCom diff --git a/manga-py-stable_1.x/manga_py/providers/reader_imangascans_org.py b/manga-py-stable_1.x/manga_py/providers/reader_imangascans_org.py new file mode 100644 index 0000000..39be441 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/reader_imangascans_org.py @@ -0,0 +1,43 @@ +from manga_py.provider import Provider +from .helpers.std import Std + + +class ReaderIMangaScansOrg(Provider, Std): + + def get_archive_name(self) -> str: + return self.normal_arc_name(self.get_chapter_index()) + + def get_chapter_index(self) -> str: + re = self.re.search(r'://.+?/[^/]+/([^/]+)', self.chapter) + return re.group(1) + + def get_main_content(self): + return self._get_content('{}/{}') + + def get_manga_name(self) -> str: + return self._get_name(r'://.+?/([^/]+)') + + def get_chapters(self): + sel = '.subnav-bind-top > .controls > div > .dropdown-menu li a' + return self._elements(sel) + + def get_files(self): + try: + content = self.http_get(self.chapter) + items = self.re.search(r'var\s+pages\s*=\s*(\[.+\])', content) + items = self.json.loads(items.group(1)) + url = items[0] + del items[0] + return ['{}/{}{}'.format(self.domain, url, i) for i in items] + except Exception: + return [] + + def get_cover(self) -> str: + pass + + def book_meta(self) -> dict: + # 
todo meta + pass + + +main = ReaderIMangaScansOrg diff --git a/manga-py-stable_1.x/manga_py/providers/readhentaimanga_com.py b/manga-py-stable_1.x/manga_py/providers/readhentaimanga_com.py new file mode 100644 index 0000000..3f54017 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/readhentaimanga_com.py @@ -0,0 +1,39 @@ +from urllib.parse import unquote_plus + +from manga_py.provider import Provider +from .helpers.std import Std + + +class ReadHentaiMangaCom(Provider, Std): + + def get_archive_name(self) -> str: + return self.normal_arc_name(self.get_chapter_index()) + + def get_chapter_index(self) -> str: + return self.re.search(r'\.com/[^/]+/([^/]+)', self.chapter).group(1) + + def get_main_content(self): + return self._get_content('{}/{}/') + + def get_manga_name(self) -> str: + return self._get_name(r'\.com/([^/]+)') + + def get_chapters(self): + return self._elements('ul.lst a.lst') + + def get_files(self): + content = self.http_get(self.chapter) + escaped_images = self.re.search(r'_img_lst\s*=.+?unescape\(\'(.+)\'\)', content) + if escaped_images: + return self.json.loads(unquote_plus(escaped_images.group(1))) + return [] + + def get_cover(self) -> str: + return self._cover_from_content('img.cvr') + + def book_meta(self) -> dict: + # todo meta + pass + + +main = ReadHentaiMangaCom diff --git a/manga-py-stable_1.x/manga_py/providers/readmanga_eu.py b/manga-py-stable_1.x/manga_py/providers/readmanga_eu.py new file mode 100644 index 0000000..2da3ba3 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/readmanga_eu.py @@ -0,0 +1,44 @@ +from manga_py.provider import Provider +from .helpers.std import Std + + +class ReadMangaEu(Provider, Std): + + def get_chapter_index(self) -> str: + idx = self.re.search('/manga/\d+/[^/]+/([^/]+)', self.chapter) + return '-'.join(idx.group(1).split('.')) + + def get_main_content(self): + name = self._get_name('/(manga/\d+/[^/]+)') + return self.http_get('{}/{}'.format(self.domain, name)) + + def get_manga_name(self) -> str: + return self._get_name('/manga/\d+/([^/]+)') + + def get_chapters(self): + selector = '#chapters_b a[href*="/manga/"]' + return self._elements(selector) + + def parse_files(self, parser): + images_class = '.mainContent img.ebook_img' + return self._images_helper(parser, images_class) + + def get_files(self): + parser = self.html_fromstring(self.chapter) + pages = parser.cssselect('#jumpto > option + option') + images = self.parse_files(parser) + for i in pages: + url = self.http().normalize_uri(i.get('value')) + parser = self.html_fromstring(url) + images += self.parse_files(parser) + return images + + def get_cover(self): + return self._cover_from_content('.ebook_cover') + + def book_meta(self) -> dict: + # todo meta + pass + + +main = ReadMangaEu diff --git a/manga-py-stable_1.x/manga_py/providers/readmanga_me.py b/manga-py-stable_1.x/manga_py/providers/readmanga_me.py new file mode 100644 index 0000000..5c31675 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/readmanga_me.py @@ -0,0 +1,43 @@ +from manga_py.provider import Provider +from .helpers.std import Std + + +class ReadmangaMe(Provider, Std): + def get_archive_name(self) -> str: + idx = self.get_chapter_index() + vol, ch = idx.split('-') + return self.normal_arc_name({'vol': vol, 'ch': ch}) + + def get_chapter_index(self): + _re = r'/.+/(?:vol)?([^/]+/[^/]+)(?:/|\?ma?t)?' 
+ name = self.re.search(_re, self.chapter).group(1) + if ~name.find('?'): + name = name[:name.find('?')] + return name.replace('/', '-') + + def get_main_content(self): + return self._get_content('{}/{}?mature=1&mtr=1') + + def get_manga_name(self): + return self._get_name(r'\.me/([^/]+)') + + def get_chapters(self): + return self._elements('div.chapters-link tr > td > a') + + def get_files(self): + _uri = self.http().normalize_uri(self.chapter) + content = self.http_get(_uri) + result = self.re.search(r'rm_h\.init.+?(\[\[.+\]\])', content, self.re.M) + if not result: + return [] + return [i[1] + i[0] + i[2] for i in self.json.loads(result.groups()[0].replace("'", '"'))] + + def get_cover(self): + return self._cover_from_content('.picture-fotorama > img') + + def book_meta(self) -> dict: + # todo meta + pass + + +main = ReadmangaMe diff --git a/manga-py-stable_1.x/manga_py/providers/readmng_com.py b/manga-py-stable_1.x/manga_py/providers/readmng_com.py new file mode 100644 index 0000000..dc00039 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/readmng_com.py @@ -0,0 +1,37 @@ +from manga_py.provider import Provider +from .helpers.std import Std + + +class ReadMngCom(Provider, Std): + + def get_chapter_index(self) -> str: + ch = self.chapter + re = r'\.com/[^/]+/(\d+(?:\.\d+)?)/?' + idx = self.re.search(re, ch) + if not idx: + return self.re.search(r'\.com/[^/]+/([^/]+)', ch).group(1) + return '-'.join(idx.group(1).split('.')) + + def get_main_content(self): + return self._get_content('{}/{}') + + def get_manga_name(self) -> str: + return self._get_name(r'\.com/([^/]+)') + + def get_chapters(self): + return self._elements('.chp_lst li > a') + + def get_files(self): + url = self.chapter + '/all-pages' + parser = self.html_fromstring(url) + return self._images_helper(parser, '.content-list img.img-responsive') + + def get_cover(self) -> str: + return self._cover_from_content('.panel-body img.img-responsive') + + def book_meta(self) -> dict: + # todo meta + pass + + +main = ReadMngCom diff --git a/manga-py-stable_1.x/manga_py/providers/readms_net.py b/manga-py-stable_1.x/manga_py/providers/readms_net.py new file mode 100644 index 0000000..454320f --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/readms_net.py @@ -0,0 +1,45 @@ +from manga_py.provider import Provider +from .helpers.std import Std + + +class ReadMsNet(Provider, Std): + + def get_archive_name(self) -> str: + idx = self.re.search('/r/[^/]+/([^/]+)/([^/]+)', self.chapter).groups() + return self.normal_arc_name(idx) + + def get_chapter_index(self) -> str: + idx = self.re.search('/r/[^/]+/[^/]+/([^/]+)', self.chapter) + return idx.group(1) + + def get_main_content(self): + return self._get_content('{}/manga/{}') + + def get_manga_name(self) -> str: + return self._get_name(r'\.net/(?:manga|r)/([^/]+)') + + def get_chapters(self): + return self._elements('.table-striped td > a') + + def get_files(self): + img_selector = 'img#manga-page' + parser = self.html_fromstring(self.chapter) + img = self._images_helper(parser, img_selector) + images = [] + img and images.extend(img) # _images_helper returns a list of urls + pages = parser.cssselect('.btn-reader-page .dropdown-menu li + li a') + for i in pages: + parser = self.html_fromstring(self.http().normalize_uri(i.get('href'))) + img = self._images_helper(parser, img_selector) + img and images.extend(img) + return images + + def get_cover(self): + pass # FIXME HOME + + def book_meta(self) -> dict: + # todo meta + pass + + +main = ReadMsNet diff --git a/manga-py-stable_1.x/manga_py/providers/remanga_org.py
b/manga-py-stable_1.x/manga_py/providers/remanga_org.py new file mode 100644 index 0000000..ef13ae6 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/remanga_org.py @@ -0,0 +1,33 @@ +from manga_py.provider import Provider +from .helpers.std import Std + + +class ReMangaOrg(Provider, Std): + + def get_chapter_index(self) -> str: + idx = self.re.search(r'/vol(\d+)/(\d+(?:\.\d+)?)', self.chapter) + return '{}-{}'.format( + *idx.groups() + ).replace('.', '-') + + def get_main_content(self): + return self._get_content('{}/manga/{}') + + def get_manga_name(self) -> str: + return self._get_name('/manga/([^/]+)') + + def get_chapters(self): + return self._elements('.chapter-list a.row') + + def get_files(self): + n = self.http().normalize_uri + content = self.http_get(self.chapter) + match = self.re.search('all_pages = (\[.+?\]);', content).group(1) + images = self.json.loads(match.replace("'", '"')) + return [n(i.get('link')) for i in images] + + def get_cover(self) -> str: + return self._cover_from_content('.item img.head') + + +main = ReMangaOrg diff --git a/manga-py-stable_1.x/manga_py/providers/riceballicious_info.py b/manga-py-stable_1.x/manga_py/providers/riceballicious_info.py new file mode 100644 index 0000000..740b008 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/riceballicious_info.py @@ -0,0 +1,10 @@ +from .gomanga_co import GoMangaCo +from .helpers.std import Std + + +class RiceBallIciousInfo(GoMangaCo, Std): + _name_re = '/fs/reader/[^/]+/([^/]+)/' + _content_str = '{}/fs/reader/series/{}/' + + +main = RiceBallIciousInfo diff --git a/manga-py-stable_1.x/manga_py/providers/rocaca_com.py b/manga-py-stable_1.x/manga_py/providers/rocaca_com.py new file mode 100644 index 0000000..3050b39 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/rocaca_com.py @@ -0,0 +1,29 @@ +from .mangago_me import MangaGoMe + + +class RocacaCom(MangaGoMe): + + def get_chapter_index(self) -> str: + re = self.re.search(r'/.+?chapter-(\d+(?:\.\d+)?)', self.chapter) + if not re: + re = self.re.search(r'/.+?Ch(\d+(?:\.\d+)?)', self.chapter) + if not re: + re = self.re.search(r'/c(\d+(?:\.\d+)?)', self.chapter) + if not re: + re = self.re.search(r'/(\d+(?:\.\d+)?)/', self.chapter) + return re.group(1).replace('.', '-') + + def get_main_content(self): + return self._get_content('{}/manga/{}/') + + def get_manga_name(self) -> str: + return self._get_name('/manga/([^/]+)') + + def get_chapters(self): + return self._elements('.section-list > table a.chico') + + def get_cover(self) -> str: + return self._cover_from_content('.cartoon-intro > img.pic') + + +main = RocacaCom diff --git a/manga-py-stable_1.x/manga_py/providers/santosfansub_com.py b/manga-py-stable_1.x/manga_py/providers/santosfansub_com.py new file mode 100644 index 0000000..c2967fb --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/santosfansub_com.py @@ -0,0 +1,9 @@ +from .helveticascans_com import HelveticaScansCom + + +class SantosFansubCom(HelveticaScansCom): + _name_re = '/slide/[^/]+/([^/]+)/' + _content_str = '{}/slide/series/{}/' + + +main = SantosFansubCom diff --git a/manga-py-stable_1.x/manga_py/providers/selfmanga_ru.py b/manga-py-stable_1.x/manga_py/providers/selfmanga_ru.py new file mode 100644 index 0000000..48e7fcb --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/selfmanga_ru.py @@ -0,0 +1,10 @@ +from .readmanga_me import ReadmangaMe + + +class SelfMangaRu(ReadmangaMe): + + def get_manga_name(self): + return self._get_name(r'\.ru/([^/]+)') + + +main = SelfMangaRu diff --git 
a/manga-py-stable_1.x/manga_py/providers/senmanga_com.py b/manga-py-stable_1.x/manga_py/providers/senmanga_com.py new file mode 100644 index 0000000..c088b29 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/senmanga_com.py @@ -0,0 +1,46 @@ +from manga_py.provider import Provider +from .helpers.std import Std + + +class SenMangaCom(Provider, Std): + + def get_chapter_index(self) -> str: + ch = self.chapter + re = r'\.com/[^/]+/(\d+)([^/\d][^/]*)?/' + idx = self.re.search(re, ch).groups() + fmt = '{}' + if idx[1]: + fmt += '-{}' + return fmt.format(*idx) + + def get_main_content(self): + return self._get_content('{}/{}') + + def get_manga_name(self) -> str: + return self._get_name(r'\.com/([^/]+)') + + def get_chapters(self): + return self._elements('.list .element > .title > a')[::-1] + + def get_files(self): + url = self.chapter + parser = self.html_fromstring(url) + pages = self._first_select_options(parser, 'select[name="page"]') + src = parser.cssselect('#picture')[0].get('src') + images = [src] + for i in pages: + images.append(self.re.sub(r'\d+\?token', i.get('value') + '?token', src)) + return images + + def prepare_cookies(self): + self._base_cookies() + + def get_cover(self) -> str: + return self._cover_from_content('.thumbnail > img') + + def book_meta(self) -> dict: + # todo meta + pass + + +main = SenMangaCom diff --git a/manga-py-stable_1.x/manga_py/providers/shakai_ru.py b/manga-py-stable_1.x/manga_py/providers/shakai_ru.py new file mode 100644 index 0000000..476073f --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/shakai_ru.py @@ -0,0 +1,43 @@ +from manga_py.provider import Provider +from .helpers.std import Std + + +class ShakaiRu(Provider, Std): + + def get_chapter_index(self) -> str: + idx = self.chapter.get('data-first') + return idx.replace('_', '-') + + def get_main_content(self): + idx = self._get_name(r'/manga[^/]*/(\d+)') + _ = { + 'dataRun': 'api-manga', + 'dataRequest': idx + } + page_content = str(self.http_post('http://shakai.ru/take/api-manga/request/shakai', data=_)) + return self.json.loads(page_content) + + def get_manga_name(self) -> str: + parser = self.content.get('post', []) + idx = self._get_name(r'/manga[^/]*/(\d+)') + parser = parser[3] if len(parser) > 3 else idx + return parser.split('/')[0].strip() + + def get_chapters(self): + return self.content.get('data', [])[::-1] + + def get_files(self): + chapter = self.chapter + if isinstance(chapter, dict): + return chapter.get('data-second', []) + return [] + + def get_cover(self): + pass # FIXME HOME + + def book_meta(self) -> dict: + # todo meta + pass + + +main = ShakaiRu diff --git a/manga-py-stable_1.x/manga_py/providers/shogakukan_co_jp.py b/manga-py-stable_1.x/manga_py/providers/shogakukan_co_jp.py new file mode 100644 index 0000000..9df6e7e --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/shogakukan_co_jp.py @@ -0,0 +1,42 @@ +from .helpers.std import Std +from .shogakukan_tameshiyo_me import ShogakukanTameshiyoMe + + +class ShogakukanCoJp(ShogakukanTameshiyoMe, Std): + + def get_archive_name(self) -> str: + return self.normal_arc_name([ + self.get_chapter_index(), + self.re.search(r'/(\d+)', self.chapter).group(1) + ]) + + def get_chapter_index(self) -> str: + return str(self.chapter_id) + + def get_main_content(self): + types = [ + 'magazines/series', + 'books' + ] + re = r'(/(?:{})/\d+)'.format('|'.join(types)) + url = self.re.search(re, self.get_url()).group(1) + return self.http_get(self.domain + url) + + def get_manga_name(self) -> str: + return 
self._get_name(r'/(?:series|books)/(\d+)') + + def get_chapters(self): + return self._elements('a[href*="shogakukan.tameshiyo.me"]') # todo: watch this + + def get_cover(self) -> str: + img = self._cover_from_content('.mainimg01') + if not img: + img = self._cover_from_content('.image01 > img', 'data-original') + return img + + def book_meta(self) -> dict: + # todo meta + pass + + +main = ShogakukanCoJp diff --git a/manga-py-stable_1.x/manga_py/providers/shogakukan_tameshiyo_me.py b/manga-py-stable_1.x/manga_py/providers/shogakukan_tameshiyo_me.py new file mode 100644 index 0000000..5c4a173 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/shogakukan_tameshiyo_me.py @@ -0,0 +1,79 @@ +from manga_py.crypt import BaseLib +from manga_py.provider import Provider +from .helpers.std import Std + + +class ShogakukanTameshiyoMe(Provider, Std): + __local_storage = None + _site = 'https://shogakukan.tameshiyo.me' + img_url = '/imgDeliver?gcode=' + + def get_archive_name(self) -> str: + return 'archive' + + def get_chapter_index(self) -> str: + return '0' + + def get_main_content(self): + pass + + def get_manga_name(self) -> str: + return self._get_name('/(\d+)') + + def get_chapters(self): + return [b''] + + def before_file_save(self, url, idx): + _url = self._site + self.img_url + self.__local_storage['code'] + _url = self.http_post(_url, data={ + 'base64': '1', + 'vsid': self.__local_storage['vsid'], + 'trgCode': url, + }, headers={'Referer': self._site}) + return _url + + def save_file(self, idx=None, callback=None, url=None, in_arc_name=None): + if url is None: + _url = self.get_current_file() + else: + _url = url + + _path, idx, _url = self._save_file_params_helper(_url, idx) + _path += '.jpg' + + with open(_path, 'wb') as file: + file.write(BaseLib.base64decode(_url)) + + callable(callback) and callback() + self.after_file_save(_path, idx) + self._archive.add_file(_path, in_arc_name) + + return _path + + @property + def chapter_url(self): + url = self.chapter + if isinstance(url, bytes): + url = self.get_url() + return url + + def get_files(self): + parser = self.html_fromstring(self.chapter_url) + self.__local_storage = { + 'vsid': parser.cssselect('input[data-key="vsid"]')[0].get('value'), + 'code': parser.cssselect('input[data-key="isbn1kan"]')[0].get('value'), + } + return [i.get('value') for i in parser.cssselect('input[data-key="imageCodes"]')] + + def get_cover(self) -> str: + pass + + def book_meta(self) -> dict: + # todo meta + pass + + def chapter_for_json(self): + return self.get_url() + + +main = ShogakukanTameshiyoMe diff --git a/manga-py-stable_1.x/manga_py/providers/siberowl_com.py b/manga-py-stable_1.x/manga_py/providers/siberowl_com.py new file mode 100644 index 0000000..0568ecf --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/siberowl_com.py @@ -0,0 +1,52 @@ +from manga_py.provider import Provider +from .helpers.std import Std + + +class SiberOwlCom(Provider, Std): + _main_fmt = '{}/mangas/{}/' + n = None + + def get_chapter_index(self) -> str: + re = self.re.compile(r'/mangas/[^/]+/([^/]+)') + return re.search(self.chapter).group(1).replace('.', '-') + + def get_main_content(self): + return self._get_content(self._main_fmt) + + def get_manga_name(self) -> str: + return self._get_name(r'/mangas/([^/]+)') + + def get_chapters(self): + re = self.re.compile(r'chapString\s*=\s*"(.+)";') + elements = self.document_fromstring( + re.search(self.content).group(1), + 'a' + ) + return ['{}/mangas/{}/{}'.format( + self.domain, + self.manga_name, + i.get('href') + ) for 
i in elements] + + def get_files(self): + content = self.http_get(self.chapter) + re = self.re.search(r'imageUrls\s*=\s*(\[.*\])', content) + items = re.group(1) + if not items: + return [] + items = self.json.loads(self.re.sub(r'(.+)",\]', r'\1"]', items)) + return ['{}{}'.format(self.domain, i) for i in items] + + def get_cover(self) -> str: + re = self.re.compile(r'imageUrl\s*=\s*"(.+)";') + return '{}{}'.format( + self.domain, + re.search(self.content).group(1) + ) + + def book_meta(self) -> dict: + # todo meta + pass + + +main = SiberOwlCom diff --git a/manga-py-stable_1.x/manga_py/providers/sleepypandascans_co.py b/manga-py-stable_1.x/manga_py/providers/sleepypandascans_co.py new file mode 100644 index 0000000..3fb2183 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/sleepypandascans_co.py @@ -0,0 +1,33 @@ +from manga_py.provider import Provider +from .helpers.std import Std + + +class ManhwaCo(Provider, Std): + + def get_chapter_index(self) -> str: + chapter = self.chapter + return self.re.search(r'\.co/Reader/[^/]+/([^/]+)', chapter).group(1) + + def get_main_content(self): + return self._get_content('{}/Series/{}') + + def get_manga_name(self) -> str: + return self._get_name(r'\.co/(?:Series|Reader)/([^/]+)') + + def get_chapters(self): + return self._elements('.list-group .list-group-item') + + def get_files(self): + content = self.http_get(self.chapter) + parser = self.document_fromstring(content) + return self._images_helper(parser, 'img.img-fluid') + + def get_cover(self) -> str: + return self._cover_from_content('img.card-img-top') + + def book_meta(self) -> dict: + # todo meta + pass + + +main = ManhwaCo diff --git a/manga-py-stable_1.x/manga_py/providers/somanga_net.py b/manga-py-stable_1.x/manga_py/providers/somanga_net.py new file mode 100644 index 0000000..ee0ecc9 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/somanga_net.py @@ -0,0 +1,35 @@ +from manga_py.provider import Provider +from .helpers.std import Std + + +class SoMangaNet(Provider, Std): + + def get_archive_name(self) -> str: + return self.normal_arc_name(self.get_chapter_index()) + + def get_chapter_index(self) -> str: + re = self.re.compile('/leitor/[^/]+/([^/]+)') + return re.search(self.chapter).group(1) + + def get_main_content(self): + return self._get_content('{}/manga/{}') + + def get_manga_name(self) -> str: + return self._get_name(r'\.net/[^/]+/([^/]+)') + + def get_chapters(self): + return self._elements('ul.capitulos li > a') + + def get_files(self): + parser = self.html_fromstring(self.chapter) + return self._images_helper(parser, 'img.img-manga') + + def get_cover(self): + return self._cover_from_content('.manga .col-sm-4 .img-responsive') + + def book_meta(self) -> dict: + # todo meta + pass + + +main = SoMangaNet diff --git a/manga-py-stable_1.x/manga_py/providers/subapics_com.py b/manga-py-stable_1.x/manga_py/providers/subapics_com.py new file mode 100644 index 0000000..ff5b1ff --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/subapics_com.py @@ -0,0 +1,23 @@ +from .helpers.std import Std +from .mangashiro_net import MangaShiroNet + + +class SubaPicsCom(MangaShiroNet, Std): + alter_re_name = r'\.(?:com|net)/([^/]+)-chapter-\d+' + chapter_re = r'-chapter-(\d+(?:-\d+)?)' + + def get_cover(self) -> str: + return self._cover_from_content('.imgdesc > img') + + def get_files(self): + url = self.chapter + parser = self.html_fromstring(url) + items = parser.cssselect('#readerarea img') + return [i.get('src') for i in items] + + def book_meta(self) -> dict: + # todo meta + 
pass + + +main = SubaPicsCom diff --git a/manga-py-stable_1.x/manga_py/providers/submanga_online.py b/manga-py-stable_1.x/manga_py/providers/submanga_online.py new file mode 100644 index 0000000..d0cb3ac --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/submanga_online.py @@ -0,0 +1,38 @@ +from manga_py.provider import Provider +from .helpers.std import Std + + +class SubMangaOnline(Provider, Std): + + def get_chapter_index(self) -> str: + re = self.re.compile(r'/manga/[^/]+/(\d+(?:\.\d+)?)') + return re.search(self.chapter).group(1).replace('.', '-') + + def get_main_content(self): + return self._get_content('{}/manga/{}') + + def get_manga_name(self) -> str: + return self._get_name('/manga/([^/]+)') + + def get_chapters(self): + return self._elements('.capitulos-list td > a') + + def get_files(self): + parser = self.html_fromstring(self.chapter) + return self._images_helper(parser, '#all img.img-responsive', 'data-src') + + def get_cover(self) -> str: + return self._cover_from_content('.list-group-item .img-responsive') + + def book_meta(self) -> dict: + return { + 'author': self.text_content(self.content, '.manga .col-sm-8 h5 + h5'), + 'title': self.text_content(self.content, '.manga .col-sm-8 > h2'), + 'annotation': self.text_content(self.content, '.manga .col-sm-8 h5 + .clear20 + div'), + 'keywords': None, + 'cover': self.get_cover(), + 'rating': None, + } + + +main = SubMangaOnline diff --git a/manga-py-stable_1.x/manga_py/providers/sunday_webry_com.py b/manga-py-stable_1.x/manga_py/providers/sunday_webry_com.py new file mode 100644 index 0000000..a3209d9 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/sunday_webry_com.py @@ -0,0 +1,64 @@ +from manga_py.provider import Provider +from .helpers.std import Std + + +class SundayWebryCom(Provider, Std): + cdn_url = None + + def get_archive_name(self) -> str: + return self.get_chapter_index() # Hmm.. 
+ + def get_chapter_index(self) -> str: + return self.re.search(r'cti=([^&]*)', self.chapter).group(1) + + def get_main_content(self): + return self.http_get('{}/series/{}'.format( + self.domain, + self._get_name(r'/series/(\d+)') + )) + + def get_manga_name(self) -> str: + return 'sunday_' + self._get_name(r'/series/(\d+)') + + def _chapters(self, content): + return self._elements('li[id^="product_"] a.button_free', content) + + def get_chapters(self): + pages = self._elements('.pagination') + chapters = self._chapters(self.content) + if pages: + pages = pages[0].cssselect('a:not([class])') + n = self.http().normalize_uri + for i in pages: + chapters += self._chapters(n(i.get('href'))) + return chapters + + def _prepare_urls(self): + cid = self.re.search('cid=([^&]+)', self.chapter).group(1) + license_url = '{}/api4js/contents/license?cid={}'.format(self.domain, cid) + self.cdn_url = self.json.loads(self.http_get(license_url)).get('url', None) + items = self.json.loads(self.http_get(self.cdn_url + 'configuration_pack.json')) + return items.get('configuration', {}).get('contents', []) + + def get_files(self): + items = [] + for i in self._prepare_urls(): + file = '%s/0.jpeg' % i.get('file') + items.append(self.cdn_url + file) + return items + + def after_file_save(self, _path: str, idx: int): # todo issue #36 + pass + + def get_cover(self) -> str: + return self._cover_from_content('#series .image > img') + + def prepare_cookies(self): + self._base_cookies() + + def book_meta(self) -> dict: + # todo meta + pass + + +main = SundayWebryCom diff --git a/manga-py-stable_1.x/manga_py/providers/taadd_com.py b/manga-py-stable_1.x/manga_py/providers/taadd_com.py new file mode 100644 index 0000000..96a57d3 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/taadd_com.py @@ -0,0 +1,56 @@ +from manga_py.provider import Provider +from .helpers.std import Std + + +class TaaddCom(Provider, Std): + __local_storage = None + _name_selector = 'h1.chapter_bar a[href*="/book/"]' + _pages_selector = '#page' + _chapters_selector = '.chapter_list td[align="left"] a' + img_selector = '#comicpic' + + def get_archive_name(self) -> str: + idx = self.get_chapter_index() + return self.remove_not_ascii(self.normal_arc_name([ + self.chapter_id, idx + ])) + + def get_chapter_index(self) -> str: + idx = self.re.search('/chapter/([^/]+)/', self.chapter).group(1) + return idx + + def get_main_content(self): + return self.http_get('{}/book/{}.html'.format(self.domain, self.manga_name)) + + def _re_name(self, url): + return self.re.search(r'/book/([^/]+)\.html', url) + + def get_manga_name(self) -> str: + url = self.get_url() + name = self._re_name(url) + if not name: + name = self.html_fromstring(url, self._name_selector, 0).get('href') + name = self._re_name(name) + return name.group(1) + + def get_chapters(self): + return self._elements(self._chapters_selector) + + def prepare_cookies(self): + self.__local_storage = 0 + + def get_files(self): + parser = self.html_fromstring(self.chapter) + pages = parser.cssselect(self._pages_selector)[0].cssselect('option + option') + images = self._images_helper(parser, self.img_selector) + for i in pages: + c = self.html_fromstring(self.http().normalize_uri(i.get('value'))) + images += self._images_helper(c, self.img_selector) + return images + + def book_meta(self) -> dict: + # todo meta + pass + + +main = TaaddCom diff --git a/manga-py-stable_1.x/manga_py/providers/tapas_io.py b/manga-py-stable_1.x/manga_py/providers/tapas_io.py new file mode 100644 index 0000000..6341efa
--- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/tapas_io.py @@ -0,0 +1,47 @@ +from manga_py.provider import Provider +from .helpers import tapas_io +from .helpers.std import Std + + +class TapasIo(Provider, Std): # TODO: Login\Password + helper = None + + def get_archive_name(self) -> str: + ch = self.chapter + return self.normal_arc_name([ + ch['scene'], + ch['title'] + ]) + + def get_chapter_index(self) -> str: + return str(self.chapter['scene']) + + def get_main_content(self): + content = self._storage.get('main_content', False) + return content if content else self.http_get(self.get_url()) + + def get_manga_name(self) -> str: + return self.re.search(r'seriesTitle\s*:\s*\'(.+)\',', self.content).group(1) + + def get_chapters(self): + items = self.re.search(r'episodeList\s*:\s*(\[.+\]),', self.content).group(1) + return [i for i in self.json.loads(items)[::-1] if not i['locked']] + + def get_files(self): + return self.helper.parse_chapter_content() + + def get_cover(self) -> str: + return self._cover_from_content('#series-thumb img') + + def prepare_cookies(self): + self.helper = tapas_io.TapasIo(self) + + def book_meta(self) -> dict: + # todo meta + pass + + def chapter_for_json(self): + return self.helper.chapter_url() + + +main = TapasIo diff --git a/manga-py-stable_1.x/manga_py/providers/tenmanga_com.py b/manga-py-stable_1.x/manga_py/providers/tenmanga_com.py new file mode 100644 index 0000000..04ce81d --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/tenmanga_com.py @@ -0,0 +1,11 @@ +from .taadd_com import TaaddCom + + +class TenMangaCom(TaaddCom): + _name_selector = '.read-page a[href*="/book/"]' + _pages_selector = '.sl-page' + _chapters_selector = '.chapter-box .choose-page a:last-child' + img_selector = '.pic_box .manga_pic' + + +main = TenMangaCom diff --git a/manga-py-stable_1.x/manga_py/providers/tmofans_com.py b/manga-py-stable_1.x/manga_py/providers/tmofans_com.py new file mode 100644 index 0000000..d2b24f3 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/tmofans_com.py @@ -0,0 +1,64 @@ +from manga_py.provider import Provider +from .helpers.std import Std +from sys import stderr +from manga_py.meta import __downloader_uri__ + + +class TmoFansCom(Provider, Std): + def get_chapter_index(self) -> str: + try: + re = self.re.compile(r'Capítulo (\d+(?:\.\d+)?)') + return re.search(self.chapter[0]).group(1).replace('.', '-') + except IndexError as e: + self.log('\nNot found chapter index.\nURL: {}\nChapter: {}\nPlease, report this bug: {}{}\n'.format( + self.get_url(), + self.chapter[0], + __downloader_uri__, '/issues/new?template=bug_report.md' + ), file=stderr) + + raise e + + def get_main_content(self): + return self.http_get(self.get_url()) + + def get_manga_name(self) -> str: + return self._get_name(r'/manga/\d+/([^/]+)') + + def get_chapters(self): + raw_chapters = self._elements('.list-group .upload-link') + n = self.http().normalize_uri + re = self.re.compile(r'(.+/)') + chapters = [] + for i in raw_chapters: + try: + text = i.cssselect('.btn-collapse')[0].text_content() + link = i.cssselect('.list-group .list-group-item .row a')[-1] + except IndexError: + self.log('Error extract chapter', file=stderr) + continue + request_url = n(link.get('href')) + response = self.http().requests(request_url, method='head') + url = n(re.search(response.headers['Location']).group(1)) + chapters.append(( + text, + url + 'cascade', + )) + + return chapters + + def get_files(self): + url = self.chapter[1] + parser = self.html_fromstring(url) + return 
self._images_helper(parser, 'img.viewer-image') + + def get_cover(self) -> str: + return self._cover_from_content('.book-thumbnail') + + def prepare_cookies(self): + self.cf_protect(self.get_url()) + + def chapter_for_json(self) -> str: + return self.chapter[1] + + +main = TmoFansCom diff --git a/manga-py-stable_1.x/manga_py/providers/tonarinoyj_jp.py b/manga-py-stable_1.x/manga_py/providers/tonarinoyj_jp.py new file mode 100644 index 0000000..18bf3f4 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/tonarinoyj_jp.py @@ -0,0 +1,55 @@ +from manga_py.provider import Provider +from .helpers import tonarinoyj_jp +from .helpers.std import Std + + +class TonariNoYjJp(Provider, Std): + helper = None + + def get_archive_name(self) -> str: + return self.normal_arc_name(self.get_chapter_index()) + + def get_chapter_index(self) -> str: + return str(self.chapter_id) + + def get_main_content(self): + content = self._storage.get('main_content', None) + if content is None: + return self.http_get(self.get_url()) + return content + + def get_manga_name(self) -> str: + h1 = self.document_fromstring(self.content, 'h1.series-header-title') + if h1: + return h1[0].text_content() + return '__Manga__' + + def get_chapters(self): + idx = self.re.search(r'/episode/(\d+)', self.get_url()) + items = self.helper.get_chapters(idx.group(1)) + return ['{}/episode/{}'.format(self.domain, i) for i in items] + + def get_files(self): + doc = self.html_fromstring(self.chapter) + images = [] + # img = doc.cssselect('.link-slot > img') # sometimes 1x1 px + # img and images.append(img[0].get('src')) + images += [i.get('data-src') for i in doc.cssselect('img.js-page-image')] + return images + + def get_cover(self) -> str: + return self._cover_from_content('.link-slot > img') + + def prepare_cookies(self): + self.helper = tonarinoyj_jp.TonariNoYjJp(self) + + def after_file_save(self, _path: str, idx: int): + if idx: + self.helper.solve_image(_path, idx) + + def book_meta(self) -> dict: + # todo meta + pass + + +main = TonariNoYjJp diff --git a/manga-py-stable_1.x/manga_py/providers/toonkor_co.py b/manga-py-stable_1.x/manga_py/providers/toonkor_co.py new file mode 100644 index 0000000..55d43f3 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/toonkor_co.py @@ -0,0 +1,42 @@ +from manga_py.crypt.base_lib import BaseLib +from manga_py.provider import Provider +from .helpers.std import Std + + +class ToonKorCo(Provider, Std): + + def get_archive_name(self) -> str: + return self.normal_arc_name(self.get_chapter_index()) + + def get_chapter_index(self) -> str: + re = self.re.compile(r'\.co/[^_]+_(.+)\.html?') + return re.search(self.chapter).group(1) + + def get_main_content(self): + return self._get_content('{}/{}') + + def get_manga_name(self) -> str: + return self._get_name(r'\.co/([^_]+)') + + def get_chapters(self): + items = self._elements('#fboardlist td.episode__index') + n = self.http().normalize_uri + return [n(i.get('data-role')) for i in items] + + def get_files(self): + content = self.http_get(self.chapter) + imgs = self.re.search(r'toon_img\s=\s["\'](.+?)["\']', content) + if not imgs: + return [] + content = BaseLib.base64decode(imgs.group(1)).decode() + n = self.http().normalize_uri + return [n(i.get('src')) for i in self._elements('img', content)] + + def get_cover(self) -> str: + return self._cover_from_content('.bt_thumb a img') + + def book_meta(self) -> dict: + pass + + +main = ToonKorCo diff --git a/manga-py-stable_1.x/manga_py/providers/translate_webtoons_com.py 
b/manga-py-stable_1.x/manga_py/providers/translate_webtoons_com.py new file mode 100644 index 0000000..1212a45 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/translate_webtoons_com.py @@ -0,0 +1,52 @@ +from manga_py.provider import Provider +from .helpers.std import Std + + +class TranslateWebToonsCom(Provider, Std): + def get_archive_name(self) -> str: + return self.normal_arc_name(self.get_chapter_index()) + + def get_chapter_index(self) -> str: + return self.re.search(r'\bepisodeNo=(\d+)', self.chapter).group(1) + + def get_main_content(self): + return self.http_get(self.get_url()) + + def get_manga_name(self) -> str: + return self.text_content(self.content, 'h3.subj') + + def _chapters(self, content): + return self._elements('.detail_lst > ul > li > a', content) + + @staticmethod + def _filter_chapters(chapters): + result = [] + for item in chapters: + content = item.cssselect('.rate_num.cplt')[0].text_content().strip('\n\t\r \0') + if content == '100%': + result.append(item) + return result + + def get_chapters(self): + pages = self._elements('.paginate > a:not([class])') + chapters = self._chapters(self.content) + if pages: + n = self.http().normalize_uri + for i in pages: + content = self.http_get(n(i.get('href'))) + chapters += self._chapters(content) + return self._filter_chapters(chapters) + + def get_files(self): + parser = self.html_fromstring(self.chapter) + return self._images_helper(parser, '.img_info > img') + + def get_cover(self) -> str: + return self._cover_from_content('.thmb img') + + def book_meta(self) -> dict: + # todo meta + pass + + +main = TranslateWebToonsCom diff --git a/manga-py-stable_1.x/manga_py/providers/trashscanlations_com.py b/manga-py-stable_1.x/manga_py/providers/trashscanlations_com.py new file mode 100644 index 0000000..94102f1 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/trashscanlations_com.py @@ -0,0 +1,18 @@ +from .zeroscans_com import ZeroScansCom + + +class TrashScanlationsCom(ZeroScansCom): + def get_chapter_index(self) -> str: + ch = self.chapter + idx = self.re.search(self._chapter_selector, ch) + idx = self.re.split(r'[^\d]', idx.group(1)) + return '-'.join(idx) + + def get_main_content(self): + return self._get_content('{}/series/{}/') + + def get_manga_name(self) -> str: + return self._get_name('/series/([^/]+)') + + +main = TrashScanlationsCom diff --git a/manga-py-stable_1.x/manga_py/providers/triplesevenscans_com.py b/manga-py-stable_1.x/manga_py/providers/triplesevenscans_com.py new file mode 100644 index 0000000..a818596 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/triplesevenscans_com.py @@ -0,0 +1,9 @@ +from .read_powermanga_org import ReadPowerMangaOrg + + +class TripleSevenScansCom(ReadPowerMangaOrg): + _name_re = '/reader/[^/]+/([^/]+)/' + _content_str = '{}/reader/series/{}/' + + +main = TripleSevenScansCom diff --git a/manga-py-stable_1.x/manga_py/providers/truyen_vnsharing_site.py b/manga-py-stable_1.x/manga_py/providers/truyen_vnsharing_site.py new file mode 100644 index 0000000..e297946 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/truyen_vnsharing_site.py @@ -0,0 +1,40 @@ +from manga_py.provider import Provider +from .helpers.std import Std + + +class TruyenVnsharingSite(Provider, Std): + + def get_chapter_index(self) -> str: + re = self.re.compile(r'_(\d+(?:\.\d+)?)[^\d]?') + ch = self.chapter + return '-'.join(re.search(ch).group(1).split('.')) + + def get_main_content(self): + name = self._get_name('/read/([^/]+/[^/]+/[^/]+)') + url = '{}/index/read/{}' + return 
self.http_get(url.format( + self.domain, + name + )) + + def get_manga_name(self) -> str: + return self._get_name('/read/[^/]+/[^/]+/([^/]+)') + + def get_chapters(self): + return self._elements('#manga-info-list a.title') + + def get_files(self): + parser = self.html_fromstring(self.chapter) + return self._images_helper(parser, '.read_content .br_frame > img') + + def get_cover(self) -> str: + img = self._elements('.info_ava.manga') + if img and len(img): + return self.parse_background(img[0]) + + def book_meta(self) -> dict: + # todo meta + pass + + +main = TruyenVnsharingSite diff --git a/manga-py-stable_1.x/manga_py/providers/truyenchon_com.py b/manga-py-stable_1.x/manga_py/providers/truyenchon_com.py new file mode 100644 index 0000000..049727a --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/truyenchon_com.py @@ -0,0 +1,37 @@ +from manga_py.provider import Provider +from .helpers.std import Std + + +class TruyenChonCom(Provider, Std): + __subtype = None + + def get_chapter_index(self) -> str: + re = self.re.compile(r'/chap.*?-(\d+(?:\.\d+)?)') + return re.search(self.chapter).group(1).replace('.', '-') + + def get_main_content(self): + truyen = 'truyen' + if ~self.domain.find('nettruyen.'): + truyen = 'truyen-tranh' + return self._get_content('{}/%s/{}' % truyen) + + def get_manga_name(self) -> str: + groups = self.re.search(r'/(truyen.*?)/([^/]+)', self.get_url()) + self.__subtype = groups.group(1) + return groups.group(2) + + def get_chapters(self): + return self._elements('.list-chapter .chapter > a') + + def get_files(self): + parser = self.html_fromstring(self.chapter) + return self._images_helper(parser, '.page-chapter > img', 'data-original') + + def get_cover(self) -> str: + return self._cover_from_content('.col-image > img') + + def book_meta(self) -> dict: + pass + + +main = TruyenChonCom diff --git a/manga-py-stable_1.x/manga_py/providers/truyentranhtuan_com.py b/manga-py-stable_1.x/manga_py/providers/truyentranhtuan_com.py new file mode 100644 index 0000000..f4fe75b --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/truyentranhtuan_com.py @@ -0,0 +1,45 @@ +from manga_py.provider import Provider +from .helpers.std import Std + + +class TruyenTranhTuanCom(Provider, Std): + + def get_chapter_index(self) -> str: + idx = self.re.search(r'\.com/[^/]+?-(\d+(?:-\d+)?)', self.chapter) + return idx.group(1) + + def get_main_content(self): + content = self.http_get(self.get_url()) + parser = self.document_fromstring(content, '#read-title a.mangaName') + if parser and len(parser): + return self.http_get(self.http().normalize_uri(parser[0].get('href'))) + return content + + def get_manga_name(self) -> str: + url = self.get_url() + if self.re.search(r'\.com/[^/]+-\d+/', url): + parser = self.html_fromstring(url, '#read-title .mangaName', 0) + url = parser.get('href') + return self.re.search(r'\.com/([^/]+)/', url).group(1) + + def get_chapters(self): + return self._elements('#manga-chapter .chapter-name a') + + def get_files(self): + content = self.http_get(self.chapter) + items = self.re.search(r'slides_page_url_path\s*=\s*(\[.+\])[;,]?', content) + if items: + n = self.http().normalize_uri + items = self.json.loads(items.group(1)) + return [n(i) for i in items] + return [] + + def get_cover(self) -> str: + return self._cover_from_content('.manga-cover img') + + def book_meta(self) -> dict: + # todo meta + pass + + +main = TruyenTranhTuanCom diff --git a/manga-py-stable_1.x/manga_py/providers/tsumino_com.py b/manga-py-stable_1.x/manga_py/providers/tsumino_com.py new 
file mode 100644 index 0000000..82c3ed0 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/tsumino_com.py @@ -0,0 +1,64 @@ +from manga_py.provider import Provider +from .helpers import tsumino_com +from .helpers.std import Std + + +class TsuminoCom(Provider, Std): + __local_storage = None + + def get_archive_name(self) -> str: + return 'archive' + + def get_chapter_index(self) -> str: + return '0' + + def get_main_content(self): + url = self.get_url() + if ~url.find('/Read/'): + url = self.html_fromstring(url, '#backToIndex + a', 0).get('href') + return self.http_get(self.http().normalize_uri(url)) + + def get_manga_name(self) -> str: + return 'tsumino_' + self.re.search(r'/(?:Info|View)/(\d+)', self.get_url()).group(1) + + def get_chapters(self): + return [b''] + + def prepare_cookies(self): + self._base_cookies() + + def get_files(self): + idx = self.re.search(r'/(?:Info|View)/(\d+)', self.get_url()).group(1) + test_url = '{}/Read/View{}'.format(self.domain, idx) + if ~self.http_get(test_url).find('/recaptcha'): + cookies = tsumino_com.TsuminoCom(self).get_cookies(test_url).get_dict() + for i in cookies: + self._storage['cookies'][i] = cookies[i] + + content = self.http_post( + '{}/Read/Load'.format(self.domain, idx), + headers={ + 'X-Requested-With': 'XMLHttpRequest', + 'Referer': '{}/Read/View/{}'.format(self.domain, idx), + 'Pragma': 'no-cache', + 'Cache-Control': 'no-cache', + 'Accept-Language': 'en-US;q=0.8,en;q=0.7', + }, + data={'q': idx} + ) + items = self.json.loads(content).get('reader_page_urls') + d = str(self.domain) + return [d + '/Image/Object?name=' + i for i in items] + + def get_cover(self) -> str: + return self._cover_from_content('img.book-page-image') + + def book_meta(self) -> dict: + # todo meta + pass + + def chapter_for_json(self): + return self.get_url() + + +main = TsuminoCom diff --git a/manga-py-stable_1.x/manga_py/providers/tumangaonline_com.py b/manga-py-stable_1.x/manga_py/providers/tumangaonline_com.py new file mode 100644 index 0000000..969dc00 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/tumangaonline_com.py @@ -0,0 +1,87 @@ +from manga_py.provider import Provider +from .helpers.std import Std + + +class TuMangaOnlineCom(Provider, Std): + + def get_archive_name(self) -> str: + return self.normal_arc_name(self.chapter) + + def get_chapter_index(self) -> str: + return '{}-{}'.format(*self.chapter) + + def get_main_content(self): + url = '{}/api/v1/mangas/{}'.format(self.domain, self._get_id()) + data = self.http_get(url=url, headers=self._get_headers()) + return self.json.loads(data) + + def get_manga_name(self) -> str: + url = self.get_url() + re = r'/mangas/\d+/([^/]+)' + if ~url.find('/lector/'): + re = '/lector/([^/]+)' + return self.re.search(re, url).group(1) + + @staticmethod + def _get_subidas(items, n): + return [(n, i.get('idScan')) for i in items.get('subidas', [])] + + def get_chapters(self): + url = '{}/api/v1/mangas/{}/capitulos?page=1&tomo=-1' + idx = self._get_id() + + url = url.format(self.domain, idx) + data = self.http_get(url=url, headers=self._get_headers()) + items = self.json.loads(data).get('data', []) + + pages = [] + for i in items: + n = i.get('numCapitulo') + pages += self._get_subidas(i, n) + return pages + + def _get_id(self): + url = self.get_url() + re = r'/mangas/(\d+)' + if ~url.find('/lector/'): + re = r'/lector/[^/]+/(\d+)' + return self.re.search(re, url).group(1) + + @staticmethod + def _get_headers(): + return {'Cache-mode': 'no-cache', 'X-Requested-With': 'XMLHttpRequest'} + + def 
prepare_cookies(self): + self._base_cookies() + + def _chapter_url(self): + idx = self._get_id() + domain = self.domain + url = '{}/api/v1/imagenes?idManga={}&idScanlation={}&numeroCapitulo={}&visto=true' + ch = self.chapter + return url.format(domain, idx, *ch) + + def get_files(self): + idx = self._get_id() + ch = self.chapter + data = self.http_get(url=self._chapter_url(), headers=self._get_headers()) + + items = self.json.loads(self.json.loads(data).get('imagenes', '[]')) + url = 'https://img1.tumangaonline.com/subidas/{}/{}/{}/{}' + + return [url.format(idx, *ch, i) for i in items] + + def get_cover(self) -> str: + url = self.domain, self.content.get('imageUrl', None) + if url: + return '{}/{}'.format(self.domain, url) + + def book_meta(self) -> dict: + # todo meta + pass + + def chapter_for_json(self): + return self._chapter_url() + + +main = TuMangaOnlineCom diff --git a/manga-py-stable_1.x/manga_py/providers/unionmangas_net.py b/manga-py-stable_1.x/manga_py/providers/unionmangas_net.py new file mode 100644 index 0000000..bc52a5c --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/unionmangas_net.py @@ -0,0 +1,11 @@ +from .somanga_net import SoMangaNet + + +class UnionMangasNet(SoMangaNet): + + def get_chapters(self): + selector = '.tamanho-bloco-perfil .lancamento-linha a[href*="/leitor/"]' + return self._elements(selector) + + +main = UnionMangasNet diff --git a/manga-py-stable_1.x/manga_py/providers/viz_com.py b/manga-py-stable_1.x/manga_py/providers/viz_com.py new file mode 100644 index 0000000..3caf0f5 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/viz_com.py @@ -0,0 +1,238 @@ +from sys import stderr +from pathlib import Path +from json import loads + +from manga_py import meta +from manga_py.crypt.viz_com import solve +from manga_py.fs import get_util_home_path, path_join, is_file, unlink, file_size +from manga_py.provider import Provider +from .helpers.std import Std + + +class VizCom(Provider, Std): + cookie_file = None + __cookies = {} + __has_auth = False + _continue = True + __is_debug = True + + def get_chapter_index(self) -> str: + # return str(self.chapter_id) + # todo: need tests + idx = str(self.chapter_id) + try: + re = self.re.compile(r'-chapter-(\d+)/') + idx = re.search(self.chapter).group(1) + except AttributeError: + self.log('manga-py can not get the number of the chapter!\nurl: {}'.format(self.chapter), file=stderr) + self.log(' Please, report this error\n {}{}\n\n'.format( + meta.__downloader_uri__, '/issues/new?template=bug_report.md' + ), file=stderr) + self.__is_debug and self.log('Chapter idx: {}'.format(idx)) + return idx + + def get_main_content(self): + content = self._get_content('{}/shonenjump/chapters/{}') + if self.__is_debug: + page = Path('viz_debug') + page.mkdir(parents=True, exist_ok=True) + _path = str(page.joinpath('main-{}.html'.format(self.manga_name))) + with open(_path, 'w') as w: + w.write(content) + return content + + def get_manga_name(self) -> str: + return self._get_name('/chapters/([^/]+)') + + def get_chapters(self): + chapters = [] + for chapter in self._elements('a.o_chapter-container[href*="/chapter/"]'): + url = chapter.get('href') + if url not in chapters: + chapters.append(url) + + # Paid chapters are dynamically loaded so we need to take a different approach. 
+ re = self.re.compile(r'targetUrl:\'(.*)\',targetTitle') + for chapter in self._elements('a.o_chapter-container[onclick*="/chapter/"]'): + url = re.search(chapter.get('onclick')).group(1) + if url not in chapters: + chapters.append(url) + + self.__is_debug and self.log('Chapters count: %d' % len(chapters)) + + if self.__is_debug: + page = Path('viz_debug') + page.mkdir(parents=True, exist_ok=True) + _path = str(page.joinpath('chapters.html')) + self.log('Save path to %s' % _path) + with open(_path, 'w') as w: + w.write('\n'.join(chapters)) + + return chapters + + def get_files(self): + self.__is_debug and self.log('Files') + self._continue = True + ch = self.chapter + + params = [ + 'device_id=3', + 'manga_id={}'.format(self.re.search(r'/chapter/(\d+)', ch).group(1)), + 'metadata=1', + ] + url = 'https://www.viz.com/manga/get_manga_url?' + '&'.join(params) + self.log(self.http_get(self.http().normalize_uri(url))) + __url = self.http_get(self.http().normalize_uri(url)).strip() + self._metadata = loads(self.http_get(__url)) + + params = [ + 'device_id=3', + 'manga_id={}'.format(self.re.search(r'/chapter/(\d+)', ch).group(1)), + 'page={page}', + ] + url = 'https://www.viz.com/manga/get_manga_url?' + '&'.join(params) + self.__is_debug and self.log('Chapter url: %s' % url) + if self.__has_auth: + params.append('client_login=true') + self.__is_debug and self.log('Logged client!') + else: + self.__is_debug and self.log('Anon client!') + + return [url.format(page=i) for i in range(250)] # fixme: max 250 images per chapter + + def get_cover(self): + self._cover_from_content('.o_hero-media') + + def prepare_cookies(self): + self.__is_debug = self._params.get('debug', False) + self.__is_debug = self._params + self.http().mute = True + self.cookie_file = path_join(get_util_home_path(), 'cookies_viz_com.dat') + cookies = self.load_cookies() + content = self.http().requests(self.get_url(), cookies=cookies) + cookies.update(content.cookies.get_dict()) + self.__cookies = cookies + + if not self.has_auth(): + self.auth() + if not self.has_auth(): + self.log('Warning! Login/password incorrect?\nTry to get free chapters...', file=stderr) + self.log('Warning! This site worked from USA and Japan! 
Check your location', file=stderr) + unlink(self.cookie_file) + return + + self.save_cookies(self.__cookies) + self.http().cookies = self.__cookies + + def auth(self): + token = self.get_token() + + name = self.quest([], 'Request login on viz.com') + password = self.quest_password('Request password on viz.com\n') + + if len(name) == 0 or len(password) == 0: + return + + req = self.http().requests('https://www.viz.com/account/try_login', method='post', cookies=self.__cookies, data={ + 'login': name, + 'pass': password, + 'rem_user': 1, + 'authenticity_token': token, + }) + + if req.status_code >= 400: + self.log('Login/password error') + exit(1) + + self.__cookies = req.cookies.get_dict() + + try: + remember = self.json.loads(req.text) + self.__cookies['remember_token'] = remember.get('trust_user_id_token_web', remember.get('remember_token', '')) + except ValueError: + self.__is_debug and self.log('Remember error!', file=stderr) + self.__is_debug and self.log('Please, report this error {}{}'.format( + meta.__downloader_uri__, '/issues/new?template=bug_report.md' + ), file=stderr) + + def save_cookies(self, cookies: dict): + with open(self.cookie_file, 'w') as w: + w.write(self.json.dumps(cookies)) + + def load_cookies(self): + if is_file(self.cookie_file): + try: + with open(self.cookie_file, 'r') as r: + return self.json.loads(r.read()) + except ValueError: + unlink(self.cookie_file) + return {} + + def get_token(self): + auth_token_url = 'https://www.viz.com/account/refresh_login_links' + auth_token = self.http().get(auth_token_url, cookies=self.__cookies) + token = self.re.search(r'AUTH_TOKEN\s*=\s*"(.+?)"', auth_token) + return token.group(1) + + def has_auth(self): + content = self.http_get('https://www.viz.com/account/refresh_login_links', cookies=self.__cookies) + parser = self.document_fromstring(content) + profile = parser.cssselect('.o_profile-link') + success = len(profile) > 0 + self.__has_auth = success + if success: + # self.log('Login as {}'.format(profile[0].text)) + self.log('Login success') + return success + + @staticmethod + def has_chapters(parser): + return len(parser.cssselect('.o_chapter-container')) > 0 + + def save_file(self, idx=None, callback=None, url=None, in_arc_name=None): + if not self._continue: + return + + self.__is_debug and self.log('\nSave file: {}'.format(idx)) + self.__is_debug and self.log('File url: {}'.format(url)) + + _path, idx, _url = self._save_file_params_helper(url, idx) + + self.__is_debug and self.log('File params:\n PATH: {}\n IDX: {}\n URL: {}'.format(_path, idx, _url)) + + __url = self.http_get(self.http().normalize_uri(url)).strip() + + if self.__is_debug and int(idx) < 2: + ch = 'chapter_{}_page_{}.txt'.format(self.get_chapter_index(), idx) + page = Path('viz_debug') + page.mkdir(parents=True, exist_ok=True) + __debug_path = str(page.joinpath(ch)) + self.log('Save path to %s' % __debug_path) + with open(__debug_path, 'w') as w: + self.log(__url) + w.write(str(__url)) + w.close() + + if __url.find('http') != 0: + self.__is_debug and self.log('\nURL is wrong: \n {}\n'.format(__url), file=stderr) + return + + self.http().download_file(__url, _path, idx) + + if file_size(_path) < 32: + self.__is_debug and self.log('File not found. 
Stop for this chapter') + self._continue = False + is_file(_path) and unlink(_path) + return + + self.after_file_save(_path, idx) + + ref = solve(_path, self._metadata) + if ref is not None: + solved_path = _path + '-solved.jpeg' + ref.save(solved_path) + self._archive.add_file(solved_path, 'solved{}.jpeg'.format(idx)) + callable(callback) and callback() + + +main = VizCom diff --git a/manga-py-stable_1.x/manga_py/providers/web_ace_jp.py b/manga-py-stable_1.x/manga_py/providers/web_ace_jp.py new file mode 100644 index 0000000..6fd2515 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/web_ace_jp.py @@ -0,0 +1,68 @@ +from manga_py.provider import Provider +from .helpers.std import Std + + +class WebAceJp(Provider, Std): + @staticmethod + def remove_not_ascii(value): + return value + + def get_archive_name(self) -> str: + idx = self.get_chapter_index().split('-', 2) + return self.normal_arc_name(idx) + + def get_chapter_index(self) -> str: + idx = self.re.search( + r'第(.+?)?話(?:-(.+?))?', + self.chapter[1] + ) + if not idx: + return self.chapter[1] + return self._join_groups(idx.groups()) + + def __url(self): + return '{}/youngaceup/contents/{}/'.format( + self.domain, + self.__idx() + ) + + def __idx(self): + return self.re.search( + r'/contents/(\d+)', + self.get_url() + ).group(1) + + def get_main_content(self): + return self.http_get(self.__url()) + + def get_manga_name(self) -> str: + return self.text_content(self.content, '.credit h1') + + def get_chapters(self): + content = self.http_get(self.__url() + 'episode/') + selector = '.media:not(.yudo) > a.navigate-right' + items = [] + n = self.http().normalize_uri + for el in self._elements(selector, content): + title = el.cssselect('.media-body p')[0] + title = title.text_content().strip(' \n\r\t\0') + items.append((n(el.get('href')), title)) + return items + + def get_files(self): + n = self.http().normalize_uri + items = self.json.loads(self.http_get(self.chapter[0] + '/json/')) + return [n(i) for i in items] + + def get_cover(self) -> str: + return self._cover_from_content('#sakuhin-info img') + + def book_meta(self) -> dict: + # todo meta + pass + + def chapter_for_json(self): + return self.chapter[0] + + +main = WebAceJp diff --git a/manga-py-stable_1.x/manga_py/providers/webtoon_bamtoki_com.py b/manga-py-stable_1.x/manga_py/providers/webtoon_bamtoki_com.py new file mode 100644 index 0000000..5b84b0e --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/webtoon_bamtoki_com.py @@ -0,0 +1,42 @@ +from manga_py.crypt.base_lib import BaseLib +from manga_py.provider import Provider +from .helpers.std import Std + + +class WebtoonBamtokiCom(Provider, Std): + + def get_archive_name(self) -> str: + return self.normal_arc_name(self.get_chapter_index()) + + def get_chapter_index(self) -> str: + re = self.re.compile(r'.+-(\d+).html') + return re.search(self.chapter).group(1) + + def get_main_content(self): + return self._get_content('{}/{}') + + def get_manga_name(self) -> str: + url = self.get_url() + _selector = r'\.(?:com|se)/(.+)' + if ~url.find('.html'): + _selector += r'-\d+\.html' + return self._get_name(_selector) + + def get_chapters(self): + return self._elements('.list-details a.ellipsis') + + def get_files(self): + data = self.html_fromstring(self.chapter, '#tooncontentdata', 0) + content = BaseLib.base64decode(data.text_content().strip('\n\t\r\0 ')) + parser = self.document_fromstring(content) + return self._images_helper(parser, 'img') + + def get_cover(self) -> str: + return 
self._cover_from_content('.title-section-inner .col-md-6 > img') + + def book_meta(self) -> dict: + # todo meta + pass + + +main = WebtoonBamtokiCom diff --git a/manga-py-stable_1.x/manga_py/providers/webtoons_com.py b/manga-py-stable_1.x/manga_py/providers/webtoons_com.py new file mode 100644 index 0000000..18ca419 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/webtoons_com.py @@ -0,0 +1,79 @@ +from manga_py.provider import Provider +from .helpers.std import Std + + +class WebToonsCom(Provider, Std): + __titleNo = 0 + __mainUrl = '' + __next_page_urls = None + + def get_archive_name(self) -> str: + i = self.re.search(r'\.com%s%s' % ( + r'(?:/|%2F)[^/%]+' * 3, + r'(?:/|%2F)([^/%]+)', + ), self.chapter) + return self.normal_arc_name([self.chapter_id, i.group(1)]) + + def get_chapter_index(self) -> str: + return self.re.search(r'\bepisode_no=(\d+)', self.chapter).group(1) + + def get_main_content(self): + return self.http_get(self.__mainUrl) + + def get_manga_name(self) -> str: + self.__titleNo = self._get_name(r'title_no=(\d+)') + name = self._get_name(r'\.com/([^/]+/[^/]+/[^/]+)') + self.__mainUrl = '{}/{}/list?title_no={}'.format(self.domain, name, self.__titleNo) + return self._get_name(r'\.com/[^/]+/[^/]+/([^/]+)') + + def _chapters(self, content): + return self._elements('#_listUl li > a', content) + + def _get_page_urls(self, content): + return self._elements('.paginate a:not([class]):not([onclick])', content) + + def _get_pages_urls(self, content): + chapters = [] + n = self.http().normalize_uri + for j in self._get_page_urls(content): # page-urls + _content = self.http_get(n(j.get('href'))) + chapters += self._chapters(_content) + return chapters + + def get_next_page_urls(self, content): + urls = self._elements('a + a.pg_next', content) + n = self.http().normalize_uri + if len(urls): + self.__next_page_urls.append(n(urls[0].get('href'))) + _content = self.http().get(n(urls[0].get('href'))) + self.get_next_page_urls(_content) + + def get_chapters(self): + self.log('Parse chapters. 
Please, wait') + self.__next_page_urls = [] + chapters = self._chapters(self.content) + n = self.http().normalize_uri + chapters += self._get_pages_urls(self.content) # main page paginator + + self.get_next_page_urls(self.content) + for url in self.__next_page_urls: + content = self.http().get(n(url)) + chapters += self._chapters(content) + chapters += self._get_pages_urls(content) + + return chapters + + def get_files(self): + parser = self.html_fromstring(self.chapter) + return self._images_helper(parser, '#_imageList img', 'data-url') + + def get_cover(self) -> str: + img = self.html_fromstring(self.content, '#content > .detail_bg', 0) + return self.parse_background(img) + + def book_meta(self) -> dict: + # todo meta + pass + + +main = WebToonsCom diff --git a/manga-py-stable_1.x/manga_py/providers/webtoontr_com.py b/manga-py-stable_1.x/manga_py/providers/webtoontr_com.py new file mode 100644 index 0000000..fb79b4e --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/webtoontr_com.py @@ -0,0 +1,23 @@ +from .gomanga_co import GoMangaCo +from .helpers.std import Std + + +class WebtoonTrCom(GoMangaCo, Std): + _name_re = '/_/([^/]+)' + _content_str = '{}/_/{}' + _chapters_selector = '.panel-default table td > a' + + def get_chapter_index(self) -> str: + url = self.chapter + index_re = r'/_/[^/]+/(.+)' + return self.re.search(index_re, url).group(1).replace('.', '-') + + def get_files(self): + parser = self.html_fromstring(self.chapter) + return self._images_helper(parser, 'img.cImg') + + def get_cover(self) -> str: + return self._cover_from_content('.left img.image') + + +main = WebtoonTrCom diff --git a/manga-py-stable_1.x/manga_py/providers/westmanga_info.py b/manga-py-stable_1.x/manga_py/providers/westmanga_info.py new file mode 100644 index 0000000..766cfcc --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/westmanga_info.py @@ -0,0 +1,39 @@ +from manga_py.provider import Provider +from .helpers.std import Std + + +class WestMangaInfo(Provider, Std): + _chapter_re = r'\.info/[^/]+-(\d+(?:-\d+)?)' + + def get_chapter_index(self) -> str: + re = self.re.compile(self._chapter_re) + return re.search(self.chapter).group(1) + + def get_main_content(self): + return self._get_content('{}/manga/{}') + + def get_manga_name(self) -> str: + url = self.get_url() + if ~url.find('/manga/'): + return self._get_name('/manga/([^/]+)') + url = self.html_fromstring(url, '.allc a', 0).get('href') + self._params['url'] = self.http().normalize_uri(url) + return self.get_manga_name() + + def get_chapters(self): + # print(self.manga_name) + return self._elements('span.leftoff > a') + + def get_files(self): + parser = self.html_fromstring(self.chapter) + return self._images_helper(parser, '.lexot img') + + def get_cover(self) -> str: + return self._cover_from_content('.naru img') + + def book_meta(self) -> dict: + # todo meta + pass + + +main = WestMangaInfo diff --git a/manga-py-stable_1.x/manga_py/providers/whitecloudpavilion_com.py b/manga-py-stable_1.x/manga_py/providers/whitecloudpavilion_com.py new file mode 100644 index 0000000..43154cf --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/whitecloudpavilion_com.py @@ -0,0 +1,33 @@ +from manga_py.provider import Provider +from .helpers.std import Std + + +class WhiteCloudPavilionCom(Provider, Std): + + def get_chapter_index(self) -> str: + re = self.re.compile(r'/manga/free/manga/[^/]+/([^/]+)') + return re.search(self.chapter).group(1).replace('.', '-') + + def get_main_content(self): + return self._get_content('{}/manga/free/manga/{}') 
+ + def get_manga_name(self) -> str: + return self._get_name('/free/manga/([^/]+)') + + def get_chapters(self): + return self._elements('.chapters .chapter-title-rtl a') + + def get_files(self): + parser = self.html_fromstring(self.chapter) + items = self._images_helper(parser, '#all img', 'data-src') + return [i.strip(' \n\r\t\0') for i in items] + + def get_cover(self) -> str: + return self._cover_from_content('.boxed img.img-responsive') + + def book_meta(self) -> dict: + # todo meta + pass + + +main = WhiteCloudPavilionCom diff --git a/manga-py-stable_1.x/manga_py/providers/wiemanga_com.py b/manga-py-stable_1.x/manga_py/providers/wiemanga_com.py new file mode 100644 index 0000000..0f7b164 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/wiemanga_com.py @@ -0,0 +1,47 @@ +from manga_py.provider import Provider +from .helpers.std import Std + + +class WieMangaCom(Provider, Std): + + def get_archive_name(self) -> str: + return self.normal_arc_name([ + self.chapter_id, + self.re.search('/chapter/([^/]+)', self.chapter).group(1) + ]) + + def get_chapter_index(self) -> str: + return self.re.search(r'/chapter/[^/]+/(\d+)/', self.chapter).group(1) + + def get_main_content(self): + return self._get_content('{}/manga/{}.html') + + def get_manga_name(self) -> str: + url = self.get_url() + if ~url.find('/chapter/'): + url = self.html_fromstring(url, '.sitemaplist a + a', 0).get('href') + url = self.http().normalize_uri(url) + return self.re.search(r'/manga/([^/]+)\.html', url).group(1) + + def get_chapters(self): + return self._elements('.chapterlist .col1 a') + + def get_files(self): + selector = 'img#comicpic' + parser = self.html_fromstring(self.chapter) + pages = self._first_select_options(parser, 'select#page') + items = self._images_helper(parser, selector) + for i in pages: + parser = self.html_fromstring(i.get('value')) + items += self._images_helper(parser, selector) + return items + + def get_cover(self) -> str: + return self._cover_from_content('.bookfrontpage a > img') + + def book_meta(self) -> dict: + # todo meta + pass + + +main = WieMangaCom diff --git a/manga-py-stable_1.x/manga_py/providers/wmanga_ru.py b/manga-py-stable_1.x/manga_py/providers/wmanga_ru.py new file mode 100644 index 0000000..73e4963 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/wmanga_ru.py @@ -0,0 +1,33 @@ +from manga_py.provider import Provider +from .helpers.std import Std + + +class WMangaRu(Provider, Std): + + def get_chapter_index(self) -> str: + selector = r'/manga_chapter/[^/]+/(\d+)/(\d+)' + idx = self.re.search(selector, self.chapter).groups() + return '{}-{}'.format(*idx) + + def get_main_content(self): + return self._get_content('{}/starter/manga_byid/{}') + + def get_manga_name(self) -> str: + return self._get_name('/starter/manga_[^/]+/([^/]+)') + + def get_chapters(self): + return self._elements('td div div div td > a')[::-1] + + def get_files(self): + parser = self.html_fromstring(self.chapter) + return self._images_helper(parser, 'td a.gallery', 'href') + + def get_cover(self): + pass # FIXME HOME + + def book_meta(self) -> dict: + # todo meta + pass + + +main = WMangaRu diff --git a/manga-py-stable_1.x/manga_py/providers/yande_re.py b/manga-py-stable_1.x/manga_py/providers/yande_re.py new file mode 100644 index 0000000..85b3497 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/yande_re.py @@ -0,0 +1,8 @@ +from .lolibooru_moe import LoliBooruMoe + + +class YandeRe(LoliBooruMoe): + _archive_prefix = 'yandere_' + + +main = YandeRe diff --git 
a/manga-py-stable_1.x/manga_py/providers/zeroscans_com.py b/manga-py-stable_1.x/manga_py/providers/zeroscans_com.py new file mode 100644 index 0000000..409b0c1 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/zeroscans_com.py @@ -0,0 +1,26 @@ +from .rawdevart_com import RawDevArtCom + + +class ZeroScansCom(RawDevArtCom): + _chapter_selector = r'/chapter-(\d+(?:[^\d]\d+)?)' + + def get_chapter_index(self) -> str: + ch = self.chapter + idx = self.re.search(self._chapter_selector, ch) + idx = idx.group(1) + test = self.re.search(r'(\d+)[^\d](\d+)', idx) + if test: + return '-'.join(test.groups()) + return idx + + def get_chapters(self): + items = self._elements('.wp-manga-chapter > a') + n = self.http().normalize_uri + return [n(i.get('href')).rstrip('/') + '/?style=list' for i in items] + + def get_files(self): + parser = self.html_fromstring(self.chapter) + return self._images_helper(parser, '.page-break img') + + +main = ZeroScansCom diff --git a/manga-py-stable_1.x/manga_py/providers/zingbox_me.py b/manga-py-stable_1.x/manga_py/providers/zingbox_me.py new file mode 100644 index 0000000..9f9fb81 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/zingbox_me.py @@ -0,0 +1,56 @@ +from manga_py.provider import Provider +from .helpers.std import Std + + +class ZingBoxMe(Provider, Std): + + def get_archive_name(self) -> str: + idx = self.get_chapter_index() + return self.normal_arc_name(idx) + + def get_chapter_index(self) -> str: + return str(self.chapter.get('title', '0')) + + def get_main_content(self): + idx = self.re.search('/manga/(?:[^/]+/)?(\d+)/', self.get_url()) + data = { + 'url': '/manga/getBookDetail/{}'.format(idx.group(1)), + 'method': 'GET', + 'api': '/mangaheatapi/web', + } + return self.http_post(self.domain + '/api', data=data) + + def get_manga_name(self) -> str: + return self._get_name(r'\.me/manga/(?:\d+/)?([^/]+)') + + def get_chapters(self): + try: + return self.json.loads(self.content).get('child', []) + except self.json.JSONDecodeError: + return [] + + def _chapter_url(self): + idx = self.chapter.get('chapterId', 0) + return '/manga/getChapterImages/{}'.format(idx) + + def get_files(self): + _ = { + 'url': self._chapter_url(), + 'method': 'GET', + 'api': '/mangaheatapi/web', + } + images = self.http_post(self.domain + '/api', data=_) + return self.json.loads(images).get('images', []) + + def get_cover(self): + return self._cover_from_content('.comicImg img') + + def book_meta(self) -> dict: + # todo meta + pass + + def chapter_for_json(self): + return self._chapter_url() + + +main = ZingBoxMe diff --git a/manga-py-stable_1.x/manga_py/providers/zip_read_com.py b/manga-py-stable_1.x/manga_py/providers/zip_read_com.py new file mode 100644 index 0000000..f401d49 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/zip_read_com.py @@ -0,0 +1,33 @@ +from manga_py.provider import Provider +from .helpers.jav_zip_org import JavZipOrg +from .helpers.std import Std + + +class ZipReadCom(Provider, Std): + + def get_chapter_index(self) -> str: + idx = self.re.search(r'/.p=(\d+)', self.chapter).group(1) + return '{}-{}'.format(self.chapter_id, idx) + + def get_main_content(self): + pass + + def get_manga_name(self) -> str: + return self._get_name(r'\.com/([^/]+)') + + def get_chapters(self): + return self._elements('#content .entry > p > a') + + def get_files(self): + jav_zip_org = JavZipOrg(self) + return jav_zip_org.get_images() + + def get_cover(self): + return self._cover_from_content('#content .entry p > img') + + def book_meta(self) -> dict: + # todo 
meta + pass + + +main = ZipReadCom diff --git a/manga-py-stable_1.x/manga_py/providers/zmanga_net.py b/manga-py-stable_1.x/manga_py/providers/zmanga_net.py new file mode 100644 index 0000000..d228bf5 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/providers/zmanga_net.py @@ -0,0 +1,37 @@ +from manga_py.provider import Provider +from .helpers.std import Std + + +class ZMangaNet(Provider, Std): + _type = 'capitulo' + + def get_chapter_index(self) -> str: + re = self.re.compile(r'%s-(\d+(?:-\d+)?)' % self._type) + return re.search(self.chapter).group(1) + + def get_main_content(self): + return self._get_content('{}/manga-online/{}/') + + def get_manga_name(self) -> str: + re = r'\.\w+(?:/manga-online|/read)?/([^/]+?)(?:-%s[^/]+)?/' % self._type + return self._get_name(re) + + def get_chapters(self): + return self._elements('.mangabox_line > a') + + def get_files(self): + parser = self.html_fromstring(self.chapter) + items = self._images_helper(parser, 'meta[property="og:image:secure_url"]', 'content') + if len(items) < 1: + items = self._images_helper(parser, 'meta[property="og:image"]', 'content') + return items + + def get_cover(self) -> str: + # return self._cover_from_content('.cover img') + pass + + def book_meta(self) -> dict: + pass + + +main = ZMangaNet diff --git a/manga-py-stable_1.x/manga_py/storage/.passwords.json.dist b/manga-py-stable_1.x/manga_py/storage/.passwords.json.dist new file mode 100644 index 0000000..b690e0b --- /dev/null +++ b/manga-py-stable_1.x/manga_py/storage/.passwords.json.dist @@ -0,0 +1,10 @@ +{ + "hentai_chan_me": { + "login": "login", + "password": "password" + }, + "tapas_io": { + "login": "login", + "password": "password" + } +} \ No newline at end of file diff --git a/manga-py-stable_1.x/manga_py/storage/.proxy.txt b/manga-py-stable_1.x/manga_py/storage/.proxy.txt new file mode 100644 index 0000000..53e3751 --- /dev/null +++ b/manga-py-stable_1.x/manga_py/storage/.proxy.txt @@ -0,0 +1,4 @@ +5.4.3.2:10 +54.178.157.8:3128 +45.32.62.131:8080 +106.186.22.65:8888 \ No newline at end of file diff --git a/manga-py-stable_1.x/requirements.txt b/manga-py-stable_1.x/requirements.txt new file mode 100644 index 0000000..d8ad944 --- /dev/null +++ b/manga-py-stable_1.x/requirements.txt @@ -0,0 +1,13 @@ +lxml +cssselect +Pillow +requests +pycryptodome +cloudscraper +progressbar2 +urllib3 +packaging +html-purifier +pyexecjs +numpy +loguru diff --git a/manga-py-stable_1.x/requirements_dev.txt b/manga-py-stable_1.x/requirements_dev.txt new file mode 100644 index 0000000..3eef0d7 --- /dev/null +++ b/manga-py-stable_1.x/requirements_dev.txt @@ -0,0 +1,3 @@ +-r ./requirements.txt +selenium +pyvirtualdisplay diff --git a/manga-py-stable_1.x/run_tests.py b/manga-py-stable_1.x/run_tests.py new file mode 100644 index 0000000..a9539dd --- /dev/null +++ b/manga-py-stable_1.x/run_tests.py @@ -0,0 +1,15 @@ +#!/usr/bin/python3 +# -*- coding: utf-8 -*- + +import unittest +from os import path + +from manga_py import fs +from tests import * + +root_path = path.join(path.dirname(path.realpath(__file__)), 'tests') + + +if __name__ == '__main__': + fs.make_dirs(root_path + '/temp') + unittest.main() diff --git a/manga-py-stable_1.x/setup.cfg b/manga-py-stable_1.x/setup.cfg new file mode 100644 index 0000000..7c2b287 --- /dev/null +++ b/manga-py-stable_1.x/setup.cfg @@ -0,0 +1,2 @@ +[bdist_wheel] +universal = 1 \ No newline at end of file diff --git a/manga-py-stable_1.x/setup.py b/manga-py-stable_1.x/setup.py new file mode 100644 index 0000000..2f61810 --- /dev/null +++ 
b/manga-py-stable_1.x/setup.py @@ -0,0 +1,95 @@ +#!/usr/bin/python3 +# -*- coding: utf-8 -*- + +from __future__ import print_function + +from os import path + +from setuptools import setup + +from manga_py import __author__, __email__, __license__ +from manga_py.meta import __version__, __downloader_uri__ + +REQUIREMENTS = [ + 'lxml', + 'cssselect', + 'Pillow', + 'requests', + 'pycrypto', + 'cloudscraper', + 'progressbar2', + 'urllib3', + 'packaging', + 'pyexecjs', + 'html-purifier', + 'selenium', + 'loguru', +] + + +# if path.isfile('requirements.txt'): +# with open('requirements.txt') as f: +# REQUIREMENTS = f.read() + + +long_description = 'Please see https://github.com/manga-py/manga-py' +# if path.isfile('README.rst'): +# with open('README.rst') as f: +# long_description = f.read() + + +release_status = 'Development Status :: 5 - Production/Stable' +if ~__version__.find('beta'): + release_status = 'Development Status :: 4 - Beta' +if ~__version__.find('alpha'): + release_status = 'Development Status :: 3 - Alpha' + + +setup( + name='manga_py', + packages=[ + 'manga_py', + 'manga_py.base_classes', + 'manga_py.crypt', + 'manga_py.cli', + 'manga_py.http', + 'manga_py.providers', + 'manga_py.providers.helpers', + ], + include_package_data=True, + version=__version__, + description='Universal assistant download manga.', + long_description=long_description, + author=__author__, + author_email=__email__, + url=__downloader_uri__, + zip_safe=False, + data_files=[ + ('manga_py/storage', [ + 'manga_py/storage/.passwords.json.dist', + 'manga_py/storage/.proxy.txt', + 'manga_py/crypt/aes.js', + 'manga_py/crypt/aes_zp.js', + ]), + ], + download_url='{}/archive/{}.tar.gz'.format(__downloader_uri__, __version__), + keywords=['manga-downloader', 'manga', 'manga-py'], + license=__license__, + classifiers=[ # look here https://pypi.python.org/pypi?%3Aaction=list_classifiers + release_status, + 'License :: OSI Approved :: MIT License', + 'Natural Language :: English', + 'Environment :: Console', + 'Programming Language :: Python :: 3.5', + 'Programming Language :: Python :: 3.6', + 'Programming Language :: Python :: 3.7', + 'Topic :: Internet :: WWW/HTTP', + ], + python_requires='>=3.5', + install_requires=REQUIREMENTS, + entry_points={ + 'console_scripts': [ + 'manga-py = manga_py:main', + ] + } +) diff --git a/manga-py-stable_1.x/tests/__init__.py b/manga-py-stable_1.x/tests/__init__.py new file mode 100644 index 0000000..cc23ee9 --- /dev/null +++ b/manga-py-stable_1.x/tests/__init__.py @@ -0,0 +1,9 @@ +from .archive import TestArchive +from .base import TestBaseClass +from .gh_pages import TestGhPages +from .http import TestHttpClasses +from .init_provider import TestInitProvider +from .matrix import TestMatrix +from .web_driver import TestWebDriver +from .crypt import TestCrypt +from .std import TestStd diff --git a/manga-py-stable_1.x/tests/archive.py b/manga-py-stable_1.x/tests/archive.py new file mode 100644 index 0000000..6dcb1be --- /dev/null +++ b/manga-py-stable_1.x/tests/archive.py @@ -0,0 +1,77 @@ +#!/usr/bin/python3 +# -*- coding: utf-8 -*- + +import unittest +from os import path +from shutil import copyfile + +from manga_py import fs +from manga_py.base_classes import Archive + +root_path = path.dirname(path.realpath(__file__)) + +files_paths = [ + ['/files/img1.jpg', '/temp/img1.jpg'], + ['/files/img2.png', '/temp/img2.png'], + ['/files/img3.jpg', '/temp/img3.jpg'], + ['/files/img4.jpg', '/temp/img4.jpg'], + ['/files/img5.png', '/temp/img5.png'], + ['/files/img6.gif', '/temp/img6.gif'], 
+ ['/files/img7.webp', '/temp/img7.webp'], +] + + +class TestArchive(unittest.TestCase): + + def test_make_archive(self): + arc = Archive() + arc_path = root_path + '/temp/arc.zip' + fs.unlink(arc_path) + orig_size = 0 + for idx, item in enumerate(files_paths): + fs.unlink(root_path + item[1]) + copyfile(root_path + item[0], root_path + item[1]) + orig_size += int(fs.file_size(root_path + item[1])) + arc.add_file(root_path + item[1]) + + copyfile(root_path + '/files/archive_test_file', root_path + '/temp/archive_test_file') + orig_size += int(fs.file_size(root_path + '/temp/archive_test_file')) + arc.add_file(root_path + '/temp/archive_test_file') + + copyfile(root_path + '/files/archive_test_image', root_path + '/temp/archive_test_image') + orig_size += int(fs.file_size(root_path + '/temp/archive_test_image')) + arc.add_file(root_path + '/temp/archive_test_image') + + arc.make(arc_path) + size = fs.file_size(arc_path) + self.assertTrue(size and 1024 < int(size) < orig_size) + + def test_rename(self): + copyfile(root_path + '/files/archive_test_file', root_path + '/temp/archive_test_file') + fs.rename(root_path + '/temp/archive_test_file', root_path + '/temp/archive_test_file1') + self.assertTrue(fs.is_file(root_path + '/temp/archive_test_file1')) + self.assertFalse(fs.is_file(root_path + '/temp/archive_test_file')) + + def test_home(self): + self.assertTrue(fs.get_util_home_path().find('/home/') == 0) + self.assertTrue(fs.is_dir(fs.get_util_home_path())) + + def test_unlink1(self): + _dir = fs.get_util_home_path() + fs.make_dirs(_dir + '/dir') + self.assertRaises(OSError, fs.unlink, _dir) + + def test_unlink2(self): + _dir = fs.get_util_home_path() + fs.make_dirs(_dir + '/dir') + fs.unlink(_dir, True) + self.assertFalse(fs.is_dir(_dir)) + + def test_not_filesize(self): + self.assertIsNone(fs.file_size(fs.get_util_home_path() + '/file')) + + def test_check_free_space1(self): + self.assertTrue(fs.check_free_space(fs.get_util_home_path(), min_size=99)) + + def test_check_free_space2(self): + self.assertFalse(fs.check_free_space(fs.get_util_home_path(), 99, True)) diff --git a/manga-py-stable_1.x/tests/base.py b/manga-py-stable_1.x/tests/base.py new file mode 100644 index 0000000..9ca88d1 --- /dev/null +++ b/manga-py-stable_1.x/tests/base.py @@ -0,0 +1,120 @@ +import json +import unittest +from os import path + +from manga_py import fs +from manga_py.base_classes import Base, Static + +root_path = fs.dirname(path.realpath(__file__)) + +files_paths = [ + ['/files/img1.jpg', '/temp/img1.jpg'], + ['/files/img2.png', '/temp/img2.png'], + ['/files/img3.jpg', '/temp/img3.jpg'], + ['/files/img4.jpg', '/temp/img4.jpg'], + ['/files/img5.png', '/temp/img5.png'], + ['/files/img6.gif', '/temp/img6.gif'], + ['/files/img7.webp', '/temp/img7.webp'], +] + + +def httpbin(bp: Base, _path: str): + variants = [ + 'https://httpbin-sttv.herokuapp.com', + 'https://httpbin-org.herokuapp.com', + 'https://httpbin.org', + ] + _httpbin = None + for url in variants: + response = bp.http().requests(url=url, method='head') + if response.ok: + _httpbin = url + if _httpbin is None: + raise AttributeError('503. 
Service temporary unavailable / Path: %s ' % _path) + return '{}/{}'.format(_httpbin, _path.lstrip('/')) + + +class TestBaseClass(unittest.TestCase): + + def test_base0(self): + bp = Base() + domain = 'http://example.org' + bp._params['url'] = domain + '/manga/here.html' + self.assertEqual(bp._params['url'], bp.get_url()) + self.assertEqual(domain, bp.domain) + + def test_base1(self): + bp = Base() + self.assertRaises(KeyError, bp.get_url) + + def test_autocrop(self): + bp = Base() + img = files_paths[0] + fs.unlink(root_path + img[1]) + bp.image_auto_crop(root_path + img[0], root_path + img[1]) + self.assertTrue(fs.is_file(root_path + img[1])) + + def test_manualcrop0(self): + bp = Base() + img = files_paths[0] + fs.unlink(root_path + img[1]) + bp._image_params['crop'] = (10, 2, 100, 100) + bp.image_manual_crop(root_path + img[0], root_path + img[1]) + self.assertTrue(fs.is_file(root_path + img[1])) + + def test_manualcrop1(self): + bp = Base() + img = files_paths[0] + fs.unlink(root_path + img[1]) + bp._image_params['offsets_crop'] = (10, 32, 12, 5) + bp.image_manual_crop(root_path + img[0], root_path + img[1]) + self.assertTrue(fs.is_file(root_path + img[1])) + + def test_get(self): + bp = Base() + bp._params['url'] = 'http://example.org/manga/here.html' + url = httpbin(bp, 'get') + self.assertEqual(url, json.loads(bp.http_get(url))['url']) + + def test_post(self): + bp = Base() + bp._params['url'] = 'http://example.org/manga/here.html' + url = httpbin(bp, 'post') + self.assertEqual(url, json.loads(bp.http_post(url))['url']) + + def test_cookies0(self): + bp = Base() + bp._params['url'] = 'http://example.org/manga/here.html' + url = httpbin(bp, 'cookies') + cookies = {'test': 'test-cookie'} + bp.http_get(httpbin(bp, 'cookies/set?test=') + cookies['test']) + content = bp.http_get(url, cookies=cookies) + # print(content) + self.assertEqual(cookies['test'], json.loads(content)['cookies']['test']) + + def test_cookies1(self): + bp = Base() + bp._params['url'] = 'http://example.org/manga/here.html' + url = httpbin(bp, 'cookies/set?test=test-cookie') + self.assertEqual('test-cookie', bp.http().get_base_cookies(url).get('test')) + + def test_redirect0(self): + from urllib.parse import quote + bp = Base() + bp._params['url'] = 'http://example.org/manga/here.html' + url = httpbin(bp, 'redirect-to?url=' + quote(httpbin(bp, 'get?test=1'))) + test_data = {'test': '1'} + content = bp.http_get(url) + # print(content) + self.assertEqual(test_data, json.loads(content)['args']) + + def test_redirect1(self): + bp = Base() + bp._params['url'] = 'http://example.org/manga/here.html' + url = httpbin(bp, 'redirect/11') + self.assertRaises(AttributeError, bp.http_get, url) + + def test_ascii(self): + string = u'/\\\0@#$⼢⼣⼤abCde123йцуڪڦ' + normal_string = '@⼢⼣⼤abCde123йцуڪڦ' + self.assertEqual(Static.remove_not_ascii(string), normal_string) diff --git a/manga-py-stable_1.x/tests/crypt.py b/manga-py-stable_1.x/tests/crypt.py new file mode 100644 index 0000000..675c54a --- /dev/null +++ b/manga-py-stable_1.x/tests/crypt.py @@ -0,0 +1,76 @@ +import unittest +from pathlib import Path +from .base import root_path +from manga_py.provider import Provider +from manga_py.crypt import AcQqComCrypt +from manga_py.crypt import KissMangaComCrypt +from manga_py.crypt import MangaRockComCrypt +from manga_py.crypt import ManhuaGuiComCrypt +from manga_py.crypt import MangaGoMe +from manga_py.image import Image +import json +import re + + +class TestCrypt(unittest.TestCase): + _ac_qq_data = 
'8eyJjb21pYyI6eyJpZCI6NTM2NDM1LCJ0aXRsZSI6Ilx1NTczMFx1ODVjZlx1OWY1MFx1NTkyOSIsImNvbGxlY3QiOiIxNDg3OTU1IiwiaXNKYXBhbkNvbWljIjpmYWxzZSwiaXNMaWdodE5vdmVsIjpmYWxzZSwiaXNMaWdodENvbWljIjpmYWxzZSwiaXNGaW5pc2giOmZhbHNlLCJpc1JvYXN0YWJsZSI6dHJ1ZSwiZUlkIjoiS2xCUFMwTlBYVlZhQWdZZkFRWUFBd3NNSEVKWVhTZz0ifSwiY2hhcHRlciI6eyJjaWQiOjI3NCwiY1RpdGxlIjoiMTI1XHVmZjFhXHU3OGE3XHU5MWNlXHUwMGI3XHU4NTg3XHU1YzlhIFx1NGUwYSIsImNTZXEiOiIyNTUiLCJ2aXBTdGF0dXMiOjIsInByZXZDaWQiOjI3MywibmV4dENpZCI6Mjc1LCJibGFua0ZpcnN0IjoxLCJjYW5SZWFkIjpmYWxzZX0sInBpY3R1cmUiOlt7InBpZCI6IjI4MDM3Iiwid2lkdGgiOjkwMCwiaGVpZ2h0IjoxMjczLCJ1cmwiOiJodHRwczpcL1wvbWFuaHVhLnFwaWMuY25cL21hbmh1YV9kZXRhaWxcLzBcLzI1XzE2XzI0X2U5NTNiZjhhMTBjODA1MWQxNTQyYzA0OWQ0OTdlOTJhXzI4MDM3LmpwZ1wvMCJ9XSwiYWRzIjp7InRvcCI6IiIsImxlZnQiOltdLCJib3R0b20iOnsidGl0bGUiOiJcdTRlMDdcdTRlOGJcdTRlMDdcdTcwNzUiLCJwaWMiOiJodHRwczpcL1wvbWFuaHVhLnFwaWMuY25cL29wZXJhdGlvblwvMFwvMDVfMTFfNDRfYzlhZGZlZGQxMjExNjczNTAyMWEyMmJjYTY2YWVkNDFfMTUzMDc2MjI2NjYxNy5qcGdcLzAiLCJ1cmwiOiJodHRwOlwvXC9hYy5xcS5jb21cL0NvbWljXC9jb21pY0luZm9cL2lkXC82MzEzOTkiLCJ3aWR0aCI6IjY1MCIsImhlaWdodCI6IjExMCJ9fSwiYXJ0aXN0Ijp7ImF2YXRhciI6Imh0dHA6XC9cL3RoaXJkcXEucWxvZ28uY25cL2c/Yj1zZGsmaz03dmg2WVBhSUQzNWRaQzZXMkppYlBFZyZzPTY0MCZ0PTE1MTczNzA2MjkiLCJuaWNrIjoiXHVmZjA4XHU1MTZiXHU1ZWE2XHVmZjA5XHU0ZTAwXHU0ZThjXHU1ZGU1XHU0ZjVjXHU1YmE0IiwidWluQ3J5cHQiOiJkMnRQZG5WV05GZE9XVms5In19' + _kissmanga_data = [ + 'hkJ+EJR/9dCEuATq4edqK2Y2yYuk7oHv6DtMcKdZDztGw8Bdrm3Uh9Z6aZnJeq51IeU04EwWn8DUZ3wEfdvMnYtQh7GSoWdOkdJa7Dbyfs7AspTURTDMhBqYsoZzduP7kyxQ/ftwtbQ733ShihZvNUg4pcR36H4YAKEAcwhZNA0=', + 'hkJ+EJR/9dCEuATq4edqK9GZCq4jAmbydCinAnz3hV01EBnqDvmVlxgEsScYB6JxDM99fJN636C/8+qLQnGVZSDaZ5rRIISuamFvWwZBkpHl2UPXxHd/wIRd6CEcBxer6Zs7vjyjx6W33bVh1OHzeFcXJo8eHQCBmOdWEuF61fk=', + 'hkJ+EJR/9dCEuATq4edqKzJtiFoZ4A6if1KVpaBlajzEcGnP+nT58dQpi9VyyFZduSlPLh9JhUtwrnN7SGjkTCaCr12oRm+OsHRJYhcLVjsz/tcnHEeBFUCJUC9IU5mK1ZKiIDQhEHbnJzh1P+WuNirvKIrHJGwpU7+NfxDvva4=', + ] + _mangago_data = 'b/fewbQPsnakoTXxGjVeyvnp1IKTwZlqQJmozPy7EDIwDQP0M+OR+dhAvBSEBk0haWgKUgCELhnL1sDwJFKoJRPD3BPuEScf+m3wIHiDDySKmoG0yuM6D0nYKf3+mRPVeLWbPqEUEs9js8r/rZkMUpg8QBxL2LW9KWj5TFe5jbDieK1k0jKmnlLof+riZ5Lii3ogXBn3LkQ0OjuCEo3mH2495DfPuanMimtK52UCJIe1Slac4VGFmcfMxWggoTVwmxqlO3YvUHS8WvhUtXMSyy5i5PbuFCZ1RP1T7+RxtBr4xi4olxQBi84Lwk9LN9MnIXl3o3r5Jb2Aq8hBiDfG9gpAye+N0SVnONY2xjo/gEo/njWHEqb8Wggr6kuwUdjqtMQA8zOoEmLGGs4zgeddSR5SsE0WfSxc9gXQwUS3Dlz6vfWTSOPacqKonzT7ggG7cZOoR7gHmEUjjKPhumNnxCHLa0uwTdFpBg38c+72j5dpOqLRld6PsvOJalph2Y79' + + @property + def _provider(self): + provider = Provider() + provider._params['url'] = 'http://example.org' + return provider + + def test_ac_qq_com(self): + lib = AcQqComCrypt(self._provider) + 'data from ac.qq.com/ComicView/index/id/536435/cid/274' + self.assertIsNotNone(lib.decode(self._ac_qq_data).get('comic', None)) + + def test_ac_qq_com_none(self): + lib = AcQqComCrypt(self._provider) + self.assertIsNone(lib.decode(self._ac_qq_data[5:]).get('comic', None)) + + def test_kissmanga(self): + lib = KissMangaComCrypt() + 'view-source:kissmanga.com/Manga/Kou-1-Desu-ga-Isekai-de-Joushu-Hajimemashita/Chapter-019?id=401994' + key = lib.decode_escape(r'\x6E\x73\x66\x64\x37\x33\x32\x6E\x73\x64\x6E\x64\x73\x38\x32\x33\x6E\x73\x64\x66') + iv = b'a5e8e2e9c2721be0a84ad660c472c1f3' + for i in self._kissmanga_data: + href = lib.decrypt(iv, key, i).decode('utf-8').replace('\x10', '').replace('\x0f', '') + self.assertEqual(href[:4], 'http') + + def test_manga_rock_com(self): + crypt = MangaRockComCrypt() + path_cr = str(Path(root_path).joinpath('files', 'manga_rock_com.mri')) + path_test = 
str(Path(root_path).joinpath('temp', 'manga_rock_com')) + + self.assertIsNone(Image.real_extension(path_cr)) + + with open(path_cr, 'rb') as r: + with open(path_test, 'wb') as w: + w.write(crypt.decrypt(r.read())) + + self.assertIsNotNone(Image.real_extension(path_test)) + + def test_manhuagui_com(self): + lib = ManhuaGuiComCrypt() + 'view-source:https://www.manhuagui.com/comic/28271/375550.html#p=3' + path = str(Path(root_path).joinpath('files', 'manhuagui')) + with open(path, 'r') as f: + js = re.search(r'\](\(function\(.+\))\s?<', f.read()) + data = lib.decrypt(js.group(1), '') + js = re.search(r'\(({.+})\)', data).group(1) + js = json.loads(js) + # self.assertIs(js, dict) # NOT WORKED Oo + self.assertTrue(isinstance(js, dict)) + + def test_mangago_me(self): + lib = MangaGoMe() + data = lib.decrypt(self._mangago_data) + self.assertEqual(data[:4], 'http') diff --git a/manga-py-stable_1.x/tests/files/archive_test_file b/manga-py-stable_1.x/tests/files/archive_test_file new file mode 100644 index 0000000..6442715 --- /dev/null +++ b/manga-py-stable_1.x/tests/files/archive_test_file @@ -0,0 +1 @@ +archive_test_file diff --git a/manga-py-stable_1.x/tests/files/archive_test_image b/manga-py-stable_1.x/tests/files/archive_test_image new file mode 100644 index 0000000..eb8ef53 Binary files /dev/null and b/manga-py-stable_1.x/tests/files/archive_test_image differ diff --git a/manga-py-stable_1.x/tests/files/img1.jpg b/manga-py-stable_1.x/tests/files/img1.jpg new file mode 100644 index 0000000..eb8ef53 Binary files /dev/null and b/manga-py-stable_1.x/tests/files/img1.jpg differ diff --git a/manga-py-stable_1.x/tests/files/img2.png b/manga-py-stable_1.x/tests/files/img2.png new file mode 100644 index 0000000..348d9c4 Binary files /dev/null and b/manga-py-stable_1.x/tests/files/img2.png differ diff --git a/manga-py-stable_1.x/tests/files/img3.jpg b/manga-py-stable_1.x/tests/files/img3.jpg new file mode 100644 index 0000000..e283e2c Binary files /dev/null and b/manga-py-stable_1.x/tests/files/img3.jpg differ diff --git a/manga-py-stable_1.x/tests/files/img4.jpg b/manga-py-stable_1.x/tests/files/img4.jpg new file mode 100644 index 0000000..74cd22a Binary files /dev/null and b/manga-py-stable_1.x/tests/files/img4.jpg differ diff --git a/manga-py-stable_1.x/tests/files/img5.png b/manga-py-stable_1.x/tests/files/img5.png new file mode 100644 index 0000000..cb59a63 Binary files /dev/null and b/manga-py-stable_1.x/tests/files/img5.png differ diff --git a/manga-py-stable_1.x/tests/files/img6.gif b/manga-py-stable_1.x/tests/files/img6.gif new file mode 100644 index 0000000..9570f07 Binary files /dev/null and b/manga-py-stable_1.x/tests/files/img6.gif differ diff --git a/manga-py-stable_1.x/tests/files/img7.webp b/manga-py-stable_1.x/tests/files/img7.webp new file mode 100644 index 0000000..122741b Binary files /dev/null and b/manga-py-stable_1.x/tests/files/img7.webp differ diff --git a/manga-py-stable_1.x/tests/files/manga_rock_com.mri b/manga-py-stable_1.x/tests/files/manga_rock_com.mri new file mode 100644 index 0000000..6c14739 Binary files /dev/null and b/manga-py-stable_1.x/tests/files/manga_rock_com.mri differ diff --git a/manga-py-stable_1.x/tests/files/manhuagui b/manga-py-stable_1.x/tests/files/manhuagui new file mode 100644 index 0000000..6a0a748 --- /dev/null +++ b/manga-py-stable_1.x/tests/files/manhuagui @@ -0,0 +1,2 @@ + +第一印象会议第06回_第一印象会议漫画 - 看漫画
[manhuagui fixture: the manhuagui.com reader page for 第一印象会议, chapter 06 ("第06回"), with its markup lost in extraction. The surviving text is page chrome only — site navigation (latest updates, catalogue, completed, ongoing, rankings, Japanese manga, more categories, my history), "loading...", a "lights off" toggle, the title/chapter breadcrumb, a "/16" page counter, and "image loading, please wait".]
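# --- editor's note, not part of the diff -------------------------------------
# test_manhuagui_com (tests/crypt.py, above) expects this fixture to contain
# the packed-JavaScript chapter data of the reader page; that script payload
# did not survive the extraction of this diff.  A minimal sketch of the
# extraction step the test performs, run here on a made-up page snippet:
import re

_page = '<script>window["\\x65\\x76\\x61\\x6c"](function(p,a,c,k,e,d){return p;}("payload",36,1,[]))</script>'
_match = re.search(r'\](\(function\(.+\))\s?<', _page)
assert _match is not None
assert _match.group(1).startswith('(function(')  # this capture is what the real test hands to ManhuaGuiComCrypt.decrypt
# ------------------------------------------------------------------------------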
      \ No newline at end of file diff --git a/manga-py-stable_1.x/tests/files/select.html b/manga-py-stable_1.x/tests/files/select.html new file mode 100644 index 0000000..7428674 --- /dev/null +++ b/manga-py-stable_1.x/tests/files/select.html @@ -0,0 +1,434 @@ + + + + + + + + + +EGScans - Online Manga Viewer - 6 no Trigger - Chapter Chapter 001 - Page 1 + + + + + + + + + + + + + +
[select.html fixture body: the EGScans "Online Manga Viewer" page for 6 no Trigger, Chapter 001, page 1 of 51, with its markup lost in extraction. The surviving text is reader chrome only — the "Display Style: Manga", Chapter and "Previous Page / Page of 51 / Next Page" controls, the fan-translation disclaimer footer ("All the comics featured in this website are property of their publishers..."), and the Valid HTML5 / Valid CSS! badges.]
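# --- editor's note, not part of the diff -------------------------------------
# select.html is the kind of page the <select>-parsing helper exercised by
# WieMangaCom.get_files above (_first_select_options with 'select#page') is
# aimed at.  A minimal sketch, on a made-up page, of pulling the option values
# out with lxml + cssselect (both already listed in requirements.txt):
from lxml.html import fromstring

_html = """<html><body>
<select id="page">
  <option value="/6_no_Trigger/Chapter_001/1">1</option>
  <option value="/6_no_Trigger/Chapter_001/2">2</option>
</select>
</body></html>"""
_options = fromstring(_html).cssselect('select#page option')
assert [o.get('value') for o in _options] == [
    '/6_no_Trigger/Chapter_001/1',
    '/6_no_Trigger/Chapter_001/2',
]
# ------------------------------------------------------------------------------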
      + + + + + + + + + + + \ No newline at end of file diff --git a/manga-py-stable_1.x/tests/gh_pages.py b/manga-py-stable_1.x/tests/gh_pages.py new file mode 100644 index 0000000..615752e --- /dev/null +++ b/manga-py-stable_1.x/tests/gh_pages.py @@ -0,0 +1,10 @@ +#!/usr/bin/python3 +# -*- coding: utf-8 -*- + +import unittest + + +class TestGhPages(unittest.TestCase): + def test_make(self): + from helpers.gh_pages import main + main() diff --git a/manga-py-stable_1.x/tests/http.py b/manga-py-stable_1.x/tests/http.py new file mode 100644 index 0000000..dc69f6c --- /dev/null +++ b/manga-py-stable_1.x/tests/http.py @@ -0,0 +1,30 @@ +#!/usr/bin/python3 +# -*- coding: utf-8 -*- + +import unittest + +from manga_py.http.url_normalizer import normalize_uri + + +class TestHttpClasses(unittest.TestCase): + referer = 'http://example.org/manga/here.html' + + def test_url_normalizer_url_helper1(self): + url = '//example.org/manga/here.html' + test_url = normalize_uri(url, self.referer) + self.assertEqual(self.referer, test_url) + + def test_url_normalizer_url_helper2(self): + url = '/manga/here.html' + test_url = normalize_uri(url, self.referer) + self.assertEqual(self.referer, test_url) + + def test_url_normalizer_url_helper3(self): + url = '://example.org/manga/here.html' + test_url = normalize_uri(url, self.referer) + self.assertEqual(self.referer, test_url) + + def test_url_normalizer_url_helper4(self): + url = 'here.html' + test_url = normalize_uri(url, self.referer) + self.assertEqual(self.referer, test_url) diff --git a/manga-py-stable_1.x/tests/images.py b/manga-py-stable_1.x/tests/images.py new file mode 100644 index 0000000..51d8a44 --- /dev/null +++ b/manga-py-stable_1.x/tests/images.py @@ -0,0 +1,140 @@ +import unittest +from os import path + +from PIL import Image as PilImage + +from manga_py import fs +from manga_py.image import Image + +root_path = path.dirname(path.realpath(__file__)) + +files_paths = [ + ['/files/img1.jpg', '/temp/img1.jpg'], + ['/files/img2.png', '/temp/img2.png'], + ['/files/img3.jpg', '/temp/img3.jpg'], + ['/files/img4.jpg', '/temp/img4.jpg'], + ['/files/img5.png', '/temp/img5.png'], + ['/files/img6.gif', '/temp/img6.gif'], + ['/files/img7.webp', '/temp/img7.webp'], +] + + +class TestImages(unittest.TestCase): + + def test_manual_crop(self): + for file in files_paths: + fs.unlink(root_path + file[1]) + + image = PilImage.open(root_path + file[0]) + sizes = image.size + image.close() + + img = Image(root_path + file[0]) + img.crop_manual((10, 0, image.size[0], image.size[1]), root_path + file[1]) + img.close() + + cropped_image = PilImage.open(root_path + file[1]) + cropped_sizes = cropped_image.size + cropped_image.close() + + self.assertTrue((sizes[0] - cropped_sizes[0]) == 10) + + def test_manual_crop_with_offsets(self): + for file in files_paths: + fs.unlink(root_path + file[1]) + + image = PilImage.open(root_path + file[0]) + sizes = image.size + image.close() + + img = Image(root_path + file[0]) + img.crop_manual_with_offsets((10, 0, 0, 0), root_path + file[1]) + img.close() + + cropped_image = PilImage.open(root_path + file[1]) + cropped_sizes = cropped_image.size + cropped_image.close() + + self.assertTrue((sizes[0] - cropped_sizes[0]) == 10) + + def test_auto_crop1(self): + file = files_paths[0] + fs.unlink(root_path + file[1]) + + image = PilImage.open(root_path + file[0]) + sizes = image.size + image.close() + + img = Image(root_path + file[0]) + img.crop_auto(root_path + file[1]) + img.close() + + cropped_image = PilImage.open(root_path + file[1]) + 
cropped_sizes = cropped_image.size + cropped_image.close() + + self.assertTrue(sizes[0] > cropped_sizes[0]) + + def test_auto_crop2(self): + file = files_paths[1] + fs.unlink(root_path + file[1]) + + image = PilImage.open(root_path + file[0]) + sizes = image.size + image.close() + + img = Image(root_path + file[0]) + img.crop_auto(root_path + file[1]) + img.close() + + cropped_image = PilImage.open(root_path + file[1]) + cropped_sizes = cropped_image.size + cropped_image.close() + + self.assertTrue(sizes[0] == cropped_sizes[0]) + + def test_auto_crop3(self): + file = files_paths[4] + fs.unlink(root_path + file[1]) + + image = PilImage.open(root_path + file[0]) + sizes = image.size + image.close() + + img = Image(root_path + file[0]) + img.crop_auto(root_path + file[1]) + img.close() + + cropped_image = PilImage.open(root_path + file[1]) + cropped_sizes = cropped_image.size + cropped_image.close() + + self.assertTrue(sizes[0] == (2 + cropped_sizes[0])) # 2px black line + + def test_image_not_found(self): + self.assertRaises(AttributeError, lambda: Image(root_path)) + + def test_gray1(self): + file = files_paths[1] + fs.unlink(root_path + file[1]) + + image = Image(root_path + file[0]) + image.gray(root_path + file[1]) + image.close() + + image = PilImage.open(root_path + file[1]) + index = image.mode.find('L') + image.close() + + self.assertTrue(index == 0) + + def test_convert(self): + file = files_paths[0][0] + image = Image(root_path + file) + + basename = file[0:file.find('.')] + basename = root_path + '/temp' + basename + '.bmp' + image.convert(basename) + image.close() + + self.assertTrue(path.isfile(basename)) diff --git a/manga-py-stable_1.x/tests/init_provider.py b/manga-py-stable_1.x/tests/init_provider.py new file mode 100644 index 0000000..8085bda --- /dev/null +++ b/manga-py-stable_1.x/tests/init_provider.py @@ -0,0 +1,38 @@ +import unittest +from os import path + +from manga_py import fs +from manga_py.provider import Provider +from manga_py.providers import get_provider + +root_path = path.dirname(path.realpath(__file__)) + + +class TestInitProvider(unittest.TestCase): + + # success + def test_get_provider1(self): + provider = get_provider('http://readmanga.me/manga/name/here') + self.assertIsInstance(provider(), Provider) + + # failed + def test_get_provider2(self): + provider = get_provider('http://example.org/manga/name/here') + self.assertFalse(provider) + + def test_root_path(self): + self.assertEqual(path.realpath(fs.path_join(root_path, '..')), fs.root_path()) + + def test_file_name_query_remove1(self): + name = '/addr/to/filename' + self.assertEqual( + name, + fs.remove_file_query_params(name + '?query=params').replace('\\', '/') # windows os patch + ) + + def test_file_name_query_remove2(self): + name = '/addr/to/filename/' + self.assertEqual( + name + 'image.png', + fs.remove_file_query_params(name + '?query=params').replace('\\', '/') # windows os patch + ) diff --git a/manga-py-stable_1.x/tests/matrix.py b/manga-py-stable_1.x/tests/matrix.py new file mode 100644 index 0000000..3e6d90b --- /dev/null +++ b/manga-py-stable_1.x/tests/matrix.py @@ -0,0 +1,211 @@ +#!/usr/bin/python3 +# -*- coding: utf-8 -*- + +import json +import math +import operator +import unittest +from functools import reduce +from os import path + +from PIL import Image as PilImage, ImageChops + +from manga_py.crypt import sunday_webry_com +from manga_py.crypt.puzzle import Puzzle +from manga_py.crypt import mangago_me +from manga_py.crypt import viz_com + +root_path = 
path.dirname(path.realpath(__file__)) + + +class TestMatrix(unittest.TestCase): + @staticmethod + def _rmsdiff(im1, im2): + """Calculate the root-mean-square difference between two images""" + h = ImageChops.difference(im1, im2).histogram() + # calculate rms + return math.sqrt( + reduce( + operator.add, + map(lambda h, i: h * (i ** 2), h, range(256)) + ) / (float(im1.size[0]) * im1.size[1]) + ) + + def test_jpg(self): + file_src = root_path + '/mosaic/tonarinoyj_jp_orig.jpg' # tonarinoyj.jp image + file_ref = root_path + '/mosaic/tonarinoyj_jp_reference.jpg' + file_dst = root_path + '/temp/tonarinoyj_jp_mosaic.jpg' + + div_num = 4 + matrix = {} + for i in range(div_num * div_num): + matrix[i] = (i % div_num) * div_num + int(i / div_num) + p = Puzzle(div_num, div_num, matrix, 8) + p.need_copy_orig = True + p.de_scramble(file_src, file_dst) + + src = PilImage.open(file_dst) + ref = PilImage.open(file_ref) + + deviation = self._rmsdiff(src, ref) + src.close() + ref.close() + self.assertTrue(deviation < 10) + + def test_png(self): + file_src = root_path + '/mosaic/tonarinoyj_jp_orig.png' # tonarinoyj.jp image + file_ref = root_path + '/mosaic/tonarinoyj_jp_reference.png' + file_dst = root_path + '/temp/tonarinoyj_jp_mosaic.png' + + div_num = 4 + matrix = {} + for i in range(div_num * div_num): + matrix[i] = (i % div_num) * div_num + int(i / div_num) + p = Puzzle(div_num, div_num, matrix, 8) + p.need_copy_orig = True + p.de_scramble(file_src, file_dst) + + src = PilImage.open(file_dst) + ref = PilImage.open(file_ref) + + deviation = self._rmsdiff(src, ref) + src.close() + ref.close() + self.assertTrue(deviation < 10) + + def test_sunday_webry_com(self): + decoder = sunday_webry_com.SundayWebryCom() + + with open(root_path + '/mosaic/sunday_reference_matrix.json') as f: + result = json.loads(f.read()) + + n = 0 + for _i, _r in enumerate(result): + + result_py = decoder.solve(848, 1200, 64, 64, _i + 1) + + for i, r in enumerate(_r): + p = result_py[i] + if ( + r['srcX'] != p['srcX'] or + r['srcY'] != p['srcY'] or + r['destX'] != p['destX'] or + r['destY'] != p['destY'] or + r['width'] != p['width'] or + r['height'] != p['height'] + ): + n += 1 + + self.assertTrue(n < 1) + + def test_solve_sunday_webry_com(self): + decoder = sunday_webry_com.SundayWebryCom() + puzzle = sunday_webry_com.MatrixSunday() + + src = root_path + '/mosaic/sunday_orig.jpg' + file_dst = root_path + '/temp/sunday_mosaic2.jpg' + file_ref = root_path + '/mosaic/sunday_reference.jpg' + + result_py2 = decoder.solve_by_img(src, 64, 64, 2) + + puzzle.de_scramble(src, file_dst, result_py2) + + src = PilImage.open(file_dst) + ref = PilImage.open(file_ref) + + deviation = self._rmsdiff(src, ref) + + self.assertTrue(deviation < 10) + + def test_solve_plus_comico_js(self): + src = root_path + '/mosaic/plus_comico_jp_orig.jpg' + file_dst = root_path + '/temp/plus_comico_jp_mosaic.jpg' + file_ref = root_path + '/mosaic/plus_comico_jp_reference.jpg' + + _matrix = '3,14,5,8,10,12,4,2,1,6,15,13,7,11,0,9'.split(',') + + div_num = 4 + matrix = {} + n = 0 + for i in _matrix: + matrix[int(i)] = n + n += 1 + + p = Puzzle(div_num, div_num, matrix, 8) + p.need_copy_orig = True + p.de_scramble(src, file_dst) + + src = PilImage.open(file_dst) + ref = PilImage.open(file_ref) + + deviation = self._rmsdiff(src, ref) + src.close() + ref.close() + self.assertTrue(deviation < 10) + + def test_solve_mangago(self): + urls = [ + ( + 'mangago1_orig.jpeg', + 'mangago1_reference.jpeg', + 
'http://iweb7.mangapicgallery.com/r/cspiclink/make_me_bark/418858/962c5915bbe6f4ab0903149b5d94baba796a5cf059389458858fdeb74ddc02a4.jpeg' + ), + ( + 'mangago2_orig.jpeg', + 'mangago2_reference.jpeg', + 'http://iweb7.mangapicgallery.com/r/cspiclink/make_me_bark/418858/53e50ae9291f4ab0903149b5d94baba796a5cf059383846d7d1f4dc72e9f75f9.jpeg' + ), + ( + 'mangago3_orig.jpeg', + 'mangago3_reference.jpeg', + 'http://iweb7.mangapicgallery.com/r/cspiclink/make_me_bark/418859/c56e8e1f5baf770212313f5e9532ec5e6103b61e956e06496929048f98e33004.jpeg' + ), + ( + 'mangago4_orig.jpeg', + 'mangago4_reference.jpeg', + 'http://iweb7.mangapicgallery.com/r/cspiclink/make_me_bark/418859/5af9065f5b2e2169a4bfd805e9aa21d3112d498d68c6caa9046af4b06a723170.jpeg' + ), + ( + 'mangago5_orig.jpeg', + 'mangago5_reference.jpeg', + 'http://iweb7.mangapicgallery.com/r/cspiclink/make_me_bark/418859/34f05a15df5ae2169a4bfd805e9aa21d3112d498d68c765523c20c307fa0fda2.jpeg' + ), + ( + 'mangago6_orig.jpeg', + 'mangago6_reference.jpeg', + 'http://iweb7.mangapicgallery.com/r/cspiclink/make_me_bark/418864/0d76361a3cf5baf770212313f5e9532ec5e6103b616618337cb81b6acf9c1912.jpeg' + ), + ( + 'mangago7_orig.jpeg', + 'mangago7_reference.jpeg', + 'http://iweb7.mangapicgallery.com/r/cspiclink/lookism/443703/dbdf873a11bafad56c41ff7fbed622aa76e19f3564e5d52a6688d6d9e3c57fb2.jpeg' + ), + ] + + for i in urls: + img = path.join(root_path, 'mosaic', i[0]) + dst = path.join(root_path, 'temp', i[0]) + + ref = path.join(root_path, 'mosaic', i[1]) + + mangago_me.MangaGoMe.puzzle(img, dst, i[2]) + + # compare + src = PilImage.open(ref) + ref = PilImage.open(dst) + deviation = self._rmsdiff(src, ref) + src.close() + ref.close() + self.assertTrue(deviation < 10) + + def test_solve_viz_com(self): + for i in range(7): + src_path = root_path + '/mosaic/viz/index{}.jfif'.format(i) + ref_path = root_path + '/temp/canvas{}.png'.format(i) + solved_path = root_path + '/mosaic/viz/canvas{}.png'.format(i) + ref = viz_com.solve(src_path, {'width': 800, 'height': 1200}) + ref.save(ref_path) + solved = PilImage.open(solved_path) + deviation = self._rmsdiff(solved, ref) + solved.close() + self.assertTrue(deviation < 10) diff --git a/manga-py-stable_1.x/tests/mosaic/mangago1_orig.jpeg b/manga-py-stable_1.x/tests/mosaic/mangago1_orig.jpeg new file mode 100644 index 0000000..6c1f0f4 Binary files /dev/null and b/manga-py-stable_1.x/tests/mosaic/mangago1_orig.jpeg differ diff --git a/manga-py-stable_1.x/tests/mosaic/mangago1_reference.jpeg b/manga-py-stable_1.x/tests/mosaic/mangago1_reference.jpeg new file mode 100644 index 0000000..673df8a Binary files /dev/null and b/manga-py-stable_1.x/tests/mosaic/mangago1_reference.jpeg differ diff --git a/manga-py-stable_1.x/tests/mosaic/mangago2_orig.jpeg b/manga-py-stable_1.x/tests/mosaic/mangago2_orig.jpeg new file mode 100644 index 0000000..bbca87b Binary files /dev/null and b/manga-py-stable_1.x/tests/mosaic/mangago2_orig.jpeg differ diff --git a/manga-py-stable_1.x/tests/mosaic/mangago2_reference.jpeg b/manga-py-stable_1.x/tests/mosaic/mangago2_reference.jpeg new file mode 100644 index 0000000..ced269a Binary files /dev/null and b/manga-py-stable_1.x/tests/mosaic/mangago2_reference.jpeg differ diff --git a/manga-py-stable_1.x/tests/mosaic/mangago3_orig.jpeg b/manga-py-stable_1.x/tests/mosaic/mangago3_orig.jpeg new file mode 100644 index 0000000..c372ba6 Binary files /dev/null and b/manga-py-stable_1.x/tests/mosaic/mangago3_orig.jpeg differ diff --git a/manga-py-stable_1.x/tests/mosaic/mangago3_reference.jpeg 
b/manga-py-stable_1.x/tests/mosaic/mangago3_reference.jpeg new file mode 100644 index 0000000..8a64c1b Binary files /dev/null and b/manga-py-stable_1.x/tests/mosaic/mangago3_reference.jpeg differ diff --git a/manga-py-stable_1.x/tests/mosaic/mangago4_orig.jpeg b/manga-py-stable_1.x/tests/mosaic/mangago4_orig.jpeg new file mode 100644 index 0000000..fc6e03a Binary files /dev/null and b/manga-py-stable_1.x/tests/mosaic/mangago4_orig.jpeg differ diff --git a/manga-py-stable_1.x/tests/mosaic/mangago4_reference.jpeg b/manga-py-stable_1.x/tests/mosaic/mangago4_reference.jpeg new file mode 100644 index 0000000..8f485bf Binary files /dev/null and b/manga-py-stable_1.x/tests/mosaic/mangago4_reference.jpeg differ diff --git a/manga-py-stable_1.x/tests/mosaic/mangago5_orig.jpeg b/manga-py-stable_1.x/tests/mosaic/mangago5_orig.jpeg new file mode 100644 index 0000000..fe91282 Binary files /dev/null and b/manga-py-stable_1.x/tests/mosaic/mangago5_orig.jpeg differ diff --git a/manga-py-stable_1.x/tests/mosaic/mangago5_reference.jpeg b/manga-py-stable_1.x/tests/mosaic/mangago5_reference.jpeg new file mode 100644 index 0000000..c709736 Binary files /dev/null and b/manga-py-stable_1.x/tests/mosaic/mangago5_reference.jpeg differ diff --git a/manga-py-stable_1.x/tests/mosaic/mangago6_orig.jpeg b/manga-py-stable_1.x/tests/mosaic/mangago6_orig.jpeg new file mode 100644 index 0000000..0d9e615 Binary files /dev/null and b/manga-py-stable_1.x/tests/mosaic/mangago6_orig.jpeg differ diff --git a/manga-py-stable_1.x/tests/mosaic/mangago6_reference.jpeg b/manga-py-stable_1.x/tests/mosaic/mangago6_reference.jpeg new file mode 100644 index 0000000..40962a9 Binary files /dev/null and b/manga-py-stable_1.x/tests/mosaic/mangago6_reference.jpeg differ diff --git a/manga-py-stable_1.x/tests/mosaic/mangago7_orig.jpeg b/manga-py-stable_1.x/tests/mosaic/mangago7_orig.jpeg new file mode 100644 index 0000000..911778f Binary files /dev/null and b/manga-py-stable_1.x/tests/mosaic/mangago7_orig.jpeg differ diff --git a/manga-py-stable_1.x/tests/mosaic/mangago7_reference.jpeg b/manga-py-stable_1.x/tests/mosaic/mangago7_reference.jpeg new file mode 100644 index 0000000..66c978b Binary files /dev/null and b/manga-py-stable_1.x/tests/mosaic/mangago7_reference.jpeg differ diff --git a/manga-py-stable_1.x/tests/mosaic/plus_comico_jp_orig.jpg b/manga-py-stable_1.x/tests/mosaic/plus_comico_jp_orig.jpg new file mode 100644 index 0000000..08f874e Binary files /dev/null and b/manga-py-stable_1.x/tests/mosaic/plus_comico_jp_orig.jpg differ diff --git a/manga-py-stable_1.x/tests/mosaic/plus_comico_jp_reference.jpg b/manga-py-stable_1.x/tests/mosaic/plus_comico_jp_reference.jpg new file mode 100644 index 0000000..6fd58e3 Binary files /dev/null and b/manga-py-stable_1.x/tests/mosaic/plus_comico_jp_reference.jpg differ diff --git a/manga-py-stable_1.x/tests/mosaic/sunday_orig.jpg b/manga-py-stable_1.x/tests/mosaic/sunday_orig.jpg new file mode 100644 index 0000000..5b447f5 Binary files /dev/null and b/manga-py-stable_1.x/tests/mosaic/sunday_orig.jpg differ diff --git a/manga-py-stable_1.x/tests/mosaic/sunday_reference.jpg b/manga-py-stable_1.x/tests/mosaic/sunday_reference.jpg new file mode 100644 index 0000000..1a51fcf Binary files /dev/null and b/manga-py-stable_1.x/tests/mosaic/sunday_reference.jpg differ diff --git a/manga-py-stable_1.x/tests/mosaic/sunday_reference_matrix.json b/manga-py-stable_1.x/tests/mosaic/sunday_reference_matrix.json new file mode 100644 index 0000000..206580f --- /dev/null +++ 
b/manga-py-stable_1.x/tests/mosaic/sunday_reference_matrix.json @@ -0,0 +1 @@ +[[{"srcX":576,"srcY":448,"destX":576,"destY":448,"width":16,"height":48},{"srcX":0,"srcY":448,"destX":592,"destY":576,"width":64,"height":48},{"srcX":64,"srcY":448,"destX":656,"destY":640,"width":64,"height":48},{"srcX":128,"srcY":448,"destX":720,"destY":704,"width":64,"height":48},{"srcX":192,"srcY":448,"destX":784,"destY":768,"width":64,"height":48},{"srcX":256,"srcY":448,"destX":0,"destY":256,"width":64,"height":48},{"srcX":320,"srcY":448,"destX":64,"destY":320,"width":64,"height":48},{"srcX":384,"srcY":448,"destX":128,"destY":384,"width":64,"height":48},{"srcX":448,"srcY":448,"destX":192,"destY":0,"width":64,"height":48},{"srcX":512,"srcY":448,"destX":256,"destY":64,"width":64,"height":48},{"srcX":592,"srcY":448,"destX":320,"destY":128,"width":64,"height":48},{"srcX":656,"srcY":448,"destX":384,"destY":192,"width":64,"height":48},{"srcX":720,"srcY":448,"destX":448,"destY":256,"width":64,"height":48},{"srcX":784,"srcY":448,"destX":512,"destY":320,"width":64,"height":48},{"srcX":576,"srcY":0,"destX":576,"destY":64,"width":16,"height":64},{"srcX":576,"srcY":64,"destX":640,"destY":128,"width":16,"height":64},{"srcX":576,"srcY":128,"destX":704,"destY":192,"width":16,"height":64},{"srcX":576,"srcY":192,"destX":768,"destY":256,"width":16,"height":64},{"srcX":576,"srcY":256,"destX":576,"destY":320,"width":16,"height":64},{"srcX":576,"srcY":320,"destX":640,"destY":384,"width":16,"height":64},{"srcX":576,"srcY":384,"destX":64,"destY":496,"width":16,"height":64},{"srcX":576,"srcY":496,"destX":128,"destY":560,"width":16,"height":64},{"srcX":576,"srcY":560,"destX":192,"destY":624,"width":16,"height":64},{"srcX":576,"srcY":624,"destX":256,"destY":688,"width":16,"height":64},{"srcX":576,"srcY":688,"destX":320,"destY":752,"width":16,"height":64},{"srcX":576,"srcY":752,"destX":384,"destY":816,"width":16,"height":64},{"srcX":576,"srcY":816,"destX":448,"destY":880,"width":16,"height":64},{"srcX":576,"srcY":880,"destX":512,"destY":944,"width":16,"height":64},{"srcX":576,"srcY":944,"destX":0,"destY":1008,"width":16,"height":64},{"srcX":576,"srcY":1008,"destX":64,"destY":1072,"width":16,"height":64},{"srcX":576,"srcY":1072,"destX":128,"destY":1136,"width":16,"height":64},{"srcX":576,"srcY":1136,"destX":768,"destY":0,"width":16,"height":64},{"srcX":0,"srcY":0,"destX":208,"destY":1072,"width":64,"height":64},{"srcX":0,"srcY":64,"destX":512,"destY":432,"width":64,"height":64},{"srcX":0,"srcY":128,"destX":0,"destY":192,"width":64,"height":64},{"srcX":0,"srcY":192,"destX":336,"destY":752,"width":64,"height":64},{"srcX":0,"srcY":256,"destX":656,"destY":64,"width":64,"height":64},{"srcX":0,"srcY":320,"destX":144,"destY":1072,"width":64,"height":64},{"srcX":0,"srcY":384,"destX":448,"destY":432,"width":64,"height":64},{"srcX":0,"srcY":496,"destX":784,"destY":944,"width":64,"height":64},{"srcX":0,"srcY":560,"destX":256,"destY":752,"width":64,"height":64},{"srcX":0,"srcY":624,"destX":592,"destY":64,"width":64,"height":64},{"srcX":0,"srcY":688,"destX":80,"destY":1072,"width":64,"height":64},{"srcX":0,"srcY":752,"destX":384,"destY":432,"width":64,"height":64},{"srcX":0,"srcY":816,"destX":720,"destY":944,"width":64,"height":64},{"srcX":0,"srcY":880,"destX":192,"destY":752,"width":64,"height":64},{"srcX":0,"srcY":944,"destX":512,"destY":64,"width":64,"height":64},{"srcX":0,"srcY":1008,"destX":0,"destY":1072,"width":64,"height":64},{"srcX":0,"srcY":1072,"destX":320,"destY":432,"width":64,"height":64},{"srcX":0,"srcY":1136,"destX":656,"destY":944,"
width":64,"height":64},{"srcX":64,"srcY":0,"destX":256,"destY":240,"width":64,"height":64},{"srcX":64,"srcY":64,"destX":592,"destY":752,"width":64,"height":64},{"srcX":64,"srcY":128,"destX":64,"destY":560,"width":64,"height":64},{"srcX":64,"srcY":192,"destX":400,"destY":1072,"width":64,"height":64},{"srcX":64,"srcY":256,"destX":720,"destY":384,"width":64,"height":64},{"srcX":64,"srcY":320,"destX":192,"destY":240,"width":64,"height":64},{"srcX":64,"srcY":384,"destX":528,"destY":752,"width":64,"height":64},{"srcX":64,"srcY":496,"destX":0,"destY":560,"width":64,"height":64},{"srcX":64,"srcY":560,"destX":336,"destY":1072,"width":64,"height":64},{"srcX":64,"srcY":624,"destX":656,"destY":384,"width":64,"height":64},{"srcX":64,"srcY":688,"destX":128,"destY":192,"width":64,"height":64},{"srcX":64,"srcY":752,"destX":464,"destY":752,"width":64,"height":64},{"srcX":64,"srcY":816,"destX":784,"destY":64,"width":64,"height":64},{"srcX":64,"srcY":880,"destX":272,"destY":1072,"width":64,"height":64},{"srcX":64,"srcY":944,"destX":576,"destY":384,"width":64,"height":64},{"srcX":64,"srcY":1008,"destX":64,"destY":192,"width":64,"height":64},{"srcX":64,"srcY":1072,"destX":400,"destY":752,"width":64,"height":64},{"srcX":64,"srcY":1136,"destX":720,"destY":64,"width":64,"height":64},{"srcX":128,"srcY":0,"destX":336,"destY":560,"width":64,"height":64},{"srcX":128,"srcY":64,"destX":656,"destY":1072,"width":64,"height":64},{"srcX":128,"srcY":128,"destX":128,"destY":880,"width":64,"height":64},{"srcX":128,"srcY":192,"destX":448,"destY":192,"width":64,"height":64},{"srcX":128,"srcY":256,"destX":784,"destY":704,"width":64,"height":64},{"srcX":128,"srcY":320,"destX":272,"destY":560,"width":64,"height":64},{"srcX":128,"srcY":384,"destX":592,"destY":1072,"width":64,"height":64},{"srcX":128,"srcY":496,"destX":64,"destY":880,"width":64,"height":64},{"srcX":128,"srcY":560,"destX":384,"destY":240,"width":64,"height":64},{"srcX":128,"srcY":624,"destX":720,"destY":752,"width":64,"height":64},{"srcX":128,"srcY":688,"destX":208,"destY":560,"width":64,"height":64},{"srcX":128,"srcY":752,"destX":528,"destY":1072,"width":64,"height":64},{"srcX":128,"srcY":816,"destX":0,"destY":880,"width":64,"height":64},{"srcX":128,"srcY":880,"destX":320,"destY":240,"width":64,"height":64},{"srcX":128,"srcY":944,"destX":656,"destY":752,"width":64,"height":64},{"srcX":128,"srcY":1008,"destX":144,"destY":560,"width":64,"height":64},{"srcX":128,"srcY":1072,"destX":464,"destY":1072,"width":64,"height":64},{"srcX":128,"srcY":1136,"destX":784,"destY":384,"width":64,"height":64},{"srcX":192,"srcY":0,"destX":384,"destY":880,"width":64,"height":64},{"srcX":192,"srcY":64,"destX":720,"destY":192,"width":64,"height":64},{"srcX":192,"srcY":128,"destX":192,"destY":48,"width":64,"height":64},{"srcX":192,"srcY":192,"destX":528,"destY":560,"width":64,"height":64},{"srcX":192,"srcY":256,"destX":0,"destY":368,"width":64,"height":64},{"srcX":192,"srcY":320,"destX":320,"destY":880,"width":64,"height":64},{"srcX":192,"srcY":384,"destX":640,"destY":192,"width":64,"height":64},{"srcX":192,"srcY":496,"destX":128,"destY":0,"width":64,"height":64},{"srcX":192,"srcY":560,"destX":464,"destY":560,"width":64,"height":64},{"srcX":192,"srcY":624,"destX":784,"destY":1072,"width":64,"height":64},{"srcX":192,"srcY":688,"destX":256,"destY":880,"width":64,"height":64},{"srcX":192,"srcY":752,"destX":576,"destY":192,"width":64,"height":64},{"srcX":192,"srcY":816,"destX":64,"destY":0,"width":64,"height":64},{"srcX":192,"srcY":880,"destX":400,"destY":560,"width":64,"height":64},{"srcX":192,
"srcY":944,"destX":720,"destY":1072,"width":64,"height":64},{"srcX":192,"srcY":1008,"destX":192,"destY":880,"width":64,"height":64},{"srcX":192,"srcY":1072,"destX":512,"destY":192,"width":64,"height":64},{"srcX":192,"srcY":1136,"destX":0,"destY":0,"width":64,"height":64},{"srcX":256,"srcY":0,"destX":448,"destY":0,"width":64,"height":64},{"srcX":256,"srcY":64,"destX":784,"destY":512,"width":64,"height":64},{"srcX":256,"srcY":128,"destX":256,"destY":368,"width":64,"height":64},{"srcX":256,"srcY":192,"destX":592,"destY":880,"width":64,"height":64},{"srcX":256,"srcY":256,"destX":64,"destY":688,"width":64,"height":64},{"srcX":256,"srcY":320,"destX":384,"destY":0,"width":64,"height":64},{"srcX":256,"srcY":384,"destX":720,"destY":512,"width":64,"height":64},{"srcX":256,"srcY":496,"destX":192,"destY":368,"width":64,"height":64},{"srcX":256,"srcY":560,"destX":528,"destY":880,"width":64,"height":64},{"srcX":256,"srcY":624,"destX":0,"destY":688,"width":64,"height":64},{"srcX":256,"srcY":688,"destX":320,"destY":0,"width":64,"height":64},{"srcX":256,"srcY":752,"destX":656,"destY":512,"width":64,"height":64},{"srcX":256,"srcY":816,"destX":128,"destY":320,"width":64,"height":64},{"srcX":256,"srcY":880,"destX":464,"destY":880,"width":64,"height":64},{"srcX":256,"srcY":944,"destX":784,"destY":192,"width":64,"height":64},{"srcX":256,"srcY":1008,"destX":256,"destY":0,"width":64,"height":64},{"srcX":256,"srcY":1072,"destX":592,"destY":512,"width":64,"height":64},{"srcX":256,"srcY":1136,"destX":64,"destY":368,"width":64,"height":64},{"srcX":320,"srcY":0,"destX":512,"destY":368,"width":64,"height":64},{"srcX":320,"srcY":64,"destX":0,"destY":128,"width":64,"height":64},{"srcX":320,"srcY":128,"destX":336,"destY":688,"width":64,"height":64},{"srcX":320,"srcY":192,"destX":640,"destY":0,"width":64,"height":64},{"srcX":320,"srcY":256,"destX":144,"destY":1008,"width":64,"height":64},{"srcX":320,"srcY":320,"destX":448,"destY":368,"width":64,"height":64},{"srcX":320,"srcY":384,"destX":784,"destY":880,"width":64,"height":64},{"srcX":320,"srcY":496,"destX":272,"destY":688,"width":64,"height":64},{"srcX":320,"srcY":560,"destX":576,"destY":0,"width":64,"height":64},{"srcX":320,"srcY":624,"destX":80,"destY":1008,"width":64,"height":64},{"srcX":320,"srcY":688,"destX":384,"destY":368,"width":64,"height":64},{"srcX":320,"srcY":752,"destX":720,"destY":880,"width":64,"height":64},{"srcX":320,"srcY":816,"destX":192,"destY":688,"width":64,"height":64},{"srcX":320,"srcY":880,"destX":512,"destY":0,"width":64,"height":64},{"srcX":320,"srcY":944,"destX":16,"destY":1008,"width":64,"height":64},{"srcX":320,"srcY":1008,"destX":320,"destY":368,"width":64,"height":64},{"srcX":320,"srcY":1072,"destX":656,"destY":880,"width":64,"height":64},{"srcX":320,"srcY":1136,"destX":128,"destY":688,"width":64,"height":64},{"srcX":384,"srcY":0,"destX":592,"destY":688,"width":64,"height":64},{"srcX":384,"srcY":64,"destX":80,"destY":496,"width":64,"height":64},{"srcX":384,"srcY":128,"destX":400,"destY":1008,"width":64,"height":64},{"srcX":384,"srcY":192,"destX":720,"destY":320,"width":64,"height":64},{"srcX":384,"srcY":256,"destX":192,"destY":176,"width":64,"height":64},{"srcX":384,"srcY":320,"destX":528,"destY":688,"width":64,"height":64},{"srcX":384,"srcY":384,"destX":0,"destY":496,"width":64,"height":64},{"srcX":384,"srcY":496,"destX":336,"destY":1008,"width":64,"height":64},{"srcX":384,"srcY":560,"destX":656,"destY":320,"width":64,"height":64},{"srcX":384,"srcY":624,"destX":128,"destY":128,"width":64,"height":64},{"srcX":384,"srcY":688,"destX":464,"dest
Y":688,"width":64,"height":64},{"srcX":384,"srcY":752,"destX":784,"destY":0,"width":64,"height":64},{"srcX":384,"srcY":816,"destX":272,"destY":1008,"width":64,"height":64},{"srcX":384,"srcY":880,"destX":592,"destY":320,"width":64,"height":64},{"srcX":384,"srcY":944,"destX":64,"destY":128,"width":64,"height":64},{"srcX":384,"srcY":1008,"destX":400,"destY":688,"width":64,"height":64},{"srcX":384,"srcY":1072,"destX":704,"destY":0,"width":64,"height":64},{"srcX":384,"srcY":1136,"destX":208,"destY":1008,"width":64,"height":64},{"srcX":448,"srcY":0,"destX":656,"destY":1008,"width":64,"height":64},{"srcX":448,"srcY":64,"destX":128,"destY":816,"width":64,"height":64},{"srcX":448,"srcY":128,"destX":448,"destY":128,"width":64,"height":64},{"srcX":448,"srcY":192,"destX":784,"destY":640,"width":64,"height":64},{"srcX":448,"srcY":256,"destX":272,"destY":496,"width":64,"height":64},{"srcX":448,"srcY":320,"destX":592,"destY":1008,"width":64,"height":64},{"srcX":448,"srcY":384,"destX":64,"destY":816,"width":64,"height":64},{"srcX":448,"srcY":496,"destX":384,"destY":128,"width":64,"height":64},{"srcX":448,"srcY":560,"destX":720,"destY":640,"width":64,"height":64},{"srcX":448,"srcY":624,"destX":208,"destY":496,"width":64,"height":64},{"srcX":448,"srcY":688,"destX":528,"destY":1008,"width":64,"height":64},{"srcX":448,"srcY":752,"destX":0,"destY":816,"width":64,"height":64},{"srcX":448,"srcY":816,"destX":320,"destY":176,"width":64,"height":64},{"srcX":448,"srcY":880,"destX":656,"destY":688,"width":64,"height":64},{"srcX":448,"srcY":944,"destX":144,"destY":496,"width":64,"height":64},{"srcX":448,"srcY":1008,"destX":464,"destY":1008,"width":64,"height":64},{"srcX":448,"srcY":1072,"destX":784,"destY":320,"width":64,"height":64},{"srcX":448,"srcY":1136,"destX":256,"destY":176,"width":64,"height":64},{"srcX":512,"srcY":0,"destX":720,"destY":128,"width":64,"height":64},{"srcX":512,"srcY":64,"destX":208,"destY":1136,"width":64,"height":64},{"srcX":512,"srcY":128,"destX":528,"destY":496,"width":64,"height":64},{"srcX":512,"srcY":192,"destX":0,"destY":304,"width":64,"height":64},{"srcX":512,"srcY":256,"destX":320,"destY":816,"width":64,"height":64},{"srcX":512,"srcY":320,"destX":656,"destY":128,"width":64,"height":64},{"srcX":512,"srcY":384,"destX":144,"destY":1136,"width":64,"height":64},{"srcX":512,"srcY":496,"destX":464,"destY":496,"width":64,"height":64},{"srcX":512,"srcY":560,"destX":784,"destY":1008,"width":64,"height":64},{"srcX":512,"srcY":624,"destX":256,"destY":816,"width":64,"height":64},{"srcX":512,"srcY":688,"destX":576,"destY":128,"width":64,"height":64},{"srcX":512,"srcY":752,"destX":64,"destY":1136,"width":64,"height":64},{"srcX":512,"srcY":816,"destX":400,"destY":496,"width":64,"height":64},{"srcX":512,"srcY":880,"destX":720,"destY":1008,"width":64,"height":64},{"srcX":512,"srcY":944,"destX":192,"destY":816,"width":64,"height":64},{"srcX":512,"srcY":1008,"destX":512,"destY":128,"width":64,"height":64},{"srcX":512,"srcY":1072,"destX":0,"destY":1136,"width":64,"height":64},{"srcX":512,"srcY":1136,"destX":336,"destY":496,"width":64,"height":64},{"srcX":592,"srcY":0,"destX":784,"destY":448,"width":64,"height":64},{"srcX":592,"srcY":64,"destX":256,"destY":304,"width":64,"height":64},{"srcX":592,"srcY":128,"destX":592,"destY":816,"width":64,"height":64},{"srcX":592,"srcY":192,"destX":64,"destY":624,"width":64,"height":64},{"srcX":592,"srcY":256,"destX":400,"destY":1136,"width":64,"height":64},{"srcX":592,"srcY":320,"destX":720,"destY":448,"width":64,"height":64},{"srcX":592,"srcY":384,"destX":192,"destY":304
,"width":64,"height":64},{"srcX":592,"srcY":496,"destX":528,"destY":816,"width":64,"height":64},{"srcX":592,"srcY":560,"destX":0,"destY":624,"width":64,"height":64},{"srcX":592,"srcY":624,"destX":336,"destY":1136,"width":64,"height":64},{"srcX":592,"srcY":688,"destX":656,"destY":448,"width":64,"height":64},{"srcX":592,"srcY":752,"destX":128,"destY":256,"width":64,"height":64},{"srcX":592,"srcY":816,"destX":464,"destY":816,"width":64,"height":64},{"srcX":592,"srcY":880,"destX":784,"destY":128,"width":64,"height":64},{"srcX":592,"srcY":944,"destX":272,"destY":1136,"width":64,"height":64},{"srcX":592,"srcY":1008,"destX":592,"destY":448,"width":64,"height":64},{"srcX":592,"srcY":1072,"destX":64,"destY":256,"width":64,"height":64},{"srcX":592,"srcY":1136,"destX":400,"destY":816,"width":64,"height":64},{"srcX":656,"srcY":0,"destX":0,"destY":64,"width":64,"height":64},{"srcX":656,"srcY":64,"destX":336,"destY":624,"width":64,"height":64},{"srcX":656,"srcY":128,"destX":656,"destY":1136,"width":64,"height":64},{"srcX":656,"srcY":192,"destX":128,"destY":944,"width":64,"height":64},{"srcX":656,"srcY":256,"destX":448,"destY":304,"width":64,"height":64},{"srcX":656,"srcY":320,"destX":784,"destY":816,"width":64,"height":64},{"srcX":656,"srcY":384,"destX":272,"destY":624,"width":64,"height":64},{"srcX":656,"srcY":496,"destX":592,"destY":1136,"width":64,"height":64},{"srcX":656,"srcY":560,"destX":64,"destY":944,"width":64,"height":64},{"srcX":656,"srcY":624,"destX":384,"destY":304,"width":64,"height":64},{"srcX":656,"srcY":688,"destX":720,"destY":816,"width":64,"height":64},{"srcX":656,"srcY":752,"destX":208,"destY":624,"width":64,"height":64},{"srcX":656,"srcY":816,"destX":528,"destY":1136,"width":64,"height":64},{"srcX":656,"srcY":880,"destX":0,"destY":944,"width":64,"height":64},{"srcX":656,"srcY":944,"destX":320,"destY":304,"width":64,"height":64},{"srcX":656,"srcY":1008,"destX":656,"destY":816,"width":64,"height":64},{"srcX":656,"srcY":1072,"destX":128,"destY":624,"width":64,"height":64},{"srcX":656,"srcY":1136,"destX":464,"destY":1136,"width":64,"height":64},{"srcX":720,"srcY":0,"destX":64,"destY":432,"width":64,"height":64},{"srcX":720,"srcY":64,"destX":384,"destY":944,"width":64,"height":64},{"srcX":720,"srcY":128,"destX":704,"destY":256,"width":64,"height":64},{"srcX":720,"srcY":192,"destX":192,"destY":112,"width":64,"height":64},{"srcX":720,"srcY":256,"destX":528,"destY":624,"width":64,"height":64},{"srcX":720,"srcY":320,"destX":0,"destY":432,"width":64,"height":64},{"srcX":720,"srcY":384,"destX":320,"destY":944,"width":64,"height":64},{"srcX":720,"srcY":496,"destX":640,"destY":256,"width":64,"height":64},{"srcX":720,"srcY":560,"destX":128,"destY":64,"width":64,"height":64},{"srcX":720,"srcY":624,"destX":464,"destY":624,"width":64,"height":64},{"srcX":720,"srcY":688,"destX":784,"destY":1136,"width":64,"height":64},{"srcX":720,"srcY":752,"destX":256,"destY":944,"width":64,"height":64},{"srcX":720,"srcY":816,"destX":576,"destY":256,"width":64,"height":64},{"srcX":720,"srcY":880,"destX":64,"destY":64,"width":64,"height":64},{"srcX":720,"srcY":944,"destX":400,"destY":624,"width":64,"height":64},{"srcX":720,"srcY":1008,"destX":720,"destY":1136,"width":64,"height":64},{"srcX":720,"srcY":1072,"destX":192,"destY":944,"width":64,"height":64},{"srcX":720,"srcY":1136,"destX":512,"destY":256,"width":64,"height":64},{"srcX":784,"srcY":0,"destX":128,"destY":752,"width":64,"height":64},{"srcX":784,"srcY":64,"destX":448,"destY":64,"width":64,"height":64},{"srcX":784,"srcY":128,"destX":784,"destY":576,"width":64,"
height":64},{"srcX":784,"srcY":192,"destX":256,"destY":432,"width":64,"height":64},{"srcX":784,"srcY":256,"destX":592,"destY":944,"width":64,"height":64},{"srcX":784,"srcY":320,"destX":64,"destY":752,"width":64,"height":64},{"srcX":784,"srcY":384,"destX":384,"destY":64,"width":64,"height":64},{"srcX":784,"srcY":496,"destX":720,"destY":576,"width":64,"height":64},{"srcX":784,"srcY":560,"destX":192,"destY":432,"width":64,"height":64},{"srcX":784,"srcY":624,"destX":528,"destY":944,"width":64,"height":64},{"srcX":784,"srcY":688,"destX":0,"destY":752,"width":64,"height":64},{"srcX":784,"srcY":752,"destX":320,"destY":64,"width":64,"height":64},{"srcX":784,"srcY":816,"destX":656,"destY":576,"width":64,"height":64},{"srcX":784,"srcY":880,"destX":128,"destY":432,"width":64,"height":64},{"srcX":784,"srcY":944,"destX":448,"destY":944,"width":64,"height":64},{"srcX":784,"srcY":1008,"destX":784,"destY":256,"width":64,"height":64},{"srcX":784,"srcY":1072,"destX":256,"destY":112,"width":64,"height":64},{"srcX":784,"srcY":1136,"destX":592,"destY":624,"width":64,"height":64}],[{"srcX":320,"srcY":896,"destX":320,"destY":896,"width":16,"height":48},{"srcX":0,"srcY":896,"destX":336,"destY":832,"width":64,"height":48},{"srcX":64,"srcY":896,"destX":400,"destY":0,"width":64,"height":48},{"srcX":128,"srcY":896,"destX":464,"destY":64,"width":64,"height":48},{"srcX":192,"srcY":896,"destX":528,"destY":128,"width":64,"height":48},{"srcX":256,"srcY":896,"destX":592,"destY":192,"width":64,"height":48},{"srcX":336,"srcY":896,"destX":656,"destY":256,"width":64,"height":48},{"srcX":400,"srcY":896,"destX":720,"destY":320,"width":64,"height":48},{"srcX":464,"srcY":896,"destX":784,"destY":384,"width":64,"height":48},{"srcX":528,"srcY":896,"destX":0,"destY":896,"width":64,"height":48},{"srcX":592,"srcY":896,"destX":64,"destY":960,"width":64,"height":48},{"srcX":656,"srcY":896,"destX":128,"destY":1024,"width":64,"height":48},{"srcX":720,"srcY":896,"destX":192,"destY":1088,"width":64,"height":48},{"srcX":784,"srcY":896,"destX":256,"destY":896,"width":64,"height":48},{"srcX":320,"srcY":0,"destX":128,"destY":128,"width":16,"height":64},{"srcX":320,"srcY":64,"destX":192,"destY":192,"width":16,"height":64},{"srcX":320,"srcY":128,"destX":256,"destY":256,"width":16,"height":64},{"srcX":320,"srcY":192,"destX":0,"destY":320,"width":16,"height":64},{"srcX":320,"srcY":256,"destX":64,"destY":384,"width":16,"height":64},{"srcX":320,"srcY":320,"destX":128,"destY":448,"width":16,"height":64},{"srcX":320,"srcY":384,"destX":192,"destY":512,"width":16,"height":64},{"srcX":320,"srcY":448,"destX":256,"destY":576,"width":16,"height":64},{"srcX":320,"srcY":512,"destX":0,"destY":640,"width":16,"height":64},{"srcX":320,"srcY":576,"destX":64,"destY":704,"width":16,"height":64},{"srcX":320,"srcY":640,"destX":128,"destY":768,"width":16,"height":64},{"srcX":320,"srcY":704,"destX":192,"destY":832,"width":16,"height":64},{"srcX":320,"srcY":768,"destX":320,"destY":944,"width":16,"height":64},{"srcX":320,"srcY":832,"destX":384,"destY":1008,"width":16,"height":64},{"srcX":320,"srcY":944,"destX":448,"destY":1072,"width":16,"height":64},{"srcX":320,"srcY":1008,"destX":512,"destY":1136,"width":16,"height":64},{"srcX":320,"srcY":1072,"destX":0,"destY":0,"width":16,"height":64},{"srcX":320,"srcY":1136,"destX":64,"destY":64,"width":16,"height":64},{"srcX":0,"srcY":0,"destX":400,"destY":944,"width":64,"height":64},{"srcX":0,"srcY":64,"destX":720,"destY":256,"width":64,"height":64},{"srcX":0,"srcY":128,"destX":208,"destY":64,"width":64,"height":64},{"srcX":0,"srcY":19
2,"destX":528,"destY":624,"width":64,"height":64},{"srcX":0,"srcY":256,"destX":0,"destY":384,"width":64,"height":64},{"srcX":0,"srcY":320,"destX":336,"destY":944,"width":64,"height":64},{"srcX":0,"srcY":384,"destX":656,"destY":304,"width":64,"height":64},{"srcX":0,"srcY":448,"destX":144,"destY":64,"width":64,"height":64},{"srcX":0,"srcY":512,"destX":464,"destY":624,"width":64,"height":64},{"srcX":0,"srcY":576,"destX":784,"destY":1136,"width":64,"height":64},{"srcX":0,"srcY":640,"destX":256,"destY":944,"width":64,"height":64},{"srcX":0,"srcY":704,"destX":592,"destY":304,"width":64,"height":64},{"srcX":0,"srcY":768,"destX":80,"destY":64,"width":64,"height":64},{"srcX":0,"srcY":832,"destX":400,"destY":624,"width":64,"height":64},{"srcX":0,"srcY":944,"destX":720,"destY":1136,"width":64,"height":64},{"srcX":0,"srcY":1008,"destX":192,"destY":896,"width":64,"height":64},{"srcX":0,"srcY":1072,"destX":528,"destY":304,"width":64,"height":64},{"srcX":0,"srcY":1136,"destX":0,"destY":64,"width":64,"height":64},{"srcX":64,"srcY":0,"destX":464,"destY":112,"width":64,"height":64},{"srcX":64,"srcY":64,"destX":784,"destY":624,"width":64,"height":64},{"srcX":64,"srcY":128,"destX":272,"destY":384,"width":64,"height":64},{"srcX":64,"srcY":192,"destX":592,"destY":944,"width":64,"height":64},{"srcX":64,"srcY":256,"destX":80,"destY":704,"width":64,"height":64},{"srcX":64,"srcY":320,"destX":400,"destY":112,"width":64,"height":64},{"srcX":64,"srcY":384,"destX":720,"destY":624,"width":64,"height":64},{"srcX":64,"srcY":448,"destX":208,"destY":384,"width":64,"height":64},{"srcX":64,"srcY":512,"destX":528,"destY":944,"width":64,"height":64},{"srcX":64,"srcY":576,"destX":0,"destY":704,"width":64,"height":64},{"srcX":64,"srcY":640,"destX":336,"destY":64,"width":64,"height":64},{"srcX":64,"srcY":704,"destX":656,"destY":624,"width":64,"height":64},{"srcX":64,"srcY":768,"destX":144,"destY":384,"width":64,"height":64},{"srcX":64,"srcY":832,"destX":464,"destY":944,"width":64,"height":64},{"srcX":64,"srcY":944,"destX":784,"destY":256,"width":64,"height":64},{"srcX":64,"srcY":1008,"destX":272,"destY":64,"width":64,"height":64},{"srcX":64,"srcY":1072,"destX":592,"destY":624,"width":64,"height":64},{"srcX":64,"srcY":1136,"destX":80,"destY":384,"width":64,"height":64},{"srcX":128,"srcY":0,"destX":528,"destY":432,"width":64,"height":64},{"srcX":128,"srcY":64,"destX":0,"destY":192,"width":64,"height":64},{"srcX":128,"srcY":128,"destX":336,"destY":704,"width":64,"height":64},{"srcX":128,"srcY":192,"destX":656,"destY":64,"width":64,"height":64},{"srcX":128,"srcY":256,"destX":128,"destY":1072,"width":64,"height":64},{"srcX":128,"srcY":320,"destX":464,"destY":432,"width":64,"height":64},{"srcX":128,"srcY":384,"destX":784,"destY":944,"width":64,"height":64},{"srcX":128,"srcY":448,"destX":272,"destY":704,"width":64,"height":64},{"srcX":128,"srcY":512,"destX":592,"destY":64,"width":64,"height":64},{"srcX":128,"srcY":576,"destX":64,"destY":1072,"width":64,"height":64},{"srcX":128,"srcY":640,"destX":400,"destY":432,"width":64,"height":64},{"srcX":128,"srcY":704,"destX":720,"destY":944,"width":64,"height":64},{"srcX":128,"srcY":768,"destX":208,"destY":704,"width":64,"height":64},{"srcX":128,"srcY":832,"destX":528,"destY":64,"width":64,"height":64},{"srcX":128,"srcY":944,"destX":0,"destY":1072,"width":64,"height":64},{"srcX":128,"srcY":1008,"destX":336,"destY":384,"width":64,"height":64},{"srcX":128,"srcY":1072,"destX":656,"destY":944,"width":64,"height":64},{"srcX":128,"srcY":1136,"destX":144,"destY":704,"width":64,"height":64},{"srcX":192,"sr
cY":0,"destX":592,"destY":752,"width":64,"height":64},{"srcX":192,"srcY":64,"destX":64,"destY":512,"width":64,"height":64},{"srcX":192,"srcY":128,"destX":384,"destY":1072,"width":64,"height":64},{"srcX":192,"srcY":192,"destX":720,"destY":432,"width":64,"height":64},{"srcX":192,"srcY":256,"destX":208,"destY":192,"width":64,"height":64},{"srcX":192,"srcY":320,"destX":528,"destY":752,"width":64,"height":64},{"srcX":192,"srcY":384,"destX":0,"destY":512,"width":64,"height":64},{"srcX":192,"srcY":448,"destX":320,"destY":1072,"width":64,"height":64},{"srcX":192,"srcY":512,"destX":656,"destY":432,"width":64,"height":64},{"srcX":192,"srcY":576,"destX":128,"destY":192,"width":64,"height":64},{"srcX":192,"srcY":640,"destX":464,"destY":752,"width":64,"height":64},{"srcX":192,"srcY":704,"destX":784,"destY":64,"width":64,"height":64},{"srcX":192,"srcY":768,"destX":256,"destY":1072,"width":64,"height":64},{"srcX":192,"srcY":832,"destX":592,"destY":432,"width":64,"height":64},{"srcX":192,"srcY":944,"destX":64,"destY":192,"width":64,"height":64},{"srcX":192,"srcY":1008,"destX":400,"destY":752,"width":64,"height":64},{"srcX":192,"srcY":1072,"destX":720,"destY":64,"width":64,"height":64},{"srcX":192,"srcY":1136,"destX":192,"destY":1024,"width":64,"height":64},{"srcX":256,"srcY":0,"destX":656,"destY":1072,"width":64,"height":64},{"srcX":256,"srcY":64,"destX":128,"destY":832,"width":64,"height":64},{"srcX":256,"srcY":128,"destX":464,"destY":240,"width":64,"height":64},{"srcX":256,"srcY":192,"destX":784,"destY":752,"width":64,"height":64},{"srcX":256,"srcY":256,"destX":272,"destY":512,"width":64,"height":64},{"srcX":256,"srcY":320,"destX":592,"destY":1072,"width":64,"height":64},{"srcX":256,"srcY":384,"destX":64,"destY":832,"width":64,"height":64},{"srcX":256,"srcY":448,"destX":400,"destY":240,"width":64,"height":64},{"srcX":256,"srcY":512,"destX":720,"destY":752,"width":64,"height":64},{"srcX":256,"srcY":576,"destX":208,"destY":512,"width":64,"height":64},{"srcX":256,"srcY":640,"destX":528,"destY":1072,"width":64,"height":64},{"srcX":256,"srcY":704,"destX":0,"destY":832,"width":64,"height":64},{"srcX":256,"srcY":768,"destX":336,"destY":192,"width":64,"height":64},{"srcX":256,"srcY":832,"destX":656,"destY":752,"width":64,"height":64},{"srcX":256,"srcY":944,"destX":128,"destY":512,"width":64,"height":64},{"srcX":256,"srcY":1008,"destX":464,"destY":1072,"width":64,"height":64},{"srcX":256,"srcY":1072,"destX":784,"destY":432,"width":64,"height":64},{"srcX":256,"srcY":1136,"destX":272,"destY":192,"width":64,"height":64},{"srcX":336,"srcY":0,"destX":720,"destY":192,"width":64,"height":64},{"srcX":336,"srcY":64,"destX":208,"destY":0,"width":64,"height":64},{"srcX":336,"srcY":128,"destX":528,"destY":560,"width":64,"height":64},{"srcX":336,"srcY":192,"destX":16,"destY":320,"width":64,"height":64},{"srcX":336,"srcY":256,"destX":336,"destY":880,"width":64,"height":64},{"srcX":336,"srcY":320,"destX":656,"destY":192,"width":64,"height":64},{"srcX":336,"srcY":384,"destX":144,"destY":0,"width":64,"height":64},{"srcX":336,"srcY":448,"destX":464,"destY":560,"width":64,"height":64},{"srcX":336,"srcY":512,"destX":784,"destY":1072,"width":64,"height":64},{"srcX":336,"srcY":576,"destX":272,"destY":832,"width":64,"height":64},{"srcX":336,"srcY":640,"destX":592,"destY":240,"width":64,"height":64},{"srcX":336,"srcY":704,"destX":80,"destY":0,"width":64,"height":64},{"srcX":336,"srcY":768,"destX":400,"destY":560,"width":64,"height":64},{"srcX":336,"srcY":832,"destX":720,"destY":1072,"width":64,"height":64},{"srcX":336,"srcY":944,"destX"
:208,"destY":832,"width":64,"height":64},{"srcX":336,"srcY":1008,"destX":528,"destY":240,"width":64,"height":64},{"srcX":336,"srcY":1072,"destX":16,"destY":0,"width":64,"height":64},{"srcX":336,"srcY":1136,"destX":336,"destY":512,"width":64,"height":64},{"srcX":400,"srcY":0,"destX":784,"destY":560,"width":64,"height":64},{"srcX":400,"srcY":64,"destX":272,"destY":320,"width":64,"height":64},{"srcX":400,"srcY":128,"destX":592,"destY":880,"width":64,"height":64},{"srcX":400,"srcY":192,"destX":80,"destY":640,"width":64,"height":64},{"srcX":400,"srcY":256,"destX":400,"destY":48,"width":64,"height":64},{"srcX":400,"srcY":320,"destX":720,"destY":560,"width":64,"height":64},{"srcX":400,"srcY":384,"destX":208,"destY":320,"width":64,"height":64},{"srcX":400,"srcY":448,"destX":528,"destY":880,"width":64,"height":64},{"srcX":400,"srcY":512,"destX":16,"destY":640,"width":64,"height":64},{"srcX":400,"srcY":576,"destX":336,"destY":0,"width":64,"height":64},{"srcX":400,"srcY":640,"destX":656,"destY":560,"width":64,"height":64},{"srcX":400,"srcY":704,"destX":144,"destY":320,"width":64,"height":64},{"srcX":400,"srcY":768,"destX":464,"destY":880,"width":64,"height":64},{"srcX":400,"srcY":832,"destX":784,"destY":192,"width":64,"height":64},{"srcX":400,"srcY":944,"destX":272,"destY":0,"width":64,"height":64},{"srcX":400,"srcY":1008,"destX":592,"destY":560,"width":64,"height":64},{"srcX":400,"srcY":1072,"destX":80,"destY":320,"width":64,"height":64},{"srcX":400,"srcY":1136,"destX":400,"destY":880,"width":64,"height":64},{"srcX":464,"srcY":0,"destX":0,"destY":128,"width":64,"height":64},{"srcX":464,"srcY":64,"destX":336,"destY":640,"width":64,"height":64},{"srcX":464,"srcY":128,"destX":656,"destY":0,"width":64,"height":64},{"srcX":464,"srcY":192,"destX":128,"destY":960,"width":64,"height":64},{"srcX":464,"srcY":256,"destX":464,"destY":368,"width":64,"height":64},{"srcX":464,"srcY":320,"destX":784,"destY":880,"width":64,"height":64},{"srcX":464,"srcY":384,"destX":272,"destY":640,"width":64,"height":64},{"srcX":464,"srcY":448,"destX":592,"destY":0,"width":64,"height":64},{"srcX":464,"srcY":512,"destX":64,"destY":1008,"width":64,"height":64},{"srcX":464,"srcY":576,"destX":400,"destY":368,"width":64,"height":64},{"srcX":464,"srcY":640,"destX":720,"destY":880,"width":64,"height":64},{"srcX":464,"srcY":704,"destX":208,"destY":640,"width":64,"height":64},{"srcX":464,"srcY":768,"destX":528,"destY":0,"width":64,"height":64},{"srcX":464,"srcY":832,"destX":0,"destY":1008,"width":64,"height":64},{"srcX":464,"srcY":944,"destX":336,"destY":320,"width":64,"height":64},{"srcX":464,"srcY":1008,"destX":656,"destY":880,"width":64,"height":64},{"srcX":464,"srcY":1072,"destX":144,"destY":640,"width":64,"height":64},{"srcX":464,"srcY":1136,"destX":464,"destY":0,"width":64,"height":64},{"srcX":528,"srcY":0,"destX":64,"destY":448,"width":64,"height":64},{"srcX":528,"srcY":64,"destX":400,"destY":1008,"width":64,"height":64},{"srcX":528,"srcY":128,"destX":720,"destY":368,"width":64,"height":64},{"srcX":528,"srcY":192,"destX":208,"destY":128,"width":64,"height":64},{"srcX":528,"srcY":256,"destX":528,"destY":688,"width":64,"height":64},{"srcX":528,"srcY":320,"destX":0,"destY":448,"width":64,"height":64},{"srcX":528,"srcY":384,"destX":320,"destY":1008,"width":64,"height":64},{"srcX":528,"srcY":448,"destX":656,"destY":368,"width":64,"height":64},{"srcX":528,"srcY":512,"destX":144,"destY":128,"width":64,"height":64},{"srcX":528,"srcY":576,"destX":464,"destY":688,"width":64,"height":64},{"srcX":528,"srcY":640,"destX":784,"destY":0,"width":64,"h
eight":64},{"srcX":528,"srcY":704,"destX":256,"destY":1008,"width":64,"height":64},{"srcX":528,"srcY":768,"destX":592,"destY":368,"width":64,"height":64},{"srcX":528,"srcY":832,"destX":64,"destY":128,"width":64,"height":64},{"srcX":528,"srcY":944,"destX":400,"destY":688,"width":64,"height":64},{"srcX":528,"srcY":1008,"destX":720,"destY":0,"width":64,"height":64},{"srcX":528,"srcY":1072,"destX":192,"destY":960,"width":64,"height":64},{"srcX":528,"srcY":1136,"destX":528,"destY":368,"width":64,"height":64},{"srcX":592,"srcY":0,"destX":144,"destY":768,"width":64,"height":64},{"srcX":592,"srcY":64,"destX":464,"destY":176,"width":64,"height":64},{"srcX":592,"srcY":128,"destX":784,"destY":688,"width":64,"height":64},{"srcX":592,"srcY":192,"destX":272,"destY":448,"width":64,"height":64},{"srcX":592,"srcY":256,"destX":592,"destY":1008,"width":64,"height":64},{"srcX":592,"srcY":320,"destX":64,"destY":768,"width":64,"height":64},{"srcX":592,"srcY":384,"destX":400,"destY":176,"width":64,"height":64},{"srcX":592,"srcY":448,"destX":720,"destY":688,"width":64,"height":64},{"srcX":592,"srcY":512,"destX":208,"destY":448,"width":64,"height":64},{"srcX":592,"srcY":576,"destX":528,"destY":1008,"width":64,"height":64},{"srcX":592,"srcY":640,"destX":0,"destY":768,"width":64,"height":64},{"srcX":592,"srcY":704,"destX":336,"destY":128,"width":64,"height":64},{"srcX":592,"srcY":768,"destX":656,"destY":688,"width":64,"height":64},{"srcX":592,"srcY":832,"destX":144,"destY":448,"width":64,"height":64},{"srcX":592,"srcY":944,"destX":464,"destY":1008,"width":64,"height":64},{"srcX":592,"srcY":1008,"destX":784,"destY":320,"width":64,"height":64},{"srcX":592,"srcY":1072,"destX":272,"destY":128,"width":64,"height":64},{"srcX":592,"srcY":1136,"destX":592,"destY":688,"width":64,"height":64},{"srcX":656,"srcY":0,"destX":192,"destY":1136,"width":64,"height":64},{"srcX":656,"srcY":64,"destX":528,"destY":496,"width":64,"height":64},{"srcX":656,"srcY":128,"destX":0,"destY":256,"width":64,"height":64},{"srcX":656,"srcY":192,"destX":336,"destY":768,"width":64,"height":64},{"srcX":656,"srcY":256,"destX":656,"destY":128,"width":64,"height":64},{"srcX":656,"srcY":320,"destX":128,"destY":1136,"width":64,"height":64},{"srcX":656,"srcY":384,"destX":464,"destY":496,"width":64,"height":64},{"srcX":656,"srcY":448,"destX":784,"destY":1008,"width":64,"height":64},{"srcX":656,"srcY":512,"destX":272,"destY":768,"width":64,"height":64},{"srcX":656,"srcY":576,"destX":592,"destY":128,"width":64,"height":64},{"srcX":656,"srcY":640,"destX":64,"destY":1136,"width":64,"height":64},{"srcX":656,"srcY":704,"destX":400,"destY":496,"width":64,"height":64},{"srcX":656,"srcY":768,"destX":720,"destY":1008,"width":64,"height":64},{"srcX":656,"srcY":832,"destX":208,"destY":768,"width":64,"height":64},{"srcX":656,"srcY":944,"destX":528,"destY":176,"width":64,"height":64},{"srcX":656,"srcY":1008,"destX":0,"destY":1136,"width":64,"height":64},{"srcX":656,"srcY":1072,"destX":336,"destY":448,"width":64,"height":64},{"srcX":656,"srcY":1136,"destX":656,"destY":1008,"width":64,"height":64},{"srcX":720,"srcY":0,"destX":272,"destY":256,"width":64,"height":64},{"srcX":720,"srcY":64,"destX":592,"destY":816,"width":64,"height":64},{"srcX":720,"srcY":128,"destX":64,"destY":576,"width":64,"height":64},{"srcX":720,"srcY":192,"destX":384,"destY":1136,"width":64,"height":64},{"srcX":720,"srcY":256,"destX":720,"destY":496,"width":64,"height":64},{"srcX":720,"srcY":320,"destX":192,"destY":256,"width":64,"height":64},{"srcX":720,"srcY":384,"destX":528,"destY":816,"width":64,"height
":64},{"srcX":720,"srcY":448,"destX":0,"destY":576,"width":64,"height":64},{"srcX":720,"srcY":512,"destX":320,"destY":1136,"width":64,"height":64},{"srcX":720,"srcY":576,"destX":656,"destY":496,"width":64,"height":64},{"srcX":720,"srcY":640,"destX":128,"destY":256,"width":64,"height":64},{"srcX":720,"srcY":704,"destX":464,"destY":816,"width":64,"height":64},{"srcX":720,"srcY":768,"destX":784,"destY":128,"width":64,"height":64},{"srcX":720,"srcY":832,"destX":256,"destY":1136,"width":64,"height":64},{"srcX":720,"srcY":944,"destX":592,"destY":496,"width":64,"height":64},{"srcX":720,"srcY":1008,"destX":64,"destY":256,"width":64,"height":64},{"srcX":720,"srcY":1072,"destX":400,"destY":816,"width":64,"height":64},{"srcX":720,"srcY":1136,"destX":720,"destY":128,"width":64,"height":64},{"srcX":784,"srcY":0,"destX":336,"destY":576,"width":64,"height":64},{"srcX":784,"srcY":64,"destX":656,"destY":1136,"width":64,"height":64},{"srcX":784,"srcY":128,"destX":128,"destY":896,"width":64,"height":64},{"srcX":784,"srcY":192,"destX":464,"destY":304,"width":64,"height":64},{"srcX":784,"srcY":256,"destX":784,"destY":816,"width":64,"height":64},{"srcX":784,"srcY":320,"destX":272,"destY":576,"width":64,"height":64},{"srcX":784,"srcY":384,"destX":592,"destY":1136,"width":64,"height":64},{"srcX":784,"srcY":448,"destX":64,"destY":896,"width":64,"height":64},{"srcX":784,"srcY":512,"destX":400,"destY":304,"width":64,"height":64},{"srcX":784,"srcY":576,"destX":720,"destY":816,"width":64,"height":64},{"srcX":784,"srcY":640,"destX":192,"destY":576,"width":64,"height":64},{"srcX":784,"srcY":704,"destX":528,"destY":1136,"width":64,"height":64},{"srcX":784,"srcY":768,"destX":0,"destY":944,"width":64,"height":64},{"srcX":784,"srcY":832,"destX":336,"destY":256,"width":64,"height":64},{"srcX":784,"srcY":944,"destX":656,"destY":816,"width":64,"height":64},{"srcX":784,"srcY":1008,"destX":128,"destY":576,"width":64,"height":64},{"srcX":784,"srcY":1072,"destX":448,"destY":1136,"width":64,"height":64},{"srcX":784,"srcY":1136,"destX":784,"destY":496,"width":64,"height":64}],[{"srcX":64,"srcY":192,"destX":64,"destY":192,"width":16,"height":48},{"srcX":0,"srcY":192,"destX":80,"destY":640,"width":64,"height":48},{"srcX":80,"srcY":192,"destX":144,"destY":704,"width":64,"height":48},{"srcX":144,"srcY":192,"destX":208,"destY":768,"width":64,"height":48},{"srcX":208,"srcY":192,"destX":272,"destY":832,"width":64,"height":48},{"srcX":272,"srcY":192,"destX":336,"destY":896,"width":64,"height":48},{"srcX":336,"srcY":192,"destX":400,"destY":960,"width":64,"height":48},{"srcX":400,"srcY":192,"destX":464,"destY":1024,"width":64,"height":48},{"srcX":464,"srcY":192,"destX":528,"destY":1088,"width":64,"height":48},{"srcX":528,"srcY":192,"destX":592,"destY":192,"width":64,"height":48},{"srcX":592,"srcY":192,"destX":656,"destY":256,"width":64,"height":48},{"srcX":656,"srcY":192,"destX":720,"destY":320,"width":64,"height":48},{"srcX":720,"srcY":192,"destX":784,"destY":384,"width":64,"height":48},{"srcX":784,"srcY":192,"destX":0,"destY":0,"width":64,"height":48},{"srcX":64,"srcY":0,"destX":0,"destY":240,"width":16,"height":64},{"srcX":64,"srcY":64,"destX":0,"destY":304,"width":16,"height":64},{"srcX":64,"srcY":128,"destX":0,"destY":368,"width":16,"height":64},{"srcX":64,"srcY":240,"destX":0,"destY":432,"width":16,"height":64},{"srcX":64,"srcY":304,"destX":0,"destY":496,"width":16,"height":64},{"srcX":64,"srcY":368,"destX":0,"destY":560,"width":16,"height":64},{"srcX":64,"srcY":432,"destX":0,"destY":624,"width":16,"height":64},{"srcX":64,"srcY":496,"des
tX":0,"destY":688,"width":16,"height":64},{"srcX":64,"srcY":560,"destX":0,"destY":752,"width":16,"height":64},{"srcX":64,"srcY":624,"destX":0,"destY":816,"width":16,"height":64},{"srcX":64,"srcY":688,"destX":0,"destY":880,"width":16,"height":64},{"srcX":64,"srcY":752,"destX":0,"destY":944,"width":16,"height":64},{"srcX":64,"srcY":816,"destX":0,"destY":1008,"width":16,"height":64},{"srcX":64,"srcY":880,"destX":0,"destY":1072,"width":16,"height":64},{"srcX":64,"srcY":944,"destX":0,"destY":1136,"width":16,"height":64},{"srcX":64,"srcY":1008,"destX":640,"destY":0,"width":16,"height":64},{"srcX":64,"srcY":1072,"destX":704,"destY":64,"width":16,"height":64},{"srcX":64,"srcY":1136,"destX":768,"destY":128,"width":16,"height":64},{"srcX":0,"srcY":0,"destX":592,"destY":816,"width":64,"height":64},{"srcX":0,"srcY":64,"destX":80,"destY":576,"width":64,"height":64},{"srcX":0,"srcY":128,"destX":400,"destY":1136,"width":64,"height":64},{"srcX":0,"srcY":240,"destX":720,"destY":496,"width":64,"height":64},{"srcX":0,"srcY":304,"destX":208,"destY":256,"width":64,"height":64},{"srcX":0,"srcY":368,"destX":528,"destY":768,"width":64,"height":64},{"srcX":0,"srcY":432,"destX":16,"destY":624,"width":64,"height":64},{"srcX":0,"srcY":496,"destX":336,"destY":1136,"width":64,"height":64},{"srcX":0,"srcY":560,"destX":656,"destY":496,"width":64,"height":64},{"srcX":0,"srcY":624,"destX":144,"destY":256,"width":64,"height":64},{"srcX":0,"srcY":688,"destX":464,"destY":768,"width":64,"height":64},{"srcX":0,"srcY":752,"destX":784,"destY":128,"width":64,"height":64},{"srcX":0,"srcY":816,"destX":272,"destY":1136,"width":64,"height":64},{"srcX":0,"srcY":880,"destX":592,"destY":496,"width":64,"height":64},{"srcX":0,"srcY":944,"destX":80,"destY":256,"width":64,"height":64},{"srcX":0,"srcY":1008,"destX":400,"destY":768,"width":64,"height":64},{"srcX":0,"srcY":1072,"destX":704,"destY":128,"width":64,"height":64},{"srcX":0,"srcY":1136,"destX":208,"destY":1136,"width":64,"height":64},{"srcX":80,"srcY":0,"destX":656,"destY":1136,"width":64,"height":64},{"srcX":80,"srcY":64,"destX":144,"destY":944,"width":64,"height":64},{"srcX":80,"srcY":128,"destX":464,"destY":256,"width":64,"height":64},{"srcX":80,"srcY":240,"destX":784,"destY":816,"width":64,"height":64},{"srcX":80,"srcY":304,"destX":272,"destY":576,"width":64,"height":64},{"srcX":80,"srcY":368,"destX":592,"destY":1136,"width":64,"height":64},{"srcX":80,"srcY":432,"destX":80,"destY":944,"width":64,"height":64},{"srcX":80,"srcY":496,"destX":400,"destY":256,"width":64,"height":64},{"srcX":80,"srcY":560,"destX":720,"destY":816,"width":64,"height":64},{"srcX":80,"srcY":624,"destX":208,"destY":576,"width":64,"height":64},{"srcX":80,"srcY":688,"destX":528,"destY":1136,"width":64,"height":64},{"srcX":80,"srcY":752,"destX":16,"destY":944,"width":64,"height":64},{"srcX":80,"srcY":816,"destX":336,"destY":256,"width":64,"height":64},{"srcX":80,"srcY":880,"destX":656,"destY":816,"width":64,"height":64},{"srcX":80,"srcY":944,"destX":144,"destY":576,"width":64,"height":64},{"srcX":80,"srcY":1008,"destX":464,"destY":1136,"width":64,"height":64},{"srcX":80,"srcY":1072,"destX":784,"destY":496,"width":64,"height":64},{"srcX":80,"srcY":1136,"destX":272,"destY":256,"width":64,"height":64},{"srcX":144,"srcY":0,"destX":720,"destY":256,"width":64,"height":64},{"srcX":144,"srcY":64,"destX":192,"destY":64,"width":64,"height":64},{"srcX":144,"srcY":128,"destX":528,"destY":576,"width":64,"height":64},{"srcX":144,"srcY":240,"destX":16,"destY":432,"width":64,"height":64},{"srcX":144,"srcY":304,"destX":336,"dest
Y":944,"width":64,"height":64},{"srcX":144,"srcY":368,"destX":656,"destY":304,"width":64,"height":64},{"srcX":144,"srcY":432,"destX":128,"destY":64,"width":64,"height":64},{"srcX":144,"srcY":496,"destX":464,"destY":576,"width":64,"height":64},{"srcX":144,"srcY":560,"destX":784,"destY":1136,"width":64,"height":64},{"srcX":144,"srcY":624,"destX":272,"destY":944,"width":64,"height":64},{"srcX":144,"srcY":688,"destX":592,"destY":304,"width":64,"height":64},{"srcX":144,"srcY":752,"destX":64,"destY":64,"width":64,"height":64},{"srcX":144,"srcY":816,"destX":400,"destY":576,"width":64,"height":64},{"srcX":144,"srcY":880,"destX":720,"destY":1136,"width":64,"height":64},{"srcX":144,"srcY":944,"destX":208,"destY":944,"width":64,"height":64},{"srcX":144,"srcY":1008,"destX":528,"destY":256,"width":64,"height":64},{"srcX":144,"srcY":1072,"destX":0,"destY":112,"width":64,"height":64},{"srcX":144,"srcY":1136,"destX":336,"destY":576,"width":64,"height":64},{"srcX":208,"srcY":0,"destX":784,"destY":624,"width":64,"height":64},{"srcX":208,"srcY":64,"destX":272,"destY":384,"width":64,"height":64},{"srcX":208,"srcY":128,"destX":592,"destY":944,"width":64,"height":64},{"srcX":208,"srcY":240,"destX":80,"destY":752,"width":64,"height":64},{"srcX":208,"srcY":304,"destX":384,"destY":64,"width":64,"height":64},{"srcX":208,"srcY":368,"destX":720,"destY":624,"width":64,"height":64},{"srcX":208,"srcY":432,"destX":208,"destY":384,"width":64,"height":64},{"srcX":208,"srcY":496,"destX":528,"destY":896,"width":64,"height":64},{"srcX":208,"srcY":560,"destX":16,"destY":752,"width":64,"height":64},{"srcX":208,"srcY":624,"destX":320,"destY":64,"width":64,"height":64},{"srcX":208,"srcY":688,"destX":656,"destY":624,"width":64,"height":64},{"srcX":208,"srcY":752,"destX":144,"destY":384,"width":64,"height":64},{"srcX":208,"srcY":816,"destX":464,"destY":896,"width":64,"height":64},{"srcX":208,"srcY":880,"destX":784,"destY":256,"width":64,"height":64},{"srcX":208,"srcY":944,"destX":256,"destY":64,"width":64,"height":64},{"srcX":208,"srcY":1008,"destX":592,"destY":624,"width":64,"height":64},{"srcX":208,"srcY":1072,"destX":80,"destY":384,"width":64,"height":64},{"srcX":208,"srcY":1136,"destX":400,"destY":896,"width":64,"height":64},{"srcX":272,"srcY":0,"destX":16,"destY":240,"width":64,"height":64},{"srcX":272,"srcY":64,"destX":336,"destY":704,"width":64,"height":64},{"srcX":272,"srcY":128,"destX":640,"destY":64,"width":64,"height":64},{"srcX":272,"srcY":240,"destX":144,"destY":1072,"width":64,"height":64},{"srcX":272,"srcY":304,"destX":464,"destY":384,"width":64,"height":64},{"srcX":272,"srcY":368,"destX":784,"destY":944,"width":64,"height":64},{"srcX":272,"srcY":432,"destX":272,"destY":704,"width":64,"height":64},{"srcX":272,"srcY":496,"destX":576,"destY":64,"width":64,"height":64},{"srcX":272,"srcY":560,"destX":80,"destY":1072,"width":64,"height":64},{"srcX":272,"srcY":624,"destX":400,"destY":384,"width":64,"height":64},{"srcX":272,"srcY":688,"destX":720,"destY":944,"width":64,"height":64},{"srcX":272,"srcY":752,"destX":208,"destY":704,"width":64,"height":64},{"srcX":272,"srcY":816,"destX":512,"destY":64,"width":64,"height":64},{"srcX":272,"srcY":880,"destX":16,"destY":1072,"width":64,"height":64},{"srcX":272,"srcY":944,"destX":336,"destY":384,"width":64,"height":64},{"srcX":272,"srcY":1008,"destX":656,"destY":944,"width":64,"height":64},{"srcX":272,"srcY":1072,"destX":144,"destY":752,"width":64,"height":64},{"srcX":272,"srcY":1136,"destX":448,"destY":64,"width":64,"height":64},{"srcX":336,"srcY":0,"destX":80,"destY":512,"width":64,
"height":64},{"srcX":336,"srcY":64,"destX":400,"destY":1072,"width":64,"height":64},{"srcX":336,"srcY":128,"destX":720,"destY":432,"width":64,"height":64},{"srcX":336,"srcY":240,"destX":208,"destY":192,"width":64,"height":64},{"srcX":336,"srcY":304,"destX":528,"destY":704,"width":64,"height":64},{"srcX":336,"srcY":368,"destX":16,"destY":560,"width":64,"height":64},{"srcX":336,"srcY":432,"destX":336,"destY":1072,"width":64,"height":64},{"srcX":336,"srcY":496,"destX":656,"destY":432,"width":64,"height":64},{"srcX":336,"srcY":560,"destX":144,"destY":192,"width":64,"height":64},{"srcX":336,"srcY":624,"destX":464,"destY":704,"width":64,"height":64},{"srcX":336,"srcY":688,"destX":784,"destY":64,"width":64,"height":64},{"srcX":336,"srcY":752,"destX":272,"destY":1072,"width":64,"height":64},{"srcX":336,"srcY":816,"destX":592,"destY":432,"width":64,"height":64},{"srcX":336,"srcY":880,"destX":80,"destY":192,"width":64,"height":64},{"srcX":336,"srcY":944,"destX":400,"destY":704,"width":64,"height":64},{"srcX":336,"srcY":1008,"destX":720,"destY":64,"width":64,"height":64},{"srcX":336,"srcY":1072,"destX":208,"destY":1072,"width":64,"height":64},{"srcX":336,"srcY":1136,"destX":528,"destY":384,"width":64,"height":64},{"srcX":400,"srcY":0,"destX":144,"destY":880,"width":64,"height":64},{"srcX":400,"srcY":64,"destX":464,"destY":192,"width":64,"height":64},{"srcX":400,"srcY":128,"destX":784,"destY":752,"width":64,"height":64},{"srcX":400,"srcY":240,"destX":272,"destY":512,"width":64,"height":64},{"srcX":400,"srcY":304,"destX":592,"destY":1072,"width":64,"height":64},{"srcX":400,"srcY":368,"destX":80,"destY":880,"width":64,"height":64},{"srcX":400,"srcY":432,"destX":400,"destY":192,"width":64,"height":64},{"srcX":400,"srcY":496,"destX":720,"destY":752,"width":64,"height":64},{"srcX":400,"srcY":560,"destX":208,"destY":512,"width":64,"height":64},{"srcX":400,"srcY":624,"destX":528,"destY":1024,"width":64,"height":64},{"srcX":400,"srcY":688,"destX":16,"destY":880,"width":64,"height":64},{"srcX":400,"srcY":752,"destX":336,"destY":192,"width":64,"height":64},{"srcX":400,"srcY":816,"destX":656,"destY":752,"width":64,"height":64},{"srcX":400,"srcY":880,"destX":144,"destY":512,"width":64,"height":64},{"srcX":400,"srcY":944,"destX":464,"destY":1072,"width":64,"height":64},{"srcX":400,"srcY":1008,"destX":784,"destY":432,"width":64,"height":64},{"srcX":400,"srcY":1072,"destX":272,"destY":192,"width":64,"height":64},{"srcX":400,"srcY":1136,"destX":592,"destY":752,"width":64,"height":64},{"srcX":464,"srcY":0,"destX":192,"destY":0,"width":64,"height":64},{"srcX":464,"srcY":64,"destX":528,"destY":512,"width":64,"height":64},{"srcX":464,"srcY":128,"destX":16,"destY":368,"width":64,"height":64},{"srcX":464,"srcY":240,"destX":336,"destY":832,"width":64,"height":64},{"srcX":464,"srcY":304,"destX":656,"destY":192,"width":64,"height":64},{"srcX":464,"srcY":368,"destX":128,"destY":0,"width":64,"height":64},{"srcX":464,"srcY":432,"destX":464,"destY":512,"width":64,"height":64},{"srcX":464,"srcY":496,"destX":784,"destY":1072,"width":64,"height":64},{"srcX":464,"srcY":560,"destX":272,"destY":880,"width":64,"height":64},{"srcX":464,"srcY":624,"destX":592,"destY":240,"width":64,"height":64},{"srcX":464,"srcY":688,"destX":64,"destY":0,"width":64,"height":64},{"srcX":464,"srcY":752,"destX":400,"destY":512,"width":64,"height":64},{"srcX":464,"srcY":816,"destX":720,"destY":1072,"width":64,"height":64},{"srcX":464,"srcY":880,"destX":208,"destY":880,"width":64,"height":64},{"srcX":464,"srcY":944,"destX":528,"destY":192,"width":64,"height":64
},{"srcX":464,"srcY":1008,"destX":0,"destY":48,"width":64,"height":64},{"srcX":464,"srcY":1072,"destX":336,"destY":512,"width":64,"height":64},{"srcX":464,"srcY":1136,"destX":656,"destY":1072,"width":64,"height":64},{"srcX":528,"srcY":0,"destX":272,"destY":320,"width":64,"height":64},{"srcX":528,"srcY":64,"destX":592,"destY":880,"width":64,"height":64},{"srcX":528,"srcY":128,"destX":80,"destY":688,"width":64,"height":64},{"srcX":528,"srcY":240,"destX":384,"destY":0,"width":64,"height":64},{"srcX":528,"srcY":304,"destX":720,"destY":560,"width":64,"height":64},{"srcX":528,"srcY":368,"destX":208,"destY":320,"width":64,"height":64},{"srcX":528,"srcY":432,"destX":528,"destY":832,"width":64,"height":64},{"srcX":528,"srcY":496,"destX":16,"destY":688,"width":64,"height":64},{"srcX":528,"srcY":560,"destX":320,"destY":0,"width":64,"height":64},{"srcX":528,"srcY":624,"destX":656,"destY":560,"width":64,"height":64},{"srcX":528,"srcY":688,"destX":144,"destY":320,"width":64,"height":64},{"srcX":528,"srcY":752,"destX":464,"destY":832,"width":64,"height":64},{"srcX":528,"srcY":816,"destX":784,"destY":192,"width":64,"height":64},{"srcX":528,"srcY":880,"destX":256,"destY":0,"width":64,"height":64},{"srcX":528,"srcY":944,"destX":592,"destY":560,"width":64,"height":64},{"srcX":528,"srcY":1008,"destX":80,"destY":320,"width":64,"height":64},{"srcX":528,"srcY":1072,"destX":400,"destY":832,"width":64,"height":64},{"srcX":528,"srcY":1136,"destX":720,"destY":192,"width":64,"height":64},{"srcX":592,"srcY":0,"destX":336,"destY":640,"width":64,"height":64},{"srcX":592,"srcY":64,"destX":656,"destY":0,"width":64,"height":64},{"srcX":592,"srcY":128,"destX":144,"destY":1008,"width":64,"height":64},{"srcX":592,"srcY":240,"destX":464,"destY":320,"width":64,"height":64},{"srcX":592,"srcY":304,"destX":784,"destY":880,"width":64,"height":64},{"srcX":592,"srcY":368,"destX":272,"destY":640,"width":64,"height":64},{"srcX":592,"srcY":432,"destX":576,"destY":0,"width":64,"height":64},{"srcX":592,"srcY":496,"destX":80,"destY":1008,"width":64,"height":64},{"srcX":592,"srcY":560,"destX":400,"destY":320,"width":64,"height":64},{"srcX":592,"srcY":624,"destX":720,"destY":880,"width":64,"height":64},{"srcX":592,"srcY":688,"destX":208,"destY":640,"width":64,"height":64},{"srcX":592,"srcY":752,"destX":512,"destY":0,"width":64,"height":64},{"srcX":592,"srcY":816,"destX":16,"destY":1008,"width":64,"height":64},{"srcX":592,"srcY":880,"destX":336,"destY":320,"width":64,"height":64},{"srcX":592,"srcY":944,"destX":656,"destY":880,"width":64,"height":64},{"srcX":592,"srcY":1008,"destX":144,"destY":640,"width":64,"height":64},{"srcX":592,"srcY":1072,"destX":448,"destY":0,"width":64,"height":64},{"srcX":592,"srcY":1136,"destX":784,"destY":560,"width":64,"height":64},{"srcX":656,"srcY":0,"destX":400,"destY":1008,"width":64,"height":64},{"srcX":656,"srcY":64,"destX":720,"destY":368,"width":64,"height":64},{"srcX":656,"srcY":128,"destX":192,"destY":128,"width":64,"height":64},{"srcX":656,"srcY":240,"destX":528,"destY":640,"width":64,"height":64},{"srcX":656,"srcY":304,"destX":16,"destY":496,"width":64,"height":64},{"srcX":656,"srcY":368,"destX":336,"destY":1008,"width":64,"height":64},{"srcX":656,"srcY":432,"destX":656,"destY":368,"width":64,"height":64},{"srcX":656,"srcY":496,"destX":128,"destY":128,"width":64,"height":64},{"srcX":656,"srcY":560,"destX":464,"destY":640,"width":64,"height":64},{"srcX":656,"srcY":624,"destX":784,"destY":0,"width":64,"height":64},{"srcX":656,"srcY":688,"destX":272,"destY":1008,"width":64,"height":64},{"srcX":656,"srcY":75
2,"destX":592,"destY":368,"width":64,"height":64},{"srcX":656,"srcY":816,"destX":64,"destY":128,"width":64,"height":64},{"srcX":656,"srcY":880,"destX":400,"destY":640,"width":64,"height":64},{"srcX":656,"srcY":944,"destX":720,"destY":0,"width":64,"height":64},{"srcX":656,"srcY":1008,"destX":208,"destY":1008,"width":64,"height":64},{"srcX":656,"srcY":1072,"destX":528,"destY":320,"width":64,"height":64},{"srcX":656,"srcY":1136,"destX":0,"destY":176,"width":64,"height":64},{"srcX":720,"srcY":0,"destX":448,"destY":128,"width":64,"height":64},{"srcX":720,"srcY":64,"destX":784,"destY":688,"width":64,"height":64},{"srcX":720,"srcY":128,"destX":272,"destY":448,"width":64,"height":64},{"srcX":720,"srcY":240,"destX":592,"destY":1008,"width":64,"height":64},{"srcX":720,"srcY":304,"destX":80,"destY":816,"width":64,"height":64},{"srcX":720,"srcY":368,"destX":384,"destY":128,"width":64,"height":64},{"srcX":720,"srcY":432,"destX":720,"destY":688,"width":64,"height":64},{"srcX":720,"srcY":496,"destX":208,"destY":448,"width":64,"height":64},{"srcX":720,"srcY":560,"destX":528,"destY":960,"width":64,"height":64},{"srcX":720,"srcY":624,"destX":16,"destY":816,"width":64,"height":64},{"srcX":720,"srcY":688,"destX":320,"destY":128,"width":64,"height":64},{"srcX":720,"srcY":752,"destX":656,"destY":688,"width":64,"height":64},{"srcX":720,"srcY":816,"destX":144,"destY":448,"width":64,"height":64},{"srcX":720,"srcY":880,"destX":464,"destY":960,"width":64,"height":64},{"srcX":720,"srcY":944,"destX":784,"destY":320,"width":64,"height":64},{"srcX":720,"srcY":1008,"destX":256,"destY":128,"width":64,"height":64},{"srcX":720,"srcY":1072,"destX":592,"destY":688,"width":64,"height":64},{"srcX":720,"srcY":1136,"destX":80,"destY":448,"width":64,"height":64},{"srcX":784,"srcY":0,"destX":528,"destY":448,"width":64,"height":64},{"srcX":784,"srcY":64,"destX":16,"destY":304,"width":64,"height":64},{"srcX":784,"srcY":128,"destX":336,"destY":768,"width":64,"height":64},{"srcX":784,"srcY":240,"destX":640,"destY":128,"width":64,"height":64},{"srcX":784,"srcY":304,"destX":144,"destY":1136,"width":64,"height":64},{"srcX":784,"srcY":368,"destX":464,"destY":448,"width":64,"height":64},{"srcX":784,"srcY":432,"destX":784,"destY":1008,"width":64,"height":64},{"srcX":784,"srcY":496,"destX":272,"destY":768,"width":64,"height":64},{"srcX":784,"srcY":560,"destX":576,"destY":128,"width":64,"height":64},{"srcX":784,"srcY":624,"destX":80,"destY":1136,"width":64,"height":64},{"srcX":784,"srcY":688,"destX":400,"destY":448,"width":64,"height":64},{"srcX":784,"srcY":752,"destX":720,"destY":1008,"width":64,"height":64},{"srcX":784,"srcY":816,"destX":208,"destY":816,"width":64,"height":64},{"srcX":784,"srcY":880,"destX":512,"destY":128,"width":64,"height":64},{"srcX":784,"srcY":944,"destX":16,"destY":1136,"width":64,"height":64},{"srcX":784,"srcY":1008,"destX":336,"destY":448,"width":64,"height":64},{"srcX":784,"srcY":1072,"destX":656,"destY":1008,"width":64,"height":64},{"srcX":784,"srcY":1136,"destX":144,"destY":816,"width":64,"height":64}],[{"srcX":640,"srcY":640,"destX":640,"destY":640,"width":16,"height":48},{"srcX":0,"srcY":640,"destX":656,"destY":128,"width":64,"height":48},{"srcX":64,"srcY":640,"destX":720,"destY":192,"width":64,"height":48},{"srcX":128,"srcY":640,"destX":784,"destY":256,"width":64,"height":48},{"srcX":192,"srcY":640,"destX":0,"destY":768,"width":64,"height":48},{"srcX":256,"srcY":640,"destX":64,"destY":832,"width":64,"height":48},{"srcX":320,"srcY":640,"destX":128,"destY":896,"width":64,"height":48},{"srcX":384,"srcY":640,"destX"
:192,"destY":960,"width":64,"height":48},{"srcX":448,"srcY":640,"destX":256,"destY":1024,"width":64,"height":48},{"srcX":512,"srcY":640,"destX":320,"destY":1088,"width":64,"height":48},{"srcX":576,"srcY":640,"destX":384,"destY":640,"width":64,"height":48},{"srcX":656,"srcY":640,"destX":448,"destY":704,"width":64,"height":48},{"srcX":720,"srcY":640,"destX":512,"destY":768,"width":64,"height":48},{"srcX":784,"srcY":640,"destX":576,"destY":832,"width":64,"height":48},{"srcX":640,"srcY":0,"destX":192,"destY":256,"width":16,"height":64},{"srcX":640,"srcY":64,"destX":256,"destY":320,"width":16,"height":64},{"srcX":640,"srcY":128,"destX":320,"destY":384,"width":16,"height":64},{"srcX":640,"srcY":192,"destX":384,"destY":448,"width":16,"height":64},{"srcX":640,"srcY":256,"destX":448,"destY":512,"width":16,"height":64},{"srcX":640,"srcY":320,"destX":512,"destY":576,"width":16,"height":64},{"srcX":640,"srcY":384,"destX":768,"destY":688,"width":16,"height":64},{"srcX":640,"srcY":448,"destX":640,"destY":752,"width":16,"height":64},{"srcX":640,"srcY":512,"destX":704,"destY":816,"width":16,"height":64},{"srcX":640,"srcY":576,"destX":768,"destY":880,"width":16,"height":64},{"srcX":640,"srcY":688,"destX":640,"destY":944,"width":16,"height":64},{"srcX":640,"srcY":752,"destX":704,"destY":1008,"width":16,"height":64},{"srcX":640,"srcY":816,"destX":768,"destY":1072,"width":16,"height":64},{"srcX":640,"srcY":880,"destX":640,"destY":1136,"width":16,"height":64},{"srcX":640,"srcY":944,"destX":576,"destY":0,"width":16,"height":64},{"srcX":640,"srcY":1008,"destX":0,"destY":64,"width":16,"height":64},{"srcX":640,"srcY":1072,"destX":64,"destY":128,"width":16,"height":64},{"srcX":640,"srcY":1136,"destX":128,"destY":192,"width":16,"height":64},{"srcX":0,"srcY":0,"destX":784,"destY":688,"width":64,"height":64},{"srcX":0,"srcY":64,"destX":256,"destY":448,"width":64,"height":64},{"srcX":0,"srcY":128,"destX":576,"destY":1008,"width":64,"height":64},{"srcX":0,"srcY":192,"destX":64,"destY":768,"width":64,"height":64},{"srcX":0,"srcY":256,"destX":400,"destY":128,"width":64,"height":64},{"srcX":0,"srcY":320,"destX":704,"destY":688,"width":64,"height":64},{"srcX":0,"srcY":384,"destX":192,"destY":448,"width":64,"height":64},{"srcX":0,"srcY":448,"destX":512,"destY":1008,"width":64,"height":64},{"srcX":0,"srcY":512,"destX":0,"destY":816,"width":64,"height":64},{"srcX":0,"srcY":576,"destX":336,"destY":128,"width":64,"height":64},{"srcX":0,"srcY":688,"destX":640,"destY":688,"width":64,"height":64},{"srcX":0,"srcY":752,"destX":128,"destY":448,"width":64,"height":64},{"srcX":0,"srcY":816,"destX":448,"destY":1008,"width":64,"height":64},{"srcX":0,"srcY":880,"destX":784,"destY":368,"width":64,"height":64},{"srcX":0,"srcY":944,"destX":272,"destY":128,"width":64,"height":64},{"srcX":0,"srcY":1008,"destX":576,"destY":640,"width":64,"height":64},{"srcX":0,"srcY":1072,"destX":64,"destY":448,"width":64,"height":64},{"srcX":0,"srcY":1136,"destX":384,"destY":1008,"width":64,"height":64},{"srcX":64,"srcY":0,"destX":0,"destY":256,"width":64,"height":64},{"srcX":64,"srcY":64,"destX":320,"destY":768,"width":64,"height":64},{"srcX":64,"srcY":128,"destX":656,"destY":176,"width":64,"height":64},{"srcX":64,"srcY":192,"destX":128,"destY":1136,"width":64,"height":64},{"srcX":64,"srcY":256,"destX":464,"destY":448,"width":64,"height":64},{"srcX":64,"srcY":320,"destX":784,"destY":1008,"width":64,"height":64},{"srcX":64,"srcY":384,"destX":256,"destY":768,"width":64,"height":64},{"srcX":64,"srcY":448,"destX":592,"destY":128,"width":64,"height":64},{"srcX":64,"
srcY":512,"destX":64,"destY":1136,"width":64,"height":64},{"srcX":64,"srcY":576,"destX":400,"destY":448,"width":64,"height":64},{"srcX":64,"srcY":688,"destX":720,"destY":1008,"width":64,"height":64},{"srcX":64,"srcY":752,"destX":192,"destY":768,"width":64,"height":64},{"srcX":64,"srcY":816,"destX":528,"destY":128,"width":64,"height":64},{"srcX":64,"srcY":880,"destX":0,"destY":1136,"width":64,"height":64},{"srcX":64,"srcY":944,"destX":320,"destY":448,"width":64,"height":64},{"srcX":64,"srcY":1008,"destX":640,"destY":1008,"width":64,"height":64},{"srcX":64,"srcY":1072,"destX":128,"destY":768,"width":64,"height":64},{"srcX":64,"srcY":1136,"destX":464,"destY":128,"width":64,"height":64},{"srcX":128,"srcY":0,"destX":64,"destY":576,"width":64,"height":64},{"srcX":128,"srcY":64,"destX":384,"destY":1136,"width":64,"height":64},{"srcX":128,"srcY":128,"destX":720,"destY":496,"width":64,"height":64},{"srcX":128,"srcY":192,"destX":208,"destY":256,"width":64,"height":64},{"srcX":128,"srcY":256,"destX":512,"destY":816,"width":64,"height":64},{"srcX":128,"srcY":320,"destX":0,"destY":576,"width":64,"height":64},{"srcX":128,"srcY":384,"destX":320,"destY":1136,"width":64,"height":64},{"srcX":128,"srcY":448,"destX":656,"destY":496,"width":64,"height":64},{"srcX":128,"srcY":512,"destX":128,"destY":256,"width":64,"height":64},{"srcX":128,"srcY":576,"destX":448,"destY":816,"width":64,"height":64},{"srcX":128,"srcY":688,"destX":784,"destY":128,"width":64,"height":64},{"srcX":128,"srcY":752,"destX":256,"destY":1136,"width":64,"height":64},{"srcX":128,"srcY":816,"destX":592,"destY":448,"width":64,"height":64},{"srcX":128,"srcY":880,"destX":64,"destY":256,"width":64,"height":64},{"srcX":128,"srcY":944,"destX":384,"destY":816,"width":64,"height":64},{"srcX":128,"srcY":1008,"destX":720,"destY":128,"width":64,"height":64},{"srcX":128,"srcY":1072,"destX":192,"destY":1136,"width":64,"height":64},{"srcX":128,"srcY":1136,"destX":528,"destY":448,"width":64,"height":64},{"srcX":192,"srcY":0,"destX":128,"destY":944,"width":64,"height":64},{"srcX":192,"srcY":64,"destX":464,"destY":256,"width":64,"height":64},{"srcX":192,"srcY":128,"destX":784,"destY":816,"width":64,"height":64},{"srcX":192,"srcY":192,"destX":256,"destY":576,"width":64,"height":64},{"srcX":192,"srcY":256,"destX":576,"destY":1136,"width":64,"height":64},{"srcX":192,"srcY":320,"destX":64,"destY":944,"width":64,"height":64},{"srcX":192,"srcY":384,"destX":400,"destY":256,"width":64,"height":64},{"srcX":192,"srcY":448,"destX":720,"destY":816,"width":64,"height":64},{"srcX":192,"srcY":512,"destX":192,"destY":576,"width":64,"height":64},{"srcX":192,"srcY":576,"destX":512,"destY":1136,"width":64,"height":64},{"srcX":192,"srcY":688,"destX":0,"destY":944,"width":64,"height":64},{"srcX":192,"srcY":752,"destX":336,"destY":256,"width":64,"height":64},{"srcX":192,"srcY":816,"destX":640,"destY":816,"width":64,"height":64},{"srcX":192,"srcY":880,"destX":128,"destY":576,"width":64,"height":64},{"srcX":192,"srcY":944,"destX":448,"destY":1136,"width":64,"height":64},{"srcX":192,"srcY":1008,"destX":784,"destY":496,"width":64,"height":64},{"srcX":192,"srcY":1072,"destX":272,"destY":256,"width":64,"height":64},{"srcX":192,"srcY":1136,"destX":576,"destY":768,"width":64,"height":64},{"srcX":256,"srcY":0,"destX":208,"destY":64,"width":64,"height":64},{"srcX":256,"srcY":64,"destX":528,"destY":576,"width":64,"height":64},{"srcX":256,"srcY":128,"destX":0,"destY":384,"width":64,"height":64},{"srcX":256,"srcY":192,"destX":320,"destY":896,"width":64,"height":64},{"srcX":256,"srcY":256,"destX
":656,"destY":304,"width":64,"height":64},{"srcX":256,"srcY":320,"destX":144,"destY":64,"width":64,"height":64},{"srcX":256,"srcY":384,"destX":448,"destY":576,"width":64,"height":64},{"srcX":256,"srcY":448,"destX":784,"destY":1136,"width":64,"height":64},{"srcX":256,"srcY":512,"destX":256,"destY":896,"width":64,"height":64},{"srcX":256,"srcY":576,"destX":592,"destY":256,"width":64,"height":64},{"srcX":256,"srcY":688,"destX":80,"destY":64,"width":64,"height":64},{"srcX":256,"srcY":752,"destX":384,"destY":576,"width":64,"height":64},{"srcX":256,"srcY":816,"destX":720,"destY":1136,"width":64,"height":64},{"srcX":256,"srcY":880,"destX":192,"destY":896,"width":64,"height":64},{"srcX":256,"srcY":944,"destX":528,"destY":256,"width":64,"height":64},{"srcX":256,"srcY":1008,"destX":16,"destY":64,"width":64,"height":64},{"srcX":256,"srcY":1072,"destX":320,"destY":576,"width":64,"height":64},{"srcX":256,"srcY":1136,"destX":656,"destY":1136,"width":64,"height":64},{"srcX":320,"srcY":0,"destX":256,"destY":384,"width":64,"height":64},{"srcX":320,"srcY":64,"destX":576,"destY":944,"width":64,"height":64},{"srcX":320,"srcY":128,"destX":64,"destY":704,"width":64,"height":64},{"srcX":320,"srcY":192,"destX":400,"destY":64,"width":64,"height":64},{"srcX":320,"srcY":256,"destX":720,"destY":624,"width":64,"height":64},{"srcX":320,"srcY":320,"destX":192,"destY":384,"width":64,"height":64},{"srcX":320,"srcY":384,"destX":512,"destY":944,"width":64,"height":64},{"srcX":320,"srcY":448,"destX":0,"destY":704,"width":64,"height":64},{"srcX":320,"srcY":512,"destX":336,"destY":64,"width":64,"height":64},{"srcX":320,"srcY":576,"destX":656,"destY":624,"width":64,"height":64},{"srcX":320,"srcY":688,"destX":128,"destY":384,"width":64,"height":64},{"srcX":320,"srcY":752,"destX":448,"destY":944,"width":64,"height":64},{"srcX":320,"srcY":816,"destX":784,"destY":304,"width":64,"height":64},{"srcX":320,"srcY":880,"destX":272,"destY":64,"width":64,"height":64},{"srcX":320,"srcY":944,"destX":592,"destY":576,"width":64,"height":64},{"srcX":320,"srcY":1008,"destX":64,"destY":384,"width":64,"height":64},{"srcX":320,"srcY":1072,"destX":384,"destY":944,"width":64,"height":64},{"srcX":320,"srcY":1136,"destX":720,"destY":304,"width":64,"height":64},{"srcX":384,"srcY":0,"destX":320,"destY":704,"width":64,"height":64},{"srcX":384,"srcY":64,"destX":656,"destY":64,"width":64,"height":64},{"srcX":384,"srcY":128,"destX":128,"destY":1072,"width":64,"height":64},{"srcX":384,"srcY":192,"destX":464,"destY":384,"width":64,"height":64},{"srcX":384,"srcY":256,"destX":784,"destY":944,"width":64,"height":64},{"srcX":384,"srcY":320,"destX":256,"destY":704,"width":64,"height":64},{"srcX":384,"srcY":384,"destX":592,"destY":64,"width":64,"height":64},{"srcX":384,"srcY":448,"destX":64,"destY":1072,"width":64,"height":64},{"srcX":384,"srcY":512,"destX":400,"destY":384,"width":64,"height":64},{"srcX":384,"srcY":576,"destX":720,"destY":944,"width":64,"height":64},{"srcX":384,"srcY":688,"destX":192,"destY":704,"width":64,"height":64},{"srcX":384,"srcY":752,"destX":528,"destY":64,"width":64,"height":64},{"srcX":384,"srcY":816,"destX":0,"destY":1072,"width":64,"height":64},{"srcX":384,"srcY":880,"destX":336,"destY":384,"width":64,"height":64},{"srcX":384,"srcY":944,"destX":656,"destY":944,"width":64,"height":64},{"srcX":384,"srcY":1008,"destX":128,"destY":704,"width":64,"height":64},{"srcX":384,"srcY":1072,"destX":464,"destY":64,"width":64,"height":64},{"srcX":384,"srcY":1136,"destX":784,"destY":624,"width":64,"height":64},{"srcX":448,"srcY":0,"destX":384,"destY":107
2,"width":64,"height":64},{"srcX":448,"srcY":64,"destX":720,"destY":432,"width":64,"height":64},{"srcX":448,"srcY":128,"destX":208,"destY":192,"width":64,"height":64},{"srcX":448,"srcY":192,"destX":512,"destY":704,"width":64,"height":64},{"srcX":448,"srcY":256,"destX":0,"destY":512,"width":64,"height":64},{"srcX":448,"srcY":320,"destX":320,"destY":1024,"width":64,"height":64},{"srcX":448,"srcY":384,"destX":656,"destY":432,"width":64,"height":64},{"srcX":448,"srcY":448,"destX":144,"destY":192,"width":64,"height":64},{"srcX":448,"srcY":512,"destX":448,"destY":752,"width":64,"height":64},{"srcX":448,"srcY":576,"destX":784,"destY":64,"width":64,"height":64},{"srcX":448,"srcY":688,"destX":256,"destY":1072,"width":64,"height":64},{"srcX":448,"srcY":752,"destX":592,"destY":384,"width":64,"height":64},{"srcX":448,"srcY":816,"destX":64,"destY":192,"width":64,"height":64},{"srcX":448,"srcY":880,"destX":384,"destY":752,"width":64,"height":64},{"srcX":448,"srcY":944,"destX":720,"destY":64,"width":64,"height":64},{"srcX":448,"srcY":1008,"destX":192,"destY":1072,"width":64,"height":64},{"srcX":448,"srcY":1072,"destX":528,"destY":384,"width":64,"height":64},{"srcX":448,"srcY":1136,"destX":0,"destY":192,"width":64,"height":64},{"srcX":512,"srcY":0,"destX":464,"destY":192,"width":64,"height":64},{"srcX":512,"srcY":64,"destX":784,"destY":752,"width":64,"height":64},{"srcX":512,"srcY":128,"destX":256,"destY":512,"width":64,"height":64},{"srcX":512,"srcY":192,"destX":576,"destY":1072,"width":64,"height":64},{"srcX":512,"srcY":256,"destX":64,"destY":880,"width":64,"height":64},{"srcX":512,"srcY":320,"destX":400,"destY":192,"width":64,"height":64},{"srcX":512,"srcY":384,"destX":720,"destY":752,"width":64,"height":64},{"srcX":512,"srcY":448,"destX":192,"destY":512,"width":64,"height":64},{"srcX":512,"srcY":512,"destX":512,"destY":1072,"width":64,"height":64},{"srcX":512,"srcY":576,"destX":0,"destY":880,"width":64,"height":64},{"srcX":512,"srcY":688,"destX":336,"destY":192,"width":64,"height":64},{"srcX":512,"srcY":752,"destX":656,"destY":752,"width":64,"height":64},{"srcX":512,"srcY":816,"destX":128,"destY":512,"width":64,"height":64},{"srcX":512,"srcY":880,"destX":448,"destY":1072,"width":64,"height":64},{"srcX":512,"srcY":944,"destX":784,"destY":432,"width":64,"height":64},{"srcX":512,"srcY":1008,"destX":272,"destY":192,"width":64,"height":64},{"srcX":512,"srcY":1072,"destX":576,"destY":704,"width":64,"height":64},{"srcX":512,"srcY":1136,"destX":64,"destY":512,"width":64,"height":64},{"srcX":576,"srcY":0,"destX":528,"destY":512,"width":64,"height":64},{"srcX":576,"srcY":64,"destX":0,"destY":320,"width":64,"height":64},{"srcX":576,"srcY":128,"destX":320,"destY":832,"width":64,"height":64},{"srcX":576,"srcY":192,"destX":656,"destY":240,"width":64,"height":64},{"srcX":576,"srcY":256,"destX":128,"destY":0,"width":64,"height":64},{"srcX":576,"srcY":320,"destX":464,"destY":512,"width":64,"height":64},{"srcX":576,"srcY":384,"destX":784,"destY":1072,"width":64,"height":64},{"srcX":576,"srcY":448,"destX":256,"destY":832,"width":64,"height":64},{"srcX":576,"srcY":512,"destX":592,"destY":192,"width":64,"height":64},{"srcX":576,"srcY":576,"destX":64,"destY":0,"width":64,"height":64},{"srcX":576,"srcY":688,"destX":384,"destY":512,"width":64,"height":64},{"srcX":576,"srcY":752,"destX":704,"destY":1072,"width":64,"height":64},{"srcX":576,"srcY":816,"destX":192,"destY":832,"width":64,"height":64},{"srcX":576,"srcY":880,"destX":528,"destY":192,"width":64,"height":64},{"srcX":576,"srcY":944,"destX":0,"destY":0,"width":64,"height
":64},{"srcX":576,"srcY":1008,"destX":320,"destY":512,"width":64,"height":64},{"srcX":576,"srcY":1072,"destX":640,"destY":1072,"width":64,"height":64},{"srcX":576,"srcY":1136,"destX":128,"destY":832,"width":64,"height":64},{"srcX":656,"srcY":0,"destX":576,"destY":880,"width":64,"height":64},{"srcX":656,"srcY":64,"destX":64,"destY":640,"width":64,"height":64},{"srcX":656,"srcY":128,"destX":384,"destY":0,"width":64,"height":64},{"srcX":656,"srcY":192,"destX":720,"destY":560,"width":64,"height":64},{"srcX":656,"srcY":256,"destX":192,"destY":320,"width":64,"height":64},{"srcX":656,"srcY":320,"destX":512,"destY":880,"width":64,"height":64},{"srcX":656,"srcY":384,"destX":0,"destY":640,"width":64,"height":64},{"srcX":656,"srcY":448,"destX":320,"destY":0,"width":64,"height":64},{"srcX":656,"srcY":512,"destX":656,"destY":560,"width":64,"height":64},{"srcX":656,"srcY":576,"destX":128,"destY":320,"width":64,"height":64},{"srcX":656,"srcY":688,"destX":448,"destY":880,"width":64,"height":64},{"srcX":656,"srcY":752,"destX":784,"destY":192,"width":64,"height":64},{"srcX":656,"srcY":816,"destX":256,"destY":0,"width":64,"height":64},{"srcX":656,"srcY":880,"destX":592,"destY":512,"width":64,"height":64},{"srcX":656,"srcY":944,"destX":64,"destY":320,"width":64,"height":64},{"srcX":656,"srcY":1008,"destX":384,"destY":880,"width":64,"height":64},{"srcX":656,"srcY":1072,"destX":720,"destY":240,"width":64,"height":64},{"srcX":656,"srcY":1136,"destX":192,"destY":0,"width":64,"height":64},{"srcX":720,"srcY":0,"destX":656,"destY":0,"width":64,"height":64},{"srcX":720,"srcY":64,"destX":128,"destY":1008,"width":64,"height":64},{"srcX":720,"srcY":128,"destX":464,"destY":320,"width":64,"height":64},{"srcX":720,"srcY":192,"destX":784,"destY":880,"width":64,"height":64},{"srcX":720,"srcY":256,"destX":256,"destY":640,"width":64,"height":64},{"srcX":720,"srcY":320,"destX":592,"destY":0,"width":64,"height":64},{"srcX":720,"srcY":384,"destX":64,"destY":1008,"width":64,"height":64},{"srcX":720,"srcY":448,"destX":400,"destY":320,"width":64,"height":64},{"srcX":720,"srcY":512,"destX":704,"destY":880,"width":64,"height":64},{"srcX":720,"srcY":576,"destX":192,"destY":640,"width":64,"height":64},{"srcX":720,"srcY":688,"destX":512,"destY":0,"width":64,"height":64},{"srcX":720,"srcY":752,"destX":0,"destY":1008,"width":64,"height":64},{"srcX":720,"srcY":816,"destX":336,"destY":320,"width":64,"height":64},{"srcX":720,"srcY":880,"destX":640,"destY":880,"width":64,"height":64},{"srcX":720,"srcY":944,"destX":128,"destY":640,"width":64,"height":64},{"srcX":720,"srcY":1008,"destX":448,"destY":0,"width":64,"height":64},{"srcX":720,"srcY":1072,"destX":784,"destY":560,"width":64,"height":64},{"srcX":720,"srcY":1136,"destX":272,"destY":320,"width":64,"height":64},{"srcX":784,"srcY":0,"destX":720,"destY":368,"width":64,"height":64},{"srcX":784,"srcY":64,"destX":208,"destY":128,"width":64,"height":64},{"srcX":784,"srcY":128,"destX":512,"destY":640,"width":64,"height":64},{"srcX":784,"srcY":192,"destX":0,"destY":448,"width":64,"height":64},{"srcX":784,"srcY":256,"destX":320,"destY":960,"width":64,"height":64},{"srcX":784,"srcY":320,"destX":656,"destY":368,"width":64,"height":64},{"srcX":784,"srcY":384,"destX":144,"destY":128,"width":64,"height":64},{"srcX":784,"srcY":448,"destX":448,"destY":640,"width":64,"height":64},{"srcX":784,"srcY":512,"destX":784,"destY":0,"width":64,"height":64},{"srcX":784,"srcY":576,"destX":256,"destY":960,"width":64,"height":64},{"srcX":784,"srcY":688,"destX":592,"destY":320,"width":64,"height":64},{"srcX":784,"srcY":752
,"destX":80,"destY":128,"width":64,"height":64},{"srcX":784,"srcY":816,"destX":384,"destY":688,"width":64,"height":64},{"srcX":784,"srcY":880,"destX":720,"destY":0,"width":64,"height":64},{"srcX":784,"srcY":944,"destX":192,"destY":1008,"width":64,"height":64},{"srcX":784,"srcY":1008,"destX":528,"destY":320,"width":64,"height":64},{"srcX":784,"srcY":1072,"destX":0,"destY":128,"width":64,"height":64},{"srcX":784,"srcY":1136,"destX":320,"destY":640,"width":64,"height":64}],[{"srcX":384,"srcY":1088,"destX":384,"destY":1088,"width":16,"height":48},{"srcX":0,"srcY":1088,"destX":400,"destY":1088,"width":64,"height":48},{"srcX":64,"srcY":1088,"destX":464,"destY":1088,"width":64,"height":48},{"srcX":128,"srcY":1088,"destX":528,"destY":1088,"width":64,"height":48},{"srcX":192,"srcY":1088,"destX":592,"destY":1088,"width":64,"height":48},{"srcX":256,"srcY":1088,"destX":656,"destY":1088,"width":64,"height":48},{"srcX":320,"srcY":1088,"destX":720,"destY":1088,"width":64,"height":48},{"srcX":400,"srcY":1088,"destX":784,"destY":1088,"width":64,"height":48},{"srcX":464,"srcY":1088,"destX":0,"destY":640,"width":64,"height":48},{"srcX":528,"srcY":1088,"destX":64,"destY":704,"width":64,"height":48},{"srcX":592,"srcY":1088,"destX":128,"destY":768,"width":64,"height":48},{"srcX":656,"srcY":1088,"destX":192,"destY":832,"width":64,"height":48},{"srcX":720,"srcY":1088,"destX":256,"destY":896,"width":64,"height":48},{"srcX":784,"srcY":1088,"destX":320,"destY":960,"width":64,"height":48},{"srcX":384,"srcY":0,"destX":640,"destY":320,"width":16,"height":64},{"srcX":384,"srcY":64,"destX":704,"destY":384,"width":16,"height":64},{"srcX":384,"srcY":128,"destX":768,"destY":448,"width":16,"height":64},{"srcX":384,"srcY":192,"destX":384,"destY":512,"width":16,"height":64},{"srcX":384,"srcY":256,"destX":448,"destY":576,"width":16,"height":64},{"srcX":384,"srcY":320,"destX":512,"destY":640,"width":16,"height":64},{"srcX":384,"srcY":384,"destX":576,"destY":704,"width":16,"height":64},{"srcX":384,"srcY":448,"destX":640,"destY":768,"width":16,"height":64},{"srcX":384,"srcY":512,"destX":704,"destY":832,"width":16,"height":64},{"srcX":384,"srcY":576,"destX":768,"destY":896,"width":16,"height":64},{"srcX":384,"srcY":640,"destX":384,"destY":960,"width":16,"height":64},{"srcX":384,"srcY":704,"destX":448,"destY":1024,"width":16,"height":64},{"srcX":384,"srcY":768,"destX":192,"destY":1136,"width":16,"height":64},{"srcX":384,"srcY":832,"destX":768,"destY":0,"width":16,"height":64},{"srcX":384,"srcY":896,"destX":384,"destY":64,"width":16,"height":64},{"srcX":384,"srcY":960,"destX":448,"destY":128,"width":16,"height":64},{"srcX":384,"srcY":1024,"destX":512,"destY":192,"width":16,"height":64},{"srcX":384,"srcY":1136,"destX":576,"destY":256,"width":16,"height":64},{"srcX":0,"srcY":0,"destX":128,"destY":1008,"width":64,"height":64},{"srcX":0,"srcY":64,"destX":448,"destY":320,"width":64,"height":64},{"srcX":0,"srcY":128,"destX":784,"destY":832,"width":64,"height":64},{"srcX":0,"srcY":192,"destX":256,"destY":640,"width":64,"height":64},{"srcX":0,"srcY":256,"destX":576,"destY":0,"width":64,"height":64},{"srcX":0,"srcY":320,"destX":64,"destY":1008,"width":64,"height":64},{"srcX":0,"srcY":384,"destX":384,"destY":320,"width":64,"height":64},{"srcX":0,"srcY":448,"destX":720,"destY":832,"width":64,"height":64},{"srcX":0,"srcY":512,"destX":192,"destY":640,"width":64,"height":64},{"srcX":0,"srcY":576,"destX":512,"destY":0,"width":64,"height":64},{"srcX":0,"srcY":640,"destX":0,"destY":1008,"width":64,"height":64},{"srcX":0,"srcY":704,"destX":320,"destY":
320,"width":64,"height":64},{"srcX":0,"srcY":768,"destX":640,"destY":832,"width":64,"height":64},{"srcX":0,"srcY":832,"destX":128,"destY":640,"width":64,"height":64},{"srcX":0,"srcY":896,"destX":448,"destY":0,"width":64,"height":64},{"srcX":0,"srcY":960,"destX":784,"destY":512,"width":64,"height":64},{"srcX":0,"srcY":1024,"destX":256,"destY":320,"width":64,"height":64},{"srcX":0,"srcY":1136,"destX":576,"destY":832,"width":64,"height":64},{"srcX":64,"srcY":0,"destX":192,"destY":128,"width":64,"height":64},{"srcX":64,"srcY":64,"destX":528,"destY":640,"width":64,"height":64},{"srcX":64,"srcY":128,"destX":0,"destY":448,"width":64,"height":64},{"srcX":64,"srcY":192,"destX":320,"destY":1008,"width":64,"height":64},{"srcX":64,"srcY":256,"destX":656,"destY":320,"width":64,"height":64},{"srcX":64,"srcY":320,"destX":128,"destY":128,"width":64,"height":64},{"srcX":64,"srcY":384,"destX":448,"destY":640,"width":64,"height":64},{"srcX":64,"srcY":448,"destX":784,"destY":0,"width":64,"height":64},{"srcX":64,"srcY":512,"destX":256,"destY":1008,"width":64,"height":64},{"srcX":64,"srcY":576,"destX":576,"destY":320,"width":64,"height":64},{"srcX":64,"srcY":640,"destX":64,"destY":128,"width":64,"height":64},{"srcX":64,"srcY":704,"destX":384,"destY":640,"width":64,"height":64},{"srcX":64,"srcY":768,"destX":704,"destY":0,"width":64,"height":64},{"srcX":64,"srcY":832,"destX":192,"destY":1008,"width":64,"height":64},{"srcX":64,"srcY":896,"destX":512,"destY":320,"width":64,"height":64},{"srcX":64,"srcY":960,"destX":0,"destY":128,"width":64,"height":64},{"srcX":64,"srcY":1024,"destX":320,"destY":640,"width":64,"height":64},{"srcX":64,"srcY":1136,"destX":640,"destY":0,"width":64,"height":64},{"srcX":128,"srcY":0,"destX":256,"destY":448,"width":64,"height":64},{"srcX":128,"srcY":64,"destX":592,"destY":960,"width":64,"height":64},{"srcX":128,"srcY":128,"destX":64,"destY":816,"width":64,"height":64},{"srcX":128,"srcY":192,"destX":384,"destY":128,"width":64,"height":64},{"srcX":128,"srcY":256,"destX":720,"destY":640,"width":64,"height":64},{"srcX":128,"srcY":320,"destX":192,"destY":448,"width":64,"height":64},{"srcX":128,"srcY":384,"destX":528,"destY":960,"width":64,"height":64},{"srcX":128,"srcY":448,"destX":0,"destY":816,"width":64,"height":64},{"srcX":128,"srcY":512,"destX":320,"destY":128,"width":64,"height":64},{"srcX":128,"srcY":576,"destX":656,"destY":640,"width":64,"height":64},{"srcX":128,"srcY":640,"destX":128,"destY":448,"width":64,"height":64},{"srcX":128,"srcY":704,"destX":464,"destY":960,"width":64,"height":64},{"srcX":128,"srcY":768,"destX":784,"destY":320,"width":64,"height":64},{"srcX":128,"srcY":832,"destX":256,"destY":128,"width":64,"height":64},{"srcX":128,"srcY":896,"destX":592,"destY":640,"width":64,"height":64},{"srcX":128,"srcY":960,"destX":64,"destY":448,"width":64,"height":64},{"srcX":128,"srcY":1024,"destX":400,"destY":960,"width":64,"height":64},{"srcX":128,"srcY":1136,"destX":720,"destY":320,"width":64,"height":64},{"srcX":192,"srcY":0,"destX":320,"destY":768,"width":64,"height":64},{"srcX":192,"srcY":64,"destX":656,"destY":128,"width":64,"height":64},{"srcX":192,"srcY":128,"destX":128,"destY":1136,"width":64,"height":64},{"srcX":192,"srcY":192,"destX":448,"destY":448,"width":64,"height":64},{"srcX":192,"srcY":256,"destX":784,"destY":960,"width":64,"height":64},{"srcX":192,"srcY":320,"destX":256,"destY":768,"width":64,"height":64},{"srcX":192,"srcY":384,"destX":592,"destY":128,"width":64,"height":64},{"srcX":192,"srcY":448,"destX":64,"destY":1136,"width":64,"height":64},{"srcX":192,"srcY":512,
"destX":384,"destY":448,"width":64,"height":64},{"srcX":192,"srcY":576,"destX":720,"destY":960,"width":64,"height":64},{"srcX":192,"srcY":640,"destX":192,"destY":768,"width":64,"height":64},{"srcX":192,"srcY":704,"destX":528,"destY":128,"width":64,"height":64},{"srcX":192,"srcY":768,"destX":0,"destY":1136,"width":64,"height":64},{"srcX":192,"srcY":832,"destX":320,"destY":448,"width":64,"height":64},{"srcX":192,"srcY":896,"destX":656,"destY":960,"width":64,"height":64},{"srcX":192,"srcY":960,"destX":128,"destY":816,"width":64,"height":64},{"srcX":192,"srcY":1024,"destX":464,"destY":128,"width":64,"height":64},{"srcX":192,"srcY":1136,"destX":784,"destY":640,"width":64,"height":64},{"srcX":256,"srcY":0,"destX":400,"destY":1136,"width":64,"height":64},{"srcX":256,"srcY":64,"destX":704,"destY":448,"width":64,"height":64},{"srcX":256,"srcY":128,"destX":192,"destY":256,"width":64,"height":64},{"srcX":256,"srcY":192,"destX":512,"destY":768,"width":64,"height":64},{"srcX":256,"srcY":256,"destX":0,"destY":576,"width":64,"height":64},{"srcX":256,"srcY":320,"destX":336,"destY":1136,"width":64,"height":64},{"srcX":256,"srcY":384,"destX":640,"destY":448,"width":64,"height":64},{"srcX":256,"srcY":448,"destX":128,"destY":256,"width":64,"height":64},{"srcX":256,"srcY":512,"destX":448,"destY":768,"width":64,"height":64},{"srcX":256,"srcY":576,"destX":784,"destY":128,"width":64,"height":64},{"srcX":256,"srcY":640,"destX":272,"destY":1136,"width":64,"height":64},{"srcX":256,"srcY":704,"destX":576,"destY":448,"width":64,"height":64},{"srcX":256,"srcY":768,"destX":64,"destY":256,"width":64,"height":64},{"srcX":256,"srcY":832,"destX":384,"destY":768,"width":64,"height":64},{"srcX":256,"srcY":896,"destX":720,"destY":128,"width":64,"height":64},{"srcX":256,"srcY":960,"destX":208,"destY":1136,"width":64,"height":64},{"srcX":256,"srcY":1024,"destX":512,"destY":448,"width":64,"height":64},{"srcX":256,"srcY":1136,"destX":0,"destY":256,"width":64,"height":64},{"srcX":320,"srcY":0,"destX":448,"destY":256,"width":64,"height":64},{"srcX":320,"srcY":64,"destX":784,"destY":768,"width":64,"height":64},{"srcX":320,"srcY":128,"destX":256,"destY":576,"width":64,"height":64},{"srcX":320,"srcY":192,"destX":592,"destY":1136,"width":64,"height":64},{"srcX":320,"srcY":256,"destX":64,"destY":944,"width":64,"height":64},{"srcX":320,"srcY":320,"destX":384,"destY":256,"width":64,"height":64},{"srcX":320,"srcY":384,"destX":720,"destY":768,"width":64,"height":64},{"srcX":320,"srcY":448,"destX":192,"destY":576,"width":64,"height":64},{"srcX":320,"srcY":512,"destX":528,"destY":1136,"width":64,"height":64},{"srcX":320,"srcY":576,"destX":0,"destY":944,"width":64,"height":64},{"srcX":320,"srcY":640,"destX":320,"destY":256,"width":64,"height":64},{"srcX":320,"srcY":704,"destX":656,"destY":768,"width":64,"height":64},{"srcX":320,"srcY":768,"destX":128,"destY":576,"width":64,"height":64},{"srcX":320,"srcY":832,"destX":464,"destY":1136,"width":64,"height":64},{"srcX":320,"srcY":896,"destX":784,"destY":448,"width":64,"height":64},{"srcX":320,"srcY":960,"destX":256,"destY":256,"width":64,"height":64},{"srcX":320,"srcY":1024,"destX":576,"destY":768,"width":64,"height":64},{"srcX":320,"srcY":1136,"destX":64,"destY":576,"width":64,"height":64},{"srcX":400,"srcY":0,"destX":528,"destY":576,"width":64,"height":64},{"srcX":400,"srcY":64,"destX":0,"destY":384,"width":64,"height":64},{"srcX":400,"srcY":128,"destX":320,"destY":896,"width":64,"height":64},{"srcX":400,"srcY":192,"destX":656,"destY":256,"width":64,"height":64},{"srcX":400,"srcY":256,"destX":128,"
destY":64,"width":64,"height":64},{"srcX":400,"srcY":320,"destX":464,"destY":576,"width":64,"height":64},{"srcX":400,"srcY":384,"destX":784,"destY":1136,"width":64,"height":64},{"srcX":400,"srcY":448,"destX":256,"destY":944,"width":64,"height":64},{"srcX":400,"srcY":512,"destX":592,"destY":256,"width":64,"height":64},{"srcX":400,"srcY":576,"destX":64,"destY":64,"width":64,"height":64},{"srcX":400,"srcY":640,"destX":384,"destY":576,"width":64,"height":64},{"srcX":400,"srcY":704,"destX":720,"destY":1136,"width":64,"height":64},{"srcX":400,"srcY":768,"destX":192,"destY":944,"width":64,"height":64},{"srcX":400,"srcY":832,"destX":512,"destY":256,"width":64,"height":64},{"srcX":400,"srcY":896,"destX":0,"destY":64,"width":64,"height":64},{"srcX":400,"srcY":960,"destX":320,"destY":576,"width":64,"height":64},{"srcX":400,"srcY":1024,"destX":656,"destY":1136,"width":64,"height":64},{"srcX":400,"srcY":1136,"destX":128,"destY":944,"width":64,"height":64},{"srcX":464,"srcY":0,"destX":576,"destY":896,"width":64,"height":64},{"srcX":464,"srcY":64,"destX":64,"destY":752,"width":64,"height":64},{"srcX":464,"srcY":128,"destX":400,"destY":64,"width":64,"height":64},{"srcX":464,"srcY":192,"destX":720,"destY":576,"width":64,"height":64},{"srcX":464,"srcY":256,"destX":192,"destY":384,"width":64,"height":64},{"srcX":464,"srcY":320,"destX":512,"destY":896,"width":64,"height":64},{"srcX":464,"srcY":384,"destX":0,"destY":752,"width":64,"height":64},{"srcX":464,"srcY":448,"destX":320,"destY":64,"width":64,"height":64},{"srcX":464,"srcY":512,"destX":656,"destY":576,"width":64,"height":64},{"srcX":464,"srcY":576,"destX":128,"destY":384,"width":64,"height":64},{"srcX":464,"srcY":640,"destX":448,"destY":896,"width":64,"height":64},{"srcX":464,"srcY":704,"destX":784,"destY":256,"width":64,"height":64},{"srcX":464,"srcY":768,"destX":256,"destY":64,"width":64,"height":64},{"srcX":464,"srcY":832,"destX":592,"destY":576,"width":64,"height":64},{"srcX":464,"srcY":896,"destX":64,"destY":384,"width":64,"height":64},{"srcX":464,"srcY":960,"destX":384,"destY":896,"width":64,"height":64},{"srcX":464,"srcY":1024,"destX":720,"destY":256,"width":64,"height":64},{"srcX":464,"srcY":1136,"destX":192,"destY":64,"width":64,"height":64},{"srcX":528,"srcY":0,"destX":656,"destY":64,"width":64,"height":64},{"srcX":528,"srcY":64,"destX":128,"destY":1072,"width":64,"height":64},{"srcX":528,"srcY":128,"destX":448,"destY":384,"width":64,"height":64},{"srcX":528,"srcY":192,"destX":784,"destY":896,"width":64,"height":64},{"srcX":528,"srcY":256,"destX":256,"destY":704,"width":64,"height":64},{"srcX":528,"srcY":320,"destX":592,"destY":64,"width":64,"height":64},{"srcX":528,"srcY":384,"destX":64,"destY":1072,"width":64,"height":64},{"srcX":528,"srcY":448,"destX":384,"destY":384,"width":64,"height":64},{"srcX":528,"srcY":512,"destX":704,"destY":896,"width":64,"height":64},{"srcX":528,"srcY":576,"destX":192,"destY":704,"width":64,"height":64},{"srcX":528,"srcY":640,"destX":528,"destY":64,"width":64,"height":64},{"srcX":528,"srcY":704,"destX":0,"destY":1072,"width":64,"height":64},{"srcX":528,"srcY":768,"destX":320,"destY":384,"width":64,"height":64},{"srcX":528,"srcY":832,"destX":640,"destY":896,"width":64,"height":64},{"srcX":528,"srcY":896,"destX":128,"destY":704,"width":64,"height":64},{"srcX":528,"srcY":960,"destX":464,"destY":64,"width":64,"height":64},{"srcX":528,"srcY":1024,"destX":784,"destY":576,"width":64,"height":64},{"srcX":528,"srcY":1136,"destX":256,"destY":384,"width":64,"height":64},{"srcX":592,"srcY":0,"destX":720,"destY":384,"width":64,
"height":64},{"srcX":592,"srcY":64,"destX":192,"destY":192,"width":64,"height":64},{"srcX":592,"srcY":128,"destX":512,"destY":704,"width":64,"height":64},{"srcX":592,"srcY":192,"destX":0,"destY":512,"width":64,"height":64},{"srcX":592,"srcY":256,"destX":320,"destY":1072,"width":64,"height":64},{"srcX":592,"srcY":320,"destX":640,"destY":384,"width":64,"height":64},{"srcX":592,"srcY":384,"destX":128,"destY":192,"width":64,"height":64},{"srcX":592,"srcY":448,"destX":448,"destY":704,"width":64,"height":64},{"srcX":592,"srcY":512,"destX":784,"destY":64,"width":64,"height":64},{"srcX":592,"srcY":576,"destX":256,"destY":1072,"width":64,"height":64},{"srcX":592,"srcY":640,"destX":576,"destY":384,"width":64,"height":64},{"srcX":592,"srcY":704,"destX":64,"destY":192,"width":64,"height":64},{"srcX":592,"srcY":768,"destX":384,"destY":704,"width":64,"height":64},{"srcX":592,"srcY":832,"destX":720,"destY":64,"width":64,"height":64},{"srcX":592,"srcY":896,"destX":192,"destY":1072,"width":64,"height":64},{"srcX":592,"srcY":960,"destX":512,"destY":384,"width":64,"height":64},{"srcX":592,"srcY":1024,"destX":0,"destY":192,"width":64,"height":64},{"srcX":592,"srcY":1136,"destX":320,"destY":704,"width":64,"height":64},{"srcX":656,"srcY":0,"destX":784,"destY":704,"width":64,"height":64},{"srcX":656,"srcY":64,"destX":256,"destY":512,"width":64,"height":64},{"srcX":656,"srcY":128,"destX":592,"destY":1024,"width":64,"height":64},{"srcX":656,"srcY":192,"destX":64,"destY":880,"width":64,"height":64},{"srcX":656,"srcY":256,"destX":384,"destY":192,"width":64,"height":64},{"srcX":656,"srcY":320,"destX":720,"destY":704,"width":64,"height":64},{"srcX":656,"srcY":384,"destX":192,"destY":512,"width":64,"height":64},{"srcX":656,"srcY":448,"destX":528,"destY":1024,"width":64,"height":64},{"srcX":656,"srcY":512,"destX":0,"destY":880,"width":64,"height":64},{"srcX":656,"srcY":576,"destX":320,"destY":192,"width":64,"height":64},{"srcX":656,"srcY":640,"destX":656,"destY":704,"width":64,"height":64},{"srcX":656,"srcY":704,"destX":128,"destY":512,"width":64,"height":64},{"srcX":656,"srcY":768,"destX":464,"destY":1024,"width":64,"height":64},{"srcX":656,"srcY":832,"destX":784,"destY":384,"width":64,"height":64},{"srcX":656,"srcY":896,"destX":256,"destY":192,"width":64,"height":64},{"srcX":656,"srcY":960,"destX":592,"destY":704,"width":64,"height":64},{"srcX":656,"srcY":1024,"destX":64,"destY":512,"width":64,"height":64},{"srcX":656,"srcY":1136,"destX":384,"destY":1024,"width":64,"height":64},{"srcX":720,"srcY":0,"destX":0,"destY":320,"width":64,"height":64},{"srcX":720,"srcY":64,"destX":320,"destY":832,"width":64,"height":64},{"srcX":720,"srcY":128,"destX":656,"destY":192,"width":64,"height":64},{"srcX":720,"srcY":192,"destX":128,"destY":0,"width":64,"height":64},{"srcX":720,"srcY":256,"destX":464,"destY":512,"width":64,"height":64},{"srcX":720,"srcY":320,"destX":784,"destY":1024,"width":64,"height":64},{"srcX":720,"srcY":384,"destX":256,"destY":832,"width":64,"height":64},{"srcX":720,"srcY":448,"destX":592,"destY":192,"width":64,"height":64},{"srcX":720,"srcY":512,"destX":64,"destY":0,"width":64,"height":64},{"srcX":720,"srcY":576,"destX":400,"destY":512,"width":64,"height":64},{"srcX":720,"srcY":640,"destX":720,"destY":1024,"width":64,"height":64},{"srcX":720,"srcY":704,"destX":192,"destY":880,"width":64,"height":64},{"srcX":720,"srcY":768,"destX":528,"destY":192,"width":64,"height":64},{"srcX":720,"srcY":832,"destX":0,"destY":0,"width":64,"height":64},{"srcX":720,"srcY":896,"destX":320,"destY":512,"width":64,"height":64},{"srcX":
720,"srcY":960,"destX":656,"destY":1024,"width":64,"height":64},{"srcX":720,"srcY":1024,"destX":128,"destY":880,"width":64,"height":64},{"srcX":720,"srcY":1136,"destX":448,"destY":192,"width":64,"height":64},{"srcX":784,"srcY":0,"destX":64,"destY":640,"width":64,"height":64},{"srcX":784,"srcY":64,"destX":384,"destY":0,"width":64,"height":64},{"srcX":784,"srcY":128,"destX":720,"destY":512,"width":64,"height":64},{"srcX":784,"srcY":192,"destX":192,"destY":320,"width":64,"height":64},{"srcX":784,"srcY":256,"destX":512,"destY":832,"width":64,"height":64},{"srcX":784,"srcY":320,"destX":0,"destY":688,"width":64,"height":64},{"srcX":784,"srcY":384,"destX":320,"destY":0,"width":64,"height":64},{"srcX":784,"srcY":448,"destX":656,"destY":512,"width":64,"height":64},{"srcX":784,"srcY":512,"destX":128,"destY":320,"width":64,"height":64},{"srcX":784,"srcY":576,"destX":448,"destY":832,"width":64,"height":64},{"srcX":784,"srcY":640,"destX":784,"destY":192,"width":64,"height":64},{"srcX":784,"srcY":704,"destX":256,"destY":0,"width":64,"height":64},{"srcX":784,"srcY":768,"destX":592,"destY":512,"width":64,"height":64},{"srcX":784,"srcY":832,"destX":64,"destY":320,"width":64,"height":64},{"srcX":784,"srcY":896,"destX":384,"destY":832,"width":64,"height":64},{"srcX":784,"srcY":960,"destX":720,"destY":192,"width":64,"height":64},{"srcX":784,"srcY":1024,"destX":192,"destY":0,"width":64,"height":64},{"srcX":784,"srcY":1136,"destX":528,"destY":512,"width":64,"height":64}]] \ No newline at end of file diff --git a/manga-py-stable_1.x/tests/mosaic/tonarinoyj_jp_orig.jpg b/manga-py-stable_1.x/tests/mosaic/tonarinoyj_jp_orig.jpg new file mode 100644 index 0000000..55424e7 Binary files /dev/null and b/manga-py-stable_1.x/tests/mosaic/tonarinoyj_jp_orig.jpg differ diff --git a/manga-py-stable_1.x/tests/mosaic/tonarinoyj_jp_orig.png b/manga-py-stable_1.x/tests/mosaic/tonarinoyj_jp_orig.png new file mode 100644 index 0000000..13369a7 Binary files /dev/null and b/manga-py-stable_1.x/tests/mosaic/tonarinoyj_jp_orig.png differ diff --git a/manga-py-stable_1.x/tests/mosaic/tonarinoyj_jp_reference.jpg b/manga-py-stable_1.x/tests/mosaic/tonarinoyj_jp_reference.jpg new file mode 100644 index 0000000..e96e2d1 Binary files /dev/null and b/manga-py-stable_1.x/tests/mosaic/tonarinoyj_jp_reference.jpg differ diff --git a/manga-py-stable_1.x/tests/mosaic/tonarinoyj_jp_reference.png b/manga-py-stable_1.x/tests/mosaic/tonarinoyj_jp_reference.png new file mode 100644 index 0000000..974319c Binary files /dev/null and b/manga-py-stable_1.x/tests/mosaic/tonarinoyj_jp_reference.png differ diff --git a/manga-py-stable_1.x/tests/mosaic/viz/canvas0.png b/manga-py-stable_1.x/tests/mosaic/viz/canvas0.png new file mode 100644 index 0000000..e6bedc8 Binary files /dev/null and b/manga-py-stable_1.x/tests/mosaic/viz/canvas0.png differ diff --git a/manga-py-stable_1.x/tests/mosaic/viz/canvas1.png b/manga-py-stable_1.x/tests/mosaic/viz/canvas1.png new file mode 100644 index 0000000..4a85883 Binary files /dev/null and b/manga-py-stable_1.x/tests/mosaic/viz/canvas1.png differ diff --git a/manga-py-stable_1.x/tests/mosaic/viz/canvas2.png b/manga-py-stable_1.x/tests/mosaic/viz/canvas2.png new file mode 100644 index 0000000..74ca64c Binary files /dev/null and b/manga-py-stable_1.x/tests/mosaic/viz/canvas2.png differ diff --git a/manga-py-stable_1.x/tests/mosaic/viz/canvas3.png b/manga-py-stable_1.x/tests/mosaic/viz/canvas3.png new file mode 100644 index 0000000..02369f1 Binary files /dev/null and b/manga-py-stable_1.x/tests/mosaic/viz/canvas3.png differ 
diff --git a/manga-py-stable_1.x/tests/mosaic/viz/canvas4.png b/manga-py-stable_1.x/tests/mosaic/viz/canvas4.png new file mode 100644 index 0000000..c185f8e Binary files /dev/null and b/manga-py-stable_1.x/tests/mosaic/viz/canvas4.png differ diff --git a/manga-py-stable_1.x/tests/mosaic/viz/canvas5.png b/manga-py-stable_1.x/tests/mosaic/viz/canvas5.png new file mode 100644 index 0000000..ee1d6a3 Binary files /dev/null and b/manga-py-stable_1.x/tests/mosaic/viz/canvas5.png differ diff --git a/manga-py-stable_1.x/tests/mosaic/viz/canvas6.png b/manga-py-stable_1.x/tests/mosaic/viz/canvas6.png new file mode 100644 index 0000000..04d78ed Binary files /dev/null and b/manga-py-stable_1.x/tests/mosaic/viz/canvas6.png differ diff --git a/manga-py-stable_1.x/tests/mosaic/viz/index0.jfif b/manga-py-stable_1.x/tests/mosaic/viz/index0.jfif new file mode 100644 index 0000000..f0bdac9 Binary files /dev/null and b/manga-py-stable_1.x/tests/mosaic/viz/index0.jfif differ diff --git a/manga-py-stable_1.x/tests/mosaic/viz/index1.jfif b/manga-py-stable_1.x/tests/mosaic/viz/index1.jfif new file mode 100644 index 0000000..d155ca3 Binary files /dev/null and b/manga-py-stable_1.x/tests/mosaic/viz/index1.jfif differ diff --git a/manga-py-stable_1.x/tests/mosaic/viz/index2.jfif b/manga-py-stable_1.x/tests/mosaic/viz/index2.jfif new file mode 100644 index 0000000..8a7b860 Binary files /dev/null and b/manga-py-stable_1.x/tests/mosaic/viz/index2.jfif differ diff --git a/manga-py-stable_1.x/tests/mosaic/viz/index3.jfif b/manga-py-stable_1.x/tests/mosaic/viz/index3.jfif new file mode 100644 index 0000000..ce10788 Binary files /dev/null and b/manga-py-stable_1.x/tests/mosaic/viz/index3.jfif differ diff --git a/manga-py-stable_1.x/tests/mosaic/viz/index4.jfif b/manga-py-stable_1.x/tests/mosaic/viz/index4.jfif new file mode 100644 index 0000000..142a92a Binary files /dev/null and b/manga-py-stable_1.x/tests/mosaic/viz/index4.jfif differ diff --git a/manga-py-stable_1.x/tests/mosaic/viz/index5.jfif b/manga-py-stable_1.x/tests/mosaic/viz/index5.jfif new file mode 100644 index 0000000..f3ae79f Binary files /dev/null and b/manga-py-stable_1.x/tests/mosaic/viz/index5.jfif differ diff --git a/manga-py-stable_1.x/tests/mosaic/viz/index6.jfif b/manga-py-stable_1.x/tests/mosaic/viz/index6.jfif new file mode 100644 index 0000000..b117701 Binary files /dev/null and b/manga-py-stable_1.x/tests/mosaic/viz/index6.jfif differ diff --git a/manga-py-stable_1.x/tests/std.py b/manga-py-stable_1.x/tests/std.py new file mode 100644 index 0000000..453dc2d --- /dev/null +++ b/manga-py-stable_1.x/tests/std.py @@ -0,0 +1,76 @@ +import unittest +from .base import root_path +from manga_py.fs import path_join +from manga_py.providers.helpers.std import Std +from manga_py.provider import Provider + + +class M(Std, Provider): + def get_main_content(self): + return self._get_content('{}/{}') + + def get_manga_name(self) -> str: + return self._get_name(r'\.\w+/([^/]+)\?') + + def get_chapters(self) -> list: + return self._elements('a') + + def get_files(self) -> list: + parser = self.document_fromstring(self.content) + return self._images_helper(parser, 'a', 'href') + + def get_archive_name(self) -> str: + return 'archive' + + def get_chapter_index(self) -> str: + return '0' + + +class TestStd(unittest.TestCase): + @property + def _provider(self): + provider = M() + provider._params['url'] = 'https://www.google.com/imghp?hl=&tab=wi' + return provider + + def test_base(self): + provider = self._provider + self.assertTrue(provider.get_manga_name() == 
provider.manga_name) + self.assertNotIn('manga_name', provider._storage) + provider._storage['manga_name'] = provider.get_manga_name() + '-name' + self.assertFalse(provider.get_manga_name() == provider.manga_name) + self.assertTrue(~provider.manga_name.find('imghp')) + provider._storage['manga_name'] = provider.get_manga_name() + self.assertTrue(len(provider.content) > 100) + + def test_cookies(self): + provider = self._provider + provider._base_cookies(provider.get_url()) + self.assertTrue(len(provider._storage['cookies'].keys()) > 0) + + def test_normal_name(self): + name = self._provider.normal_arc_name(['1', '2']) + self.assertEqual('vol_001-2', name) + + def test_iterators(self): + provider = self._provider + provider._storage['manga_name'] = provider.manga_name + self.assertTrue(len(provider.get_chapters()) > 0) + self.assertTrue(~provider.get_files()[0].find('.google.')) + + def test_cover_from_content(self): + self.content = '' + src = self._provider._cover_from_content('img') + self.assertEqual('http', src[:4]) + + def test_first_select(self): + provider = self._provider + with open(path_join(root_path, 'files', 'select.html'), 'r') as f: + provider._storage['main_content'] = f.read() + reference = len(provider._elements('select')[0].cssselect('option')) + parser = provider.document_fromstring(provider.content) + skip = len(provider._first_select_options(parser, 'select')) + not_exists = len(provider._first_select_options(parser, 'select#not_exists_selector')) + + self.assertTrue(reference == (skip + 1)) + self.assertTrue(not_exists == 0) diff --git a/manga-py-stable_1.x/tests/web_driver.py b/manga-py-stable_1.x/tests/web_driver.py new file mode 100644 index 0000000..ec9d9c8 --- /dev/null +++ b/manga-py-stable_1.x/tests/web_driver.py @@ -0,0 +1,22 @@ +import unittest + +from pyvirtualdisplay import Display +from selenium.common.exceptions import NoSuchElementException + +from manga_py.base_classes import WebDriver + + +class TestWebDriver(unittest.TestCase): + def test_driver(self): + display = Display(visible=0, size=(800, 600)) + display.start() + driver = WebDriver().get_driver() + driver.get('https://ya.ru') + result = True + try: + driver.find_element_by_id('text') + except NoSuchElementException: + result = False + driver.close() + display.stop() + self.assertTrue(result)
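Both new test modules are plain unittest cases: tests/std.py exercises the Std helpers through a minimal provider pointed at a live URL (so it needs network access), while tests/web_driver.py needs a browser driver plus an X virtual display via pyvirtualdisplay. For debugging a single module, the standard-library runner is enough; the snippet below is a generic sketch, not a script shipped with the project.

    # Sketch only: run the two test modules above with the stdlib runner.
    # tests.std performs live HTTP requests; tests.web_driver additionally needs
    # a browser driver and pyvirtualdisplay, so start with tests.std when in doubt.
    import unittest

    suite = unittest.defaultTestLoader.loadTestsFromNames(['tests.std', 'tests.web_driver'])
    unittest.TextTestRunner(verbosity=2).run(suite)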