Added manga-py source

2019-12-14 22:33:14 -05:00
parent 9a4dd4b09b
commit 45067caea6
420 changed files with 18054 additions and 0 deletions

View File

@@ -0,0 +1,23 @@
exclude_paths:
  - 'tests/'
  - 'manga_py/storage/'
  - 'manga_py/providers/'
  - 'manga_py/gui/langs/*.json'
  - 'helpers/'
  - 'Dockerfile'
  - 'LICENSE'
  - 'README*'
  - '*.yml'
  - '*.txt'
  - '.scrutinizer.yml'
  - '.travis.yml'
  - '.codeclimate.yml'
  - '.gitignore'
  - '.gitmodules'
languages:
  Python: true
pep8:
  enabled: true
  checks:
    E501:
      enabled: false

manga-py-stable_1.x/.gitignore vendored Normal file
View File

@@ -0,0 +1,107 @@
.idea/
Manga/
tests/temp/
manga_py/rebreakcaptcha
manga_py/storage/.passwords.json
manga_py/storage/chromedriver
manga_py/storage/chromedriver.exe
### Python template
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# C extensions
*.so
# Distribution / packaging
.Python
env/
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
*.egg-info/
.installed.cfg
*.egg
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
!helpers/manga.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*,cover
.hypothesis/
# Translations
*.mo
*.pot
# Django stuff:
*.log
local_settings.py
# Flask stuff:
instance/
.webassets-cache
# Scrapy stuff:
.scrapy
# Sphinx documentation
docs/_build/
# PyBuilder
target/
# Jupyter Notebook
.ipynb_checkpoints
# pyenv
.python-version
# celery beat schedule file
celerybeat-schedule
# SageMath parsed files
*.sage.py
# dotenv
.env
# virtualenv
.venv
venv/
ENV/
# Spyder project settings
.spyderproject
# Rope project settings
.ropeproject

View File

@@ -0,0 +1,39 @@
checks:
  python:
    code_rating: true
    duplicate_code: true
  javascript: true
build:
  nodes:
    analysis:
      project_setup:
        override: true
      tests:
        before:
          - sudo apt remove chromium-browser -y
          - sudo apt update
          - sudo apt install -y dpkg
          - pip3 install coverage
          - pip3 install -r requirements_dev.txt
          - wget https://dl.google.com/linux/direct/google-chrome-stable_current_amd64.deb -O /tmp/chrome.deb
          - sudo dpkg -i /tmp/chrome.deb
          - sudo apt install -y -f --fix-missing
        override:
          -
            command: 'coverage run --omit=manga_py/providers/*.py --source=manga_py run_tests.py'
            coverage:
              file: '.coverage'
              format: 'py-cc'
          - py-scrutinizer-run
  environment:
    python: 3.5.3
    node: 6.0.0
filter:
  excluded_paths:
    - manga_py/crypt/sunday_webry_com.py
    - manga_py/crypt/aes.js
    - manga_py/crypt/aes_zp.js
    - tests/*.py
    - run_tests.py
    - manga.py

View File

@@ -0,0 +1,63 @@
env:
  global:
    - CC_TEST_REPORTER_ID=ff7add7a0f454aff7e13c739a06a7aba8e5c8229d3e776e051294341b4721871
addons:
  artifacts: true
language: python
dist: xenial
python:
  - "3.5"
  - "3.6"
  - "3.7"
#  - "nightly"
cache: pip
before_install:
  - sudo apt-get -y install nodejs python-setuptools libwebp-dev
  - python -V
  - pwd
  - chmod +x helpers/after_script.sh
  - chmod +x helpers/before_deploy.sh
  - chmod +x helpers/before_script.sh
  - source ./helpers/before_script.sh
install:
  - npm install -g sass node-sass html-minifier
  - pip install --upgrade coverage codeclimate-test-reporter setuptools pyinstaller
  - pip install -r requirements_dev.txt
before_script:
  - curl -L https://codeclimate.com/downloads/test-reporter/test-reporter-latest-linux-amd64 > ./cc-test-reporter
  - chmod +x ./cc-test-reporter
  - ./cc-test-reporter before-build
script:
  - if [[ "$TRAVIS_TAG" = "" ]]; then coverage run --omit=manga_py/providers/*.py --source=manga_py run_tests.py; fi
after_script:
  - source ./helpers/after_script.sh
before_deploy:
  - echo "$allow_deploy"
  - echo "Start make gh-pages content"
  - source ./helpers/before_deploy.sh
deploy:
  - provider: pages
    edge: true
    local-dir: helpers/gh_pages_content
    target-branch: gh-pages
    github-token: $GH_TOKEN
    skip-cleanup: true
    skip_cleanup: true
    on:
      branch: stable_1.x
      condition: $TRAVIS_PYTHON_VERSION == "3.6"
      tags: false
  - provider: pypi
    server: https://upload.pypi.org/legacy/
    user: 1271
    password: $PYPI_PASS
    skip_cleanup: true
    on:
      branch: stable_1.x
      tags: true
      condition: $TRAVIS_PYTHON_VERSION == "3.6"
#   fqdn: yuru-yuri.sttv.me
# see https://docs.travis-ci.com/user/deployment/pages/
allow_failures:
  - python: nightly
  - python: 3.5

View File

@@ -0,0 +1,21 @@
MIT License
Copyright (c) 2017 yuru-yuri
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

View File

@@ -0,0 +1,180 @@
Manga-py |Travis CI result|
===================================
A universal manga download assistant.
'''''''''''''''''''''''''''''''''''''
Approximately 300 providers are available now.
''''''''''''''''''''''''''''''''''''''''''''''
|Scrutinizer CI result| |Scrutinizer CI coverage| |GitHub issues|
|Code Climate| |Issue Count| |GitHub repo size| |PyPI - size|
|PyPI - Python Version| |PyPi version| |PyPI - Downloads|
Supported resources
-------------------
see:
- https://manga-py.com/manga-py/#resources-list
- https://manga-py.github.io/manga-py/#resources-list (alternative)
- https://yuru-yuri.github.io/manga-py/#resources-list (deprecated)
Plans for improvement:
----------------------
see:
- https://manga-py.com/manga-py/improvement.html
- https://manga-py.github.io/manga-py/improvement.html (alternative)
How to use
----------
Installation
~~~~~~~~~~~~
1) Download Python 3.5+ https://www.anaconda.com/downloads
2) Install the pip package:

.. code:: bash

    pip install manga-py

3) Run the program:

.. code:: bash

    manga-py http://manga.url/manga/name  # download the manga
Installation on Android
~~~~~~~~~~~~~~~~~~~~~~~~~~~
See https://github.com/manga-py/manga-py/issues/48
Docker image:
~~~~~~~~~~~~~
See:
- https://hub.docker.com/r/mangadl/manga-py/tags?page=1&ordering=last_updated
- https://github.com/manga-py/manga-py-docker
Downloading manga
-----------------
**:warning: Sites protected by Cloudflare require Node.js to be installed.**

**:warning: Notice! Multi-threaded image downloading is enabled by default.**
**To change this behavior, add the --no-multi-threads key.**

.. code:: bash

    # download to the "./Manga/<manga-name-here>" directory
    manga-py http://manga-url-here/manga-name

    # download to the "./Manga/Manga Name" directory
    manga-py http://manga-url-here/manga-name --name 'Manga Name'

    # or download to the /manga/destination/path/<manga-name-here> directory
    manga-py http://manga-url-here/manga-name -d /manga/destination/path/

    # skip 3 volumes
    manga-py --skip-volumes 3 http://manga-url-here/manga-name

    # skip 3 volumes and download 2 volumes
    manga-py --skip-volumes 3 --max-volumes 2 http://manga-url-here/manga-name

    # reverse volume downloading (24 -> 1)
    manga-py --reverse-downloading http://manga-url-here/manga-name

    # disable the progress bar
    manga-py --no-progress http://manga-url-here/manga-name
Embedded example:
-----------------
https://github.com/manga-py/manga-py/blob/stable_1.x/embedded.md
Help
----
.. code:: bash

    manga-py -h
    # or
    manga-py --help
Supported by JetBrains
----------------------
|JetBrains logo|
Manga-py Docker
---------------
1. Install docker
- Summary https://docs.docker.com/install/
- Mac https://docs.docker.com/docker-for-mac/install/
- Windows https://docs.docker.com/docker-for-windows/install/
2. Install manga-py
.. code:: bash

    docker pull mangadl/manga-py

3. Run it

.. code:: bash

    docker run -it -v ${PWD}:/home/manga mangadl/manga-py
Or docker-compose:
1. Install docker compose https://docs.docker.com/compose/install/
2. Download manga-py-docker https://github.com/manga-py/manga-py-docker/archive/master.zip
3. Unzip it
4. Run compose
.. code:: bash

    # build docker
    docker-compose build
    # run it
    docker-compose run manga_py
.. |Travis CI result| image:: https://travis-ci.com/manga-py/manga-py.svg?branch=stable_1.x
:target: https://travis-ci.com/manga-py/manga-py/branches
.. |Code Climate| image:: https://codeclimate.com/github/manga-py/manga-py/badges/gpa.svg
:target: https://codeclimate.com/github/manga-py/manga-py
.. |Issue Count| image:: https://codeclimate.com/github/manga-py/manga-py/badges/issue_count.svg
:target: https://codeclimate.com/github/manga-py/manga-py
.. |PyPI - Python Version| image:: https://img.shields.io/pypi/pyversions/manga-py.svg
:target: https://pypi.org/project/manga-py/
.. |Scrutinizer CI result| image:: https://scrutinizer-ci.com/g/manga-py/manga-py/badges/quality-score.png?b=stable_1.x
:target: https://scrutinizer-ci.com/g/manga-py/manga-py
.. |Scrutinizer CI coverage| image:: https://scrutinizer-ci.com/g/manga-py/manga-py/badges/coverage.png?b=stable_1.x
:target: https://scrutinizer-ci.com/g/manga-py/manga-py
.. |GitHub issues| image:: https://img.shields.io/github/issues/manga-py/manga-py.svg
:target: https://github.com/manga-py/manga-py/issues
.. |PyPi version| image:: https://badge.fury.io/py/manga-py.svg
:alt: PyPI
:target: https://pypi.org/project/manga-py/
.. |JetBrains logo| image:: https://github.com/yuru-yuri/manga-py/raw/stable_1.x/.github/jetbrains.png
:alt: JetBrains
:target: https://www.jetbrains.com/?from=manga-py
.. |MicroBadger Layers| image:: https://img.shields.io/microbadger/layers/mangadl/manga-py
:alt: MicroBadger Layers
.. |MicroBadger Size| image:: https://img.shields.io/microbadger/image-size/mangadl/manga-py
:alt: MicroBadger Size
.. |GitHub repo size| image:: https://img.shields.io/github/repo-size/manga-py/manga-py
:alt: GitHub repo size
.. |PyPI - Downloads| image:: https://img.shields.io/pypi/dm/manga-py
:alt: PyPI - Downloads
.. |PyPI - size| image:: https://img.shields.io/badge/dynamic/json?color=success&label=PyPI+size&query=%24.size&url=https://sttv.me/manga-py.json&?cacheSeconds=3600&suffix=+Kb
:alt: PyPI - size

View File

@@ -0,0 +1,12 @@
version: '3.0'
services:
  manga_py:
    container_name: mangadl/manga-py
    image: python
    build: ./helpers/python
    volumes:
      - ./:/home/manga
volumes:
  manga_volume:

View File

@@ -0,0 +1,72 @@
### Use manga-py in your project
```python
from manga_py.parser import Parser
from manga_py.info import Info

my_awesome_handler = open('my-handler', 'w')  # writable handle for log output


class MyAwesomeInfo(Info):
    pass


# main class (you will have your own)
class MyAwesomeClass:
    args = {}
    """
    A Namespace or dict with the cli arguments (filled below).
    You can provide your own implementation; the main thing is that
    all possible keys are present.
    See manga_py.cli.args.get_cli_arguments()
    """
    parser = None  # the found parser is stored here (see below)

    def get_info(self):
        # use the Info class from manga-py, or overload it
        return MyAwesomeInfo(self.args)

    def start(self):
        self.parser = Parser(self.args)
        try:
            self.parser.init_provider(
                progress=self.progress,
                log=self.print,
                quest=self.quest,
                quest_password=self.quest_password,
                info=self.get_info(),
            )
        except AttributeError as e:
            raise e
        self.parser.start()  # provider main method

    def progress(self, items_count: int, current_item: int, re_init: bool = False):
        # simple progress callback; re_init=True means "next chapter"
        pass

    def print(self, text, **kwargs):
        """
        Not used everywhere. Better to overload the global print method
        """
        print(text, **kwargs, file=my_awesome_handler)

    def quest(self, variants: enumerate, title: str, select_type=0):  # 0 = single, 1 = multiple
        if select_type == 0:
            print(' Question ')
            return 'Answer'
        else:
            print(' Question multiple answers')
            return [
                'Answer 1',
                'Answer 2',
                ...
            ]

    def quest_password(self, title):
        """
        used to ask the user for a password
        """
        print(title)
        return 'my_awesome_password'
```
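
A minimal sketch (an assumption, not shown in the repo) of filling `args` with manga-py's own CLI parser, so that every expected key is present; passing the URL positionally mirrors the CLI examples in the README:

```python
from manga_py.cli.args import get_cli_arguments

runner = MyAwesomeClass()
# parse_args() on the project's own ArgumentParser yields a Namespace
# carrying every key the providers expect
runner.args = get_cli_arguments().parse_args(['http://manga-url-here/manga-name'])
runner.start()
```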

View File

@@ -0,0 +1,14 @@
function ytc(y) {
    // decode a space-separated list of char codes into a string
    var x = "", y = y.split(" ");
    for (var i = 0, n = y.length; i < n; i++) x += String.fromCharCode(y[i]);
    return x;
}

function kxatz() {
    // decode every entry of the global `ytaw` array and attach the resulting
    // image url (plus a combined alt text) to the matching <img> element
    for (var i = ytaw.length - 1; i >= 0; i--) {
        ytaw[i] = ytc(ytaw[i]);
        var obj = $('#imgs .wrap_img:eq(' + i + ') img'), alt = $('#imgs').attr('data-alt');
        obj.attr('alt', alt + ' - ' + obj.attr('alt'));
        obj.attr('data-src', ytaw[i]);
    }
}

View File

@@ -0,0 +1,7 @@
#!/usr/bin/env bash
if [[ "$allow_deploy" = "true" ]]
then
coverage xml
./cc-test-reporter after-build -t coverage.py --exit-code $TRAVIS_TEST_RESULT || true
fi

View File

@@ -0,0 +1,22 @@
from requests import get
from os import system, path
from lxml.html import document_fromstring

_path = path.dirname(path.dirname(path.realpath(__file__)))
all_manga_list = None
n = 0
base_path = 'http://animextremist.com/mangas-online/'

# retry up to 10 times: the site is not always reachable
while n < 10:
    try:
        all_manga_list = document_fromstring(get(base_path).text).cssselect('li > a + a')
        break
    except Exception:
        pass
    n += 1

if all_manga_list is None:
    exit('Error! Unable to fetch the manga list')

for i in all_manga_list:
    href = i.get('href')
    print('Downloading %s' % href)
    _str = 'cd {}; python3 manga.py --cli -i -u {}'
    system(_str.format(_path, href))

View File

@@ -0,0 +1,24 @@
#!/usr/bin/env bash
if [[ "$allow_deploy" = "true" ]]
then
if [[ "$TRAVIS_TAG" != "" ]]
then
echo "Skip build bin package"
# echo "Start build bin package"
# cp helpers/.builder.py .
# cp helpers/.providers_updater.py .
# cp helpers/manga.spec .
# python .providers_updater.py
# pyinstaller manga.spec --log-level CRITICAL -y -F
else
echo "Make gh-pages"
node-sass helpers/gh_pages_content/style.scss helpers/gh_pages_content/style.css --output-style compressed
html-minifier helpers/gh_pages_content/index.html --output helpers/gh_pages_content/index.html --html5 --remove-comments --remove-tag-whitespace --collapse-inline-tag-whitespace --remove-attribute-quotes --collapse-whitespace
html-minifier helpers/gh_pages_content/improvement.html --output helpers/gh_pages_content/improvement.html --html5 --remove-comments --remove-tag-whitespace --collapse-inline-tag-whitespace --remove-attribute-quotes --collapse-whitespace
git add -A
git commit -a -m upd
fi
fi

View File

@@ -0,0 +1,13 @@
#!/usr/bin/env bash
allow_deploy="false"
py_version=$(python --version)
# "Python 3.5.x" -> "3.5": strip the "Python " prefix and the patch version
if [[ "${py_version:7:-2}" = "3.5" ]]
then
    allow_deploy="true"
fi
if [[ "$TRAVIS_TAG" = "" ]]
then
    wget 'https://dl.google.com/linux/direct/google-chrome-stable_current_amd64.deb' -O /tmp/chrome.deb && sudo dpkg -i /tmp/chrome.deb && sudo apt-get install -y -f --fix-missing
fi

View File

@@ -0,0 +1,118 @@
from manga_py.providers import providers_list
from manga_py.fs import root_path
from manga_py.meta import __repo_name__
from json import dumps
from datetime import datetime
start_items = [
# [ address, (0 - not worked, 1 - worked, 2 - alias), 'Comment']
# ['http://bato.to', 0, ' - Batoto will be closing down permanently (Jan 18, 2018)'],
['http://bulumanga.com', 0, ' - Closed'],
['http://bwahahacomics.ru', 0, ' - Very little content. Possibly, will be done in the future.'],
['http://com-x.life', 1, ' - One thread only!!! --no-multi-threads. <i class="v0"></i>'],
['http://comic-walker.com', 0, ' - Maybe...'],
['http://comico.jp', 1, ' - only public downloading now'],
['http://comixology.com', 0, ' - Buy only. Not reading.'],
['http://e-hentai.org', 1, '<i class="td"></i>'],
['http://eatmanga.me', 1, '<i class="v0"></i>'],
['http://dm5.com', 0, '<i class="d"></i>'],
['http://gogomanga.co', 1, '<i class="v0"></i>'],
['http://heavenmanga.biz', 2, '- See heavenmanga.site'],
['http://hentai-chan.me', 1, '- Need fill access file'],
['http://heymanga.me', 1, '<i class="v0"></i>'],
['http://comic.k-manga.jp', 0, ' - Maybe...'],
['http://japscan.com', 2, ' - See japscan.to'],
['http://japscan.cc', 2, ' - See japscan.to'],
['http://lhscans.com', 1, '- See rawlh.com'],
['http://luscious.net', 1, '<i class="td"></i>'],
['http://lezhin.com', 0, ' - Maybe...'],
['http://manga-zone.org', 0, ' - Will not be implemented'],
['http://mangaall.com', 2, '- See mangatrue.com'],
['http://mangaforall.com', 1, ''],
['http://mangafreak.net', 1, '<i class="v0"></i>, site down now'],
['http://mangahead.me', 1, '<i class="v0"></i>, site down now'],
['http://mangaleader.com', 1, '<i class="v0"></i> site down now'],
['http://mangamove.com', 1, '<i class="v0"></i>, site down now'],
['http://manganel.com', 1, '<i class="v0"></i>, site down now'],
['http://mangaroot.com', 1, '<i class="v0"></i>, site down now, one thread only!!! --no-multi-threads'],
['http://mangatail.com', 2, '- See mangatail.me'],
['http://mangatrue.com', 1, ' - Site down now'],
['http://mangaz.com', 0, ' - Maybe...'],
['http://mg-zip.com', 0, ' - Will not be implemented'],
['http://raw-zip.com', 0, ' - Will not be implemented'],
['http://rawdevart.com', 1, '<i class="v0"></i>, very little content'],
['http://s-manga.net', 0, ' - Maybe'],
['http://sunday-webry.com', 0, ' - Not worked decryption images now. In develop.'],
['http://tapas.io', 1, '<i class="v0"></i>, only public downloading now'],
['http://tsumino.com', 1, '<i class="d"></i>'],
['http://zip.raw.im', 0, ' - Will not be implemented'],
['http://rawlh.com', 1, '- See lhscan.net'],
['http://8muses.com', 0, '- Need decode page.'],
['http://mangago.me', 0, '- Need decode page.'],
['http://digitalteam1.altervista.org', 0, ' - Site down now'],
['http://heymanga.me', 0, ' - Site down now'],
['http://lector.dangolinenofansub.com', 0, ' - See kumanga.com'],
['http://lector.ytnofan.com', 0, ' - Site down now'],
['http://leomanga.com', 0, ' - Site down now'],
['http://mang.as', 0, ' - Site down now'],
['http://santosfansub.com', 0, ' - Site down now'],
]
_start_items = [i[0] for i in start_items]
def merge(*providers):
    for p in providers:
        yield from providers_list[p]


def clean(providers):
    # normalize provider url-patterns to unique 'http://domain' strings
    _list = {}
    for i in providers:
        _ = i.find('/')
        if not ~_:  # no '/' found
            _ = i.strip('()')
        else:
            _ = i[:_].strip('()')
        _list['http://' + _.replace(r'\.', '.')] = ''
    return list(_list.keys())


def aggregate(providers):
    # every provider not listed manually in start_items is assumed to work
    _list = []
    for i in providers:
        if i not in _start_items:
            _list.append([i, 1, ''])
    return _list


def prepare_html(html):
    with open(html, 'r') as r:
        content = r.read()
    with open(html, 'w') as w:
        content = content.replace('__repo_name__', __repo_name__)
        today = datetime.today()
        content = content.replace('__last_update__', '{}/{:0>2}/{:0>2} {:0>2}-{:0>2}-{:0>2}'.format(
            today.year, today.month, today.day, today.hour, today.minute, today.second
        ))
        w.write(content)


def build_providers():
    items = aggregate(clean(merge(*providers_list))) + start_items
    items = sorted(items, key=lambda l: l[0])
    return dumps(items)


def main():
    path = root_path() + '/helpers/gh_pages_content/'
    with open(path + 'providers.json', 'w') as w:
        w.write(build_providers())
    prepare_html(path + 'index.html')
    prepare_html(path + 'improvement.html')

# print(len(build_providers()))

Binary file not shown (image, 3.7 KiB).

View File

@@ -0,0 +1,21 @@
MIT License
Copyright (c) 2017 yuru-yuri
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

View File

@@ -0,0 +1 @@
Go to [README.md](https://github.com/manga-py/manga-py/blob/master/README_RU.md)

View File

@@ -0,0 +1,2 @@
<?xml version="1.0" encoding="utf-8"?>
<browserconfig><msapplication><tile><square70x70logo src="./favicon/ms-icon-70x70.png"/><square150x150logo src="./favicon/ms-icon-150x150.png"/><square310x310logo src="./favicon/ms-icon-310x310.png"/><TileColor>#ffffff</TileColor></tile></msapplication></browserconfig>

View File

@@ -0,0 +1,58 @@
((d) => {
d.addEventListener('DOMContentLoaded', () => {
/** global: repoUrl */
if(typeof repoUrl == 'undefined')
{
// example: https://api.github.com/repos/manga-py/manga-py/releases/latest
// example: https://api.github.com/repos/yuru-yuri/manga-py/releases/latest
return;
}
fetch(repoUrl)
.then(r => r.json())
.then((r) => {
const links = d.querySelector('#download-links');
const tar = links.querySelector('.tar');
const zip = links.querySelector('.zip');
tar.setAttribute('href', r.tarball_url);
tar.setAttribute('active', 'true');
zip.setAttribute('href', r.zipball_url);
zip.setAttribute('active', 'true');
});
const ul = d.querySelector('#supported-list');
if(!ul)
{
return;
}
fetch('./providers.json')
.then(r => r.json())
.then((r) => {
let html = '', m = 0, done = 0;
const sites = [];
for(let i in r) {
if (!r.hasOwnProperty(i)) continue;
m+=1;
html += '<li><input id="I' + m + '" type="checkbox" ' +
(r[i][1] ? 'checked="" ' : '') +
'disabled=""><label for="I' + m + '"></label><span>' +
'<a target="_blank" href="' +
r[i][0] + '">' +
r[i][0] + '</a> ' +
r[i][2] + '</span></li>';
done += r[i][1] ? 1 : 0;
r[i][1] && sites.push(r[i][0]);
}
ul.innerHTML = ('<!-- ' + r.length + ' ( ' + done + ' ) -->') + html;
let sitesLen = sites.length;
const buttonElement = document.querySelector('#random-site');
buttonElement.setAttribute('target', '_blank');
buttonElement.addEventListener('click', () => {
const idx = Math.floor(Math.random() * sitesLen);
buttonElement.setAttribute('href', sites[idx]);
return true;
});
});
});
})(document);

Binary image files not shown (25 files, 1.1 KiB to 53 KiB).

View File

@@ -0,0 +1,252 @@
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width">
<meta name="keywords" content="manga,manga downloader,download manga,скачать манга,скачать мангу,манга">
<link href="./style.css" media="all" rel="stylesheet">
<link href="https://fonts.googleapis.com/css?family=Roboto&amp;subset=cyrillic-ext,latin-ext" rel="stylesheet">
<title>Universal manga downloader :: Plans for improvement</title>
<link rel="apple-touch-icon" sizes="57x57" href="./favicon/apple-icon-57x57.png">
<link rel="apple-touch-icon" sizes="60x60" href="./favicon/apple-icon-60x60.png">
<link rel="apple-touch-icon" sizes="72x72" href="./favicon/apple-icon-72x72.png">
<link rel="apple-touch-icon" sizes="76x76" href="./favicon/apple-icon-76x76.png">
<link rel="apple-touch-icon" sizes="114x114" href="./favicon/apple-icon-114x114.png">
<link rel="apple-touch-icon" sizes="120x120" href="./favicon/apple-icon-120x120.png">
<link rel="apple-touch-icon" sizes="144x144" href="./favicon/apple-icon-144x144.png">
<link rel="apple-touch-icon" sizes="152x152" href="./favicon/apple-icon-152x152.png">
<link rel="apple-touch-icon" sizes="180x180" href="./favicon/apple-icon-180x180.png">
<link rel="icon" type="image/png" sizes="192x192" href="./favicon/android-icon-192x192.png">
<link rel="icon" type="image/png" sizes="96x96" href="./favicon/favicon-96x96.png">
<link rel="icon" type="image/png" sizes="32x32" href="./favicon/favicon-32x32.png">
<link rel="icon" type="image/png" sizes="16x16" href="./favicon/favicon-16x16.png">
<link rel="manifest" href="./manifest.json">
<meta name="msapplication-TileColor" content="#ffffff">
<meta name="msapplication-TileImage" content="./favicon/ms-icon-144x144.png">
<meta name="theme-color" content="#ffffff">
<script>const repoUrl = 'https://api.github.com/repos/__repo_name__/releases/latest';</script>
<script src="download_btns.js"></script>
</head>
<body>
<div class="container">
<div class="header">
<h1 class="panel">Manga downloader</h1>
<h2 class="h2-panel">Plans for improvement</h2>
<div id="download-links" class="download-links">
<a href="https://github.com/__repo_name__/zipball/master">Download .zip</a>
<a href="https://github.com/__repo_name__/tarball/master">Download .tar</a>
</div>
</div>
<ul class="links">
<li>
<a href="https://github.com/__repo_name__/#universal-assistant-download-manga">Documentation</a>
</li>
<li>
<a href="./#resources-list">Resources list</a>
</li>
</ul>
<hr>
<div class="content">
<div class="content-inner-wrapper">
<h4 class="h-ul" data-symbol="1">
Implement providers for the following resources:
</h4>
<ul class="list">
<!--<li>
http://mangabackup.com
</li>
<li>
http://mimotomi.com/manga
</li>
http://tits.asianhotties.me
</li>
<li>
http://mangaboom.com
</li>
<li>
http://eyeonmanga.com
</li>
<li>
http://mg-zip.com
</li>
<li>
http://saikoscans.ml
</li>
<li>
http://mangasmash.com
</li>
<li>
http://maigo.us
</li>
<li>
http://www.friendshipscans.com
http://www.friendshipscans.com/slide/directory/
</li>
<li>
http://www.friendshipscans.com
http://www.friendshipscans.com/slide/directory/
</li>-->
<li>
http://comicride.jp
</li>
<li>
http://exhentai.org
</li>
<li>
http://lhscans.com
</li>
<li>
https://imgur.com
<!--http://https://imgur.com/a/f9tpg-->
</li>
<li>
http://mangahost.cc
</li>
<li>
http://manga.madokami.al
<!-- END DAY -->
</li>
<li>
http://3asq.info
</li>
<li>
http://hamtruyen.com
</li>
<li>
http://nettruyen.com
</li>
<li>
http://truyenchon.com
</li>
<li>
http://manga.mexat.com
</li>
<li>
http://hamtruyen.com
</li>
<li>
http://reader.sworddemon-scans.org/directory/
</li>
<li>
http://sworddemon-scans.org
<!--http://sworddemon-scans.org/reader/directory/-->
<!--http://sworddemon-scans.org/knts/directory/-->
</li>
<li>
http://choutensei.260mb.net
<!--http://choutensei.260mb.net/directory/-->
</li>
<!--http://gofansub.net/visor/-->
<!--<li>
http://bmanga.net
&lt;!&ndash;http://bmanga.net/mangas/&ndash;&gt;
</li>-->
<li>
http://yaoimangaonline.com
</li>
</ul>
<h4 class="h-ul" data-symbol="2">Maybe</h4>
<ul>
<li>
http://comic-meteor.jp
</li>
<li>
http://mangaz.com
</li>
<li>
http://comic-polaris.jp
</li>
<li>
http://comic.mag-garden.co.jp
</li>
<li>
http://www.comic-valkyrie.com
</li>
<li>
http://seiga.nicovideo.jp/manga/official/biggangan
</li>
<li>
http://www.alphapolis.co.jp
</li>
<li>
http://urasunday.com/index.html
</li>
<li>
http://comic.naver.com/index.nhn
</li>
<li>
http://comic-walker.com
</li>
<li>
http://lezhin.com
</li>
<li>
http://mangaz.com
</li>
<li>
http://exhentai.org
</li>
<li>
http://gameofscanlation.moe
</li>
<!--<li>
http://kumanga.com
</li>-->
<li>
http://lhtranslation.com
</li>
<li>
http://manga.madokami.al
</li>
<li>
http://corocoro.tv/webmanga/index.html
</li>
<li>
http://s-manga.net
</li>
<li>
http://sunday-webry.com
</li>
<li>
http://lhtranslation.com
</li>
<li>
http://ebookrenta.com
</li>
<li>
http://crunchyroll.com <!-- flash -->
</li>
<li>
http://buenaisla.net - Poor site structure
<!--http://buenaisla.net/temas/categoria-manga-->
</li>
<li>
http://nude-moon.me/
</li>
</ul>
<!--mangadex.com-->
<!--lhtranslation.net-->
<!--https://theporndude.com/ru/hentai-porn-sites - need more sites -->
<h4 class="h-ul" data-symbol="3">
Implement an online resource that allows you to download manga without a PC.
<span class="subcontent"> - Questionable</span>
</h4>
<h4 class="h-ul h-ul-two" data-symbol="3.1">
Perhaps host a project on Heroku to implement the previous item.
<span class="subcontent"> - Questionable</span>
</h4>
<h4 class="h-ul" data-symbol="4">
Explore more sites that offer manga for online reading
</h4>
</div>
<div class="last-update">Latest update: __last_update__</div>
</div>
</div>
</body>
</html>

View File

@@ -0,0 +1,85 @@
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width">
<meta name="keywords" content="manga,manga downloader,download manga,скачать манга,скачать мангу,манга">
<link href="./style.css" media="all" rel="stylesheet">
<link href="https://fonts.googleapis.com/css?family=Roboto&amp;subset=cyrillic-ext,latin-ext" rel="stylesheet">
<title>Universal manga downloader</title>
<link rel="apple-touch-icon" sizes="57x57" href="./favicon/apple-icon-57x57.png">
<link rel="apple-touch-icon" sizes="60x60" href="./favicon/apple-icon-60x60.png">
<link rel="apple-touch-icon" sizes="72x72" href="./favicon/apple-icon-72x72.png">
<link rel="apple-touch-icon" sizes="76x76" href="./favicon/apple-icon-76x76.png">
<link rel="apple-touch-icon" sizes="114x114" href="./favicon/apple-icon-114x114.png">
<link rel="apple-touch-icon" sizes="120x120" href="./favicon/apple-icon-120x120.png">
<link rel="apple-touch-icon" sizes="144x144" href="./favicon/apple-icon-144x144.png">
<link rel="apple-touch-icon" sizes="152x152" href="./favicon/apple-icon-152x152.png">
<link rel="apple-touch-icon" sizes="180x180" href="./favicon/apple-icon-180x180.png">
<link rel="icon" type="image/png" sizes="192x192" href="./favicon/android-icon-192x192.png">
<link rel="icon" type="image/png" sizes="32x32" href="./favicon/favicon-32x32.png">
<link rel="icon" type="image/png" sizes="96x96" href="./favicon/favicon-96x96.png">
<link rel="icon" type="image/png" sizes="16x16" href="./favicon/favicon-16x16.png">
<link rel="manifest" href="./manifest.json">
<meta name="msapplication-TileColor" content="#ffffff">
<meta name="msapplication-TileImage" content="./favicon/ms-icon-144x144.png">
<meta name="theme-color" content="#ffffff">
<script>const repoUrl = 'https://api.github.com/repos/__repo_name__/releases/latest';</script>
<script src="download_btns.js"></script>
<style>
i.d {color: #4f0800}i.d:after{content: ' - Debug mode'}
i.td {color: #4f0800}i.td:after{content: ' - Need test. Debug mode'}
i.v0 {color: #005}i.v0:after{content: ' - Version 0.x only'}
</style>
</head>
<body>
<div class="container">
<div class="header">
<h1 class="panel">Manga downloader</h1>
<div id="download-links" class="download-links">
<a class="zip" href="https://github.com/__repo_name__/zipball/master">Download .zip</a>
<a class="tar" href="https://github.com/__repo_name__/tarball/master">Download .tar</a>
</div>
</div>
<ul class="links">
<li>
<a href="https://github.com/__repo_name__/#universal-assistant-download-manga">Documentation</a>
</li><li>
<a href="./improvement.html">Plans for improvement</a>
</li><li>
<a href="#random" id="random-site">Go to random site</a>
</li>
</ul>
<hr>
<div class="resources-list">
<a name="resources-list"></a>
<ul class="list" id="supported-list"></ul>
</div>
<div class="questions">
<h2 class="panel">Questions</h2>
<ul>
<li>
<div class="quest">How to offer more resources?</div>
<div class="answer">
Very simple. Write to me at sttv&#45;pc&#64;mail&#46;ru or make a pull-request on &nbsp;
<a target="_blank" href="https://github.com/__repo_name__/pulls"> github.com</a>
</div>
</li>
<li>
<div class="quest">How to help?</div>
<div class="answer">
You can also make a &nbsp;
<a target="_blank" href="https://github.com/__repo_name__/pulls">pull-request</a>,
or create an &nbsp;
<a target="_blank" href="https://github.com/__repo_name__/issues">issue</a>.<br>
You can also suggest more
sites where manga can be read online.
</div>
</li>
</ul>
</div>
<div class="last-update">Latest update: __last_update__</div>
</div>
</body>
</html>

View File

@@ -0,0 +1,41 @@
{
"name": "App",
"icons": [
{
"src": ".\/favicon\/android-icon-36x36.png",
"sizes": "36x36",
"type": "image\/png",
"density": "0.75"
},
{
"src": ".\/favicon\/android-icon-48x48.png",
"sizes": "48x48",
"type": "image\/png",
"density": "1.0"
},
{
"src": ".\/favicon\/android-icon-72x72.png",
"sizes": "72x72",
"type": "image\/png",
"density": "1.5"
},
{
"src": ".\/favicon\/android-icon-96x96.png",
"sizes": "96x96",
"type": "image\/png",
"density": "2.0"
},
{
"src": ".\/favicon\/android-icon-144x144.png",
"sizes": "144x144",
"type": "image\/png",
"density": "3.0"
},
{
"src": ".\/favicon\/android-icon-192x192.png",
"sizes": "192x192",
"type": "image\/png",
"density": "4.0"
}
]
}

View File

@@ -0,0 +1 @@
<svg viewBox="0 0 49 41" xmlns="http://www.w3.org/2000/svg"><g id="Page-1" stroke="none" stroke-width="1" fill="none" fill-rule="evenodd"><polyline id="Path" stroke="#4291DB" stroke-width="11" points="3.75653148 18.9804052 22.7537424 33.0899986 44.5883171 3.49546922"></polyline></g></svg>

Binary file not shown (image, 3.8 KiB).

View File

@@ -0,0 +1,355 @@
/* http://meyerweb.com/eric/tools/css/reset/
v2.0 | 20110126
License: none (public domain)
*/
html, body, div, span, applet, object, iframe,
h1, h2, h3, h4, h5, h6, p, blockquote, pre,
a, abbr, acronym, address, big, cite, code,
del, dfn, em, img, ins, kbd, q, s, samp,
small, strike, strong, sub, sup, tt, var,
b, u, i, center,
dl, dt, dd, ol, ul, li,
fieldset, form, label, legend,
table, caption, tbody, tfoot, thead, tr, th, td,
article, aside, canvas, details, embed,
figure, figcaption, footer, header, hgroup,
menu, nav, output, ruby, section, summary,
time, mark, audio, video {
margin: 0;
padding: 0;
border: 0;
font-size: 100%;
font: inherit;
vertical-align: baseline;
}
/* HTML5 display-role reset for older browsers */
article, aside, details, figcaption, figure,
footer, header, hgroup, menu, nav, section {
display: block;
}
body {
line-height: 1;
}
ol, ul {
list-style: none;
}
blockquote, q {
quotes: none;
}
blockquote:before, blockquote:after,
q:before, q:after {
content: '';
content: none;
}
table {
border-collapse: collapse;
border-spacing: 0;
}
body {
background: #fff;
font-size: 16px;
font-family: 'Roboto', 'Noto', sans-serif;
}
h1, h2, h3, h4, h5, h6 {
display: block;
&.panel {
border-bottom: 1px solid #eaecef;
padding: .5rem .6rem;
max-width: 31.875rem;
}
}
.header h2.h2-panel {
font-size: 1.9rem;
max-width: 100%;
padding: .5rem .6rem;
}
h1 {
font-weight: bold;
font-size: 2.6rem;
margin-bottom: .2rem;
}
h2 {
font-weight: bold;
font-size: 2.2rem;
}
h3 {
font-weight: bold;
font-size: 1.8rem;
}
h4 {
font-weight: bold;
font-size: 1.4rem;
}
h5 {
font-weight: bold;
font-size: 1.2rem;
}
h6 {
font-size: 1.2rem;
color: #776;
}
a[href] {
color: #7bc7ff;
font-size: 1.1rem;
text-decoration: underline;
&:hover {
text-decoration: none;
}
}
ul.links {
& > li {
line-height: 3rem;
display: inline-flex;
margin-bottom: .5rem;
margin-right: .5rem;
}
a {
line-height: 1.25;
text-align: center;
white-space: nowrap;
vertical-align: middle;
user-select: none;
border: 1px solid #ccc;
padding: .5rem 1rem;
font-size: 1rem;
border-radius: .25rem;
transition: all .2s ease-in-out;
color: #292b2c;
background-color: #fff;
}
}
ul, ul > li {
list-style: none;
}
ul {
padding: 1rem 1rem;
background: #f6f8fa;
}
hr {
padding-bottom: 0.3em;
font-size: 1.5em;
border: 0;
border-bottom: 1px solid #eaecef;
margin: 0;
}
.questions {
font-size: 1.4rem;
li {
padding-left: 1rem;
& + li {
padding-top: 1rem;
}
}
.quest {
font-weight: bold;
padding-bottom: .3rem;
}
}
ul.list li {
& + li {
padding-top: .5rem;
}
a {
color: #63a7d2;
line-height: 1.7rem;
}
$blue: rgba(66, 145, 219, 0.53);
$active-blue: rgba(186, 214, 241, 0.53);
label {
position: relative;
cursor: pointer;
padding-left: 1em;
}
input[type=checkbox] {
display: none;
//position: absolute;
//width: 1px;
//height: 1px;
//border: 0 none;
//background: transparent;
//margin: 0;
//margin-top: 5px;
& + label {
background: #fff no-repeat center left 2px;
display: inline-block;
width: 1rem;
height: 1.25rem;
top: -.01rem;
position: relative;
border-radius: 3px;
border: 2px solid $active-blue;
//transition: border .3s ease;
font-size: 1rem;
box-sizing: border-box;
margin-right: .4rem;
}
&:checked {
& + label {
background-image: url('ok_flag.svg');
background-size: 75%;
border-color: $blue;
}
}
}
}
.header {
position: relative;
}
.download-links {
position: absolute;
top: .5rem;
right: 1rem;
a {
background: url('sprite_download.png');
background-position-y: bottom;
display: inline-block;
width: 90px;
height: 70px;
text-indent: -5000px;
overflow: hidden;
opacity: .9;
& + a {
background-position-x: right;
}
&[active] {
opacity: 1;
}
}
}
.container {
width: 100%;
margin: auto;
background: #f5f7fa;
min-height: 100vh;
}
// Small devices (landscape phones, 576px and up)
@media (max-width: 767px) {
.header h1.panel,
.header h2 {
max-width: 20rem;
max-width: calc(100% - 185px);
font-size: 2.2rem;
}
}
@media (max-width: 360px) {
.header h1.panel {
padding-top: 3rem;
}
}
@media (max-width: 575px) {
.header h1.panel {
max-width: 16rem;
max-width: calc(100% - 165px);
}
.download-links {
right: .1rem;
transform: scale(.9);
}
}
@media (min-width: 576px) {
.container {
width: 550px;
}
}
// Medium devices (tablets, 768px and up)
@media (min-width: 768px) {
.container {
width: 750px;
}
}
// Large devices (desktops, 992px and up)
@media (min-width: 992px) {
.container {
width: 750px;
}
}
// Extra large devices (large desktops, 1200px and up)
@media (min-width: 1200px) {
.container {
width: 750px;
}
}
// touch devices
@media (pointer: coarse) {
ul.list li {
a {
font-size: 1.4rem;
}
& + li {
padding-top: 1rem;
}
}
}
h4[data-symbol] {
margin-left: 1.3rem;
position: relative;
&:after {
font-weight: 500;
font-size: 1.2rem;
opacity: .9;
font-style: normal;
content: attr(data-symbol) ")";
position: absolute;
left: -1rem;
top: 1.05rem;
}
}
.h-ul {
padding: 1rem 1rem 0;
&.h-ul-two {
padding: 1rem 1rem 0 2rem;
}
}
.content-inner-wrapper {
padding-left: .4rem;
}
span.subcontent {
font-size: 1rem;
font-weight: normal;
color: #565656;
}
.last-update {
text-align: center;
font-size: .7rem;
color: #ccc;
padding-bottom: 1rem;
}

View File

@@ -0,0 +1,22 @@
from json import loads
from requests import get
from os import system, path

_path = path.dirname(path.dirname(path.realpath(__file__)))
all_manga_list = None
n = 0

# retry up to 10 times: the site is not always reachable
while n < 10:
    try:
        all_manga_list = loads(get('http://inmanga.com/OnMangaQuickSearch/Source/QSMangaList.json').text)
        break
    except Exception:
        pass
    n += 1

if not all_manga_list:
    exit('Error! QSMangaList is not correct json?')

for i in all_manga_list:
    print('Downloading %s' % i['Name'])
    _str = 'cd {}; python3 manga.py --cli -i -u http://inmanga.com/ver/manga/{}/{}'
    system(_str.format(_path, i['Name'], i['Identification']))

View File

@@ -0,0 +1,161 @@
FROM debian:jessie-slim
# ensure local python is preferred over distribution python
ENV PATH /usr/local/bin:$PATH
# http://bugs.python.org/issue19846
# > At the moment, setting "LANG=C" on a Linux system *fundamentally breaks Python 3*, and that's not OK.
ENV LANG C.UTF-8
# runtime dependencies
RUN apt-get update && apt-get install -y --no-install-recommends \
ca-certificates \
netbase \
&& rm -rf /var/lib/apt/lists/*
ENV GPG_KEY 0D96DF4D4110E5C43FBFB17F2D347EA6AA65421D
ENV PYTHON_VERSION 3.6.8
# if this is called "PIP_VERSION", pip explodes with "ValueError: invalid truth value '<VERSION>'"
ENV PYTHON_PIP_VERSION 19.1.1
ARG HOST_UID=1000
ARG HOST_GID=1000
ARG HOST_USER=manga
ARG HOST_GROUP=manga
ARG HOME='/home/manga'
RUN groupadd -g $HOST_GID $HOST_GROUP \
&& groupadd sudonopswd \
&& useradd -m -l -g $HOST_GROUP -u $HOST_UID $HOST_USER
RUN mkdir $HOME -p; \
chown $HOST_USER:$HOST_GROUP $HOME
RUN touch $HOME/.bashrc; \
mkdir $HOME/Manga; \
chown $HOST_USER:$HOST_GROUP $HOME/.bashrc; \
chown $HOST_USER:$HOST_GROUP $HOME/Manga
RUN set -ex \
\
&& savedAptMark="$(apt-mark showmanual)" \
&& apt-get update && apt-get install -y --no-install-recommends \
dpkg-dev \
gcc \
libbz2-dev \
libc6-dev \
libexpat1-dev \
libffi-dev \
libgdbm-dev \
liblzma-dev \
libncursesw5-dev \
libreadline-dev \
libsqlite3-dev \
libssl-dev \
make \
tk-dev \
build-essential \
wget \
xz-utils \
zlib1g-dev \
curl \
# python autocomplete utility
python3-argcomplete \
# as of Stretch, "gpg" is no longer included by default
$(command -v gpg > /dev/null || echo 'gnupg dirmngr') \
\
&& wget -O python.tar.xz "https://www.python.org/ftp/python/${PYTHON_VERSION%%[a-z]*}/Python-$PYTHON_VERSION.tar.xz" \
&& wget -O python.tar.xz.asc "https://www.python.org/ftp/python/${PYTHON_VERSION%%[a-z]*}/Python-$PYTHON_VERSION.tar.xz.asc" \
&& export GNUPGHOME="$(mktemp -d)" \
&& gpg --batch --keyserver ha.pool.sks-keyservers.net --recv-keys "$GPG_KEY" \
&& gpg --batch --verify python.tar.xz.asc python.tar.xz \
&& { command -v gpgconf > /dev/null && gpgconf --kill all || :; } \
&& rm -rf "$GNUPGHOME" python.tar.xz.asc \
&& mkdir -p /usr/src/python \
&& tar -xJC /usr/src/python --strip-components=1 -f python.tar.xz \
&& rm python.tar.xz \
\
&& cd /usr/src/python \
&& gnuArch="$(dpkg-architecture --query DEB_BUILD_GNU_TYPE)" \
&& ./configure \
--build="$gnuArch" \
--enable-loadable-sqlite-extensions \
--enable-shared \
--with-system-expat \
--with-system-ffi \
--without-ensurepip \
&& make -j "$(nproc)" \
&& make install \
&& ldconfig \
\
&& apt-mark auto '.*' > /dev/null \
&& apt-mark manual $savedAptMark \
&& find /usr/local -type f -executable -not \( -name '*tkinter*' \) -exec ldd '{}' ';' \
| awk '/=>/ { print $(NF-1) }' \
| sort -u \
| xargs -r dpkg-query --search \
| cut -d: -f1 \
| sort -u \
| xargs -r apt-mark manual; \
\
wget -O get-pip.py 'https://bootstrap.pypa.io/get-pip.py'; \
\
python3 get-pip.py \
--disable-pip-version-check \
--no-cache-dir \
"pip==$PYTHON_PIP_VERSION" \
; \
python3 -mpip --version \
\
&& python3 --version \
# Because it requires gcc
&& python3 -mpip install manga-py -U \
&& curl -sL https://deb.nodesource.com/setup_12.x | bash - \
&& apt-get install -y nodejs \
&& apt-get purge -y --auto-remove -o APT::AutoRemove::RecommendsImportant=false \
&& rm -rf /var/lib/apt/lists/* \
\
&& find /usr/local -depth \
\( \
\( -type d -a \( -name test -o -name tests \) \) \
-o \
\( -type f -a \( -name '*.pyc' -o -name '*.pyo' \) \) \
\) -exec rm -rf '{}' + \
&& rm -rf /usr/src/python
# make some useful symlinks that are expected to exist
RUN cd /usr/local/bin \
&& ln -s idle3 idle \
&& ln -s pydoc3 pydoc \
&& ln -s python3 python \
&& ln -s python3-config python-config
RUN set -ex; \
\
savedAptMark="$(apt-mark showmanual)"; \
\
apt-mark auto '.*' > /dev/null; \
[ -z "$savedAptMark" ] || apt-mark manual $savedAptMark; \
rm -rf /var/lib/apt/lists/*; \
apt-get purge -y --auto-remove -o APT::AutoRemove::RecommendsImportant=false; \
\
find /usr/local -depth \
\( \
\( -type d -a \( -name test -o -name tests \) \) \
-o \
\( -type f -a \( -name '*.pyc' -o -name '*.pyo' \) \) \
\) -exec rm -rf '{}' +; \
rm -f get-pip.py
USER $HOST_USER
WORKDIR $HOME
RUN echo 'Manga-py version: '; \
manga-py --version; \
rm -rf /tmp/.P*
# docker run -it -v /tmp/destination:/home/manga mangadl/manga-py
CMD ["bash"]

View File

@@ -0,0 +1,8 @@
#!/usr/bin/python3
# -*- coding: utf-8 -*-
from manga_py import main

if __name__ == '__main__':
    main()

View File

@@ -0,0 +1,103 @@
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import traceback
from atexit import register as atexit_register
from json import dumps
from os import makedirs, path
from shutil import rmtree
from sys import exit, stderr

try:
    from loguru import logger
    catch = logger.catch
except ImportError:
    def catch(x):
        # fallback decorator: warn, but keep the wrapped function usable
        print('Setup in progress?', file=stderr)
        return x

try:
    from .cli import Cli
    from .cli.args import get_cli_arguments
    from .fs import get_temp_path, get_info
    from .info import Info
    from .meta import __version__
except ImportError:
    print('Setup in progress?', file=stderr)

__author__ = 'Sergey Zharkov'
__license__ = 'MIT'
__email__ = 'sttv-pc@mail.ru'


@atexit_register
def before_shutdown():
    temp_dir = get_temp_path()
    path.isdir(temp_dir) and rmtree(temp_dir)


def _init_cli(args, _info):
    error_lvl = -5
    try:
        _info.start()
        cli_mode = Cli(args, _info)
        cli_mode.start()
        code = 0
    except Exception as e:
        traceback.print_tb(e.__traceback__, error_lvl, file=stderr)
        code = 1
        _info.set_error(e)
    return code


def _run_util(args) -> tuple:
    parse_args = args.parse_args()
    _info = Info(parse_args)
    code = _init_cli(args, _info)
    if parse_args.print_json:
        _info = dumps(
            _info.get(),
            indent=2,
            separators=(',', ': '),
            sort_keys=True,
        )
    else:
        _info = []
    return code, _info


def _update_all(args):
    parse_args = args.parse_args()
    parse_args.quiet or print('Update all', file=stderr)
    multi_info = {}
    dst = parse_args.destination
    json_info = get_info(dst)
    for i in json_info:
        parse_args.manga_name = i['manga_name']
        parse_args.url = i['url']
        code, _info = _run_util(args)
        multi_info[i['directory']] = _info
    parse_args.quiet or (parse_args.print_json and print(multi_info))


@catch
def main():
    # if ~__version__.find('alpha'):
    #     print('Alpha release! There may be errors!', file=stderr)
    temp_path = get_temp_path()
    path.isdir(temp_path) or makedirs(temp_path)
    args = get_cli_arguments()
    parse_args = args.parse_args()
    code, _info = _run_util(args)
    parse_args.quiet or (parse_args.print_json and print(_info))
    exit(code)


if __name__ == '__main__':
    main()

View File

@@ -0,0 +1,8 @@
from .abstract import Abstract
from .archive import Archive
from .base import Base
from .callbacks import Callbacks
from .cf_protect import CloudFlareProtect
from .chapter_helper import ChapterHelper
from .static import Static
from .web_driver import WebDriver

View File

@@ -0,0 +1,51 @@
from abc import abstractmethod


class Abstract:
    @abstractmethod
    def get_main_content(self):  # called once
        pass

    @abstractmethod
    def get_manga_name(self) -> str:  # called once
        return ''

    @abstractmethod
    def get_chapters(self) -> list:  # called once
        return []

    def prepare_cookies(self):  # for sites with cookie protection
        pass

    @abstractmethod
    def get_files(self) -> list:  # called on every volume loop
        return []

    # @abstractmethod
    # def get_archive_name(self) -> str:
    #     pass

    # for chapters selected manually (cli)
    @abstractmethod
    def get_chapter_index(self) -> str:
        pass

    def book_meta(self) -> dict:
        pass

    def before_download_chapter(self):
        pass

    def get_cover(self):
        pass

    def before_file_save(self, url, idx) -> str:  # must return a url!
        return url

    def after_file_save(self, _path: str, idx: int):
        pass

    @abstractmethod
    def chapter_for_json(self) -> str:
        pass
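

# Illustrative sketch (not part of the original file): a minimal provider
# satisfying the Abstract contract above. The class name and URLs are
# hypothetical; a real provider scrapes them from the target site.
class ExampleProvider(Abstract):
    def get_main_content(self):
        return '<html>...</html>'  # usually the manga's main page

    def get_manga_name(self) -> str:
        return 'example-manga'

    def get_chapters(self) -> list:
        return ['http://example.org/manga/ch-2', 'http://example.org/manga/ch-1']

    def get_files(self) -> list:
        # image urls of the chapter currently being processed
        return ['http://example.org/manga/ch-1/001.png']

    def get_chapter_index(self) -> str:
        return '1-0'

    def chapter_for_json(self) -> str:
        return 'http://example.org/manga/ch-1'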

View File

@@ -0,0 +1,82 @@
from os import path
from zipfile import ZipFile, ZIP_DEFLATED

from manga_py.fs import is_file, make_dirs, basename, dirname, unlink, get_temp_path
# from PIL import Image as PilImage
from manga_py.image import Image


class Archive:
    _archive = None
    _writes = None
    files = None
    not_change_files_extension = False
    no_webp = False
    has_error = False

    def __init__(self):
        self.files = []
        self._writes = {}

    def write_file(self, data, in_arc_name):
        self._writes[in_arc_name] = data

    def add_file(self, file, in_arc_name=None):
        if in_arc_name is None:
            in_arc_name = basename(file)
        self.files.append((file, in_arc_name))

    def set_files_list(self, files):
        self.files = files

    def add_book_info(self, data):
        self.write_file(data, 'comicbook.xml')  # write_file(data, in_arc_name)

    def __add_files(self):
        for file in self.files:
            if is_file(file[0]):
                ext = self.__update_image_extension(file[0])
                if self.no_webp and ext[ext.rfind('.'):] == '.webp':
                    jpeg = ext[:ext.rfind('.')] + '.jpeg'
                    jpeg_path = path.join(dirname(file[0]), jpeg)
                    Image(file[0]).convert(jpeg_path)
                    file = jpeg_path, jpeg
                elif ext:
                    file = file[0], ext
                self._archive.write(*file)

    def __add_writes(self):
        for file in self._writes:
            self._archive.writestr(file, self._writes[file])

    def add_info(self, data):
        self.write_file(data, 'info.txt')

    def make(self, dst):
        if not len(self.files) and not len(self._writes):
            return
        make_dirs(dirname(dst))
        self._archive = ZipFile(dst, 'w', ZIP_DEFLATED)
        try:
            self.__add_files()
            self.__add_writes()
        finally:
            self._archive.close()
        self._maked()

    def _maked(self):
        # remove the source files after the archive has been built
        for file in self.files:
            unlink(file[0])

    def __update_image_extension(self, filename) -> str:
        fn, extension = path.splitext(filename)
        if not self.not_change_files_extension:
            ext = Image.real_extension(get_temp_path(filename))
            if ext:
                extension = ext
        return basename(fn + extension)

View File

@@ -0,0 +1,157 @@
import re
from os import path
from sys import stderr

from loguru import logger
from lxml.html import HtmlElement

from manga_py.http import Http
from manga_py.image import Image


class Base:
    _storage = None
    _params = None
    _image_params = None
    _http_kwargs = None
    __http = None

    def __init__(self):
        self._storage = {
            'cookies': {},
            'main_content': None,
            'chapters': [],
            'current_chapter': 0,
            'current_file': 0,
            'proxies': {},
            'domain_uri': None,
        }
        self._params = {
            'destination': 'Manga',
            'cf-protect': False,
        }
        self._image_params = {
            'crop': (0, 0, 0, 0),
            # 'crop': (left, upper, right, lower)
            'auto_crop': False,
            # 'auto_crop': True,
        }
        self._http_kwargs = {}

    def _archive_type(self):
        arc_type = 'zip'
        if self._params['cbz']:
            arc_type = 'cbz'
        return arc_type

    def get_url(self):
        return self._params['url']

    @property
    def domain(self) -> str:
        try:
            if not self._storage.get('domain_uri', None):
                self._storage['domain_uri'] = re.search('(https?://[^/]+)', self._params['url']).group(1)
            return self._storage.get('domain_uri', '')
        except Exception:
            print('url is broken!', file=stderr)
            exit()

    @staticmethod
    def image_auto_crop(src_path, dest_path=None):
        image = Image(src_path=src_path)
        image.crop_auto(dest_path=dest_path)
        image.close()

    def image_manual_crop(self, src_path, dest_path=None):  # sizes: (left, top, right, bottom)
        crop = self._image_params['crop']
        if isinstance(crop, tuple) and crop != (0, 0, 0, 0):
            image = Image(src_path=src_path)
            image.crop_manual_with_offsets(offsets=crop, dest_path=dest_path)
            image.close()

    def _build_http_params(self, params):
        if params is None:
            params = {}
        params.setdefault('allow_webp', not self._params.get('disallow_webp', None))
        params.setdefault('referer', self._storage.get('referer', self.domain))
        params.setdefault('user_agent', self._get_user_agent())
        params.setdefault('proxies', self._storage.get('proxies', None))
        params.setdefault('cookies', self._storage.get('cookies', None))
        params.setdefault('kwargs', self._http_kwargs)
        return params

    def http(self, new=False, params=None) -> Http:
        http_params = self._build_http_params(params)
        if new:
            http = Http(**http_params)
            return http
        elif not self.__http:
            self.__http = Http(**http_params)
        return self.__http

    def http_get(self, url: str, headers: dict = None, cookies: dict = None):
        return self.http().get(url=url, headers=headers, cookies=cookies)

    def http_post(self, url: str, headers: dict = None, cookies: dict = None, data=()):
        return self.http().post(url=url, headers=headers, cookies=cookies, data=data)

    def _get_user_agent(self):
        ua_storage = self._storage.get('user_agent', None)
        ua_params = self._params.get('user_agent', None)
        if self._params.get('cf_protect', False):
            return ua_storage
        return ua_params

    @property
    def chapter_id(self):
        return self._storage.get('current_chapter', 0)

    @chapter_id.setter
    def chapter_id(self, idx):
        self._storage['current_chapter'] = idx

    @classmethod
    def __normalize_chapters(cls, n, element):
        if isinstance(element, HtmlElement):
            return n(element.get('href'))
        if isinstance(element, str):
            return n(element)
        return element

    def _prepare_chapters(self, chapters):
        n = self.http().normalize_uri
        items = []
        if chapters and len(chapters):
            for i in chapters:
                url = self.__normalize_chapters(n, i)
                items.append(url)
        else:
            logger.warning('Chapters list empty. Check %s' % self.get_url())
        return items

    @property
    def chapter(self):
        return self._storage['chapters'][self.chapter_id]

    def get_current_file(self):
        return self._storage['files'][self._storage['current_file']]

    def book_meta(self) -> dict:
        return {}

    def _image_name(self, idx, filename):
        if idx is None:
            idx = self._storage['current_file']
        fn, extension = path.splitext(filename)
        _path = '{:0>3}_{}'.format(idx, fn)
        if self._params['rename_pages']:
            _path = '{:0>3}'.format(idx)
        return _path + extension

    def chapter_for_json(self) -> str:
        return self.chapter

    def put_info_json(self, meta):
        # manga_name, url, directory
        pass
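
# Illustrative behaviour of _image_name (assumption, not from the original file):
#   _image_name(5, 'page.png')  -> '005_page.png'
#   with rename_pages enabled   -> '005.png'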

View File

@@ -0,0 +1,35 @@
from typing import Callable


class Callbacks:
    def _call_files_progress_callback(self):
        if callable(self.progress):
            _max, _current = len(self._storage['files']), self._storage['current_file']
            self.progress(_max, _current, _current < 1)

    def set_quest_callback(self, callback: Callable):  # required call from initiator (CLI, GUI)
        setattr(self, 'quest', callback)

    def set_progress_callback(self, callback: Callable):  # required call from initiator (CLI, GUI)
        setattr(self, 'progress', callback)

    def set_log_callback(self, callback: Callable):  # required call from initiator (CLI, GUI)
        setattr(self, 'log', callback)

    def set_quest_password_callback(self, callback: Callable):  # required call from initiator (CLI, GUI)
        setattr(self, 'quest_password', callback)

    def quest(self, *args, **kwargs):
        pass

    def quest_password(self, *args, **kwargs):
        pass

    def progress(self, *args, **kwargs):
        pass

    def log(self, *args, **kwargs):
        pass

    def book_meta(self) -> dict:
        return {}
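

# Illustrative sketch (not part of the original file): how an initiator
# (CLI or GUI) could wire the callbacks; the lambdas are placeholders.
def wire_callbacks(provider: Callbacks):
    provider.set_log_callback(lambda text, **kwargs: print(text, **kwargs))
    provider.set_progress_callback(lambda total, current, re_init=False: None)
    provider.set_quest_callback(lambda variants, title, select_type=0: 'Answer')
    provider.set_quest_password_callback(lambda title: 'password')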

View File

@@ -0,0 +1,18 @@
from sys import stderr

import cloudscraper


class CloudFlareProtect:
    protector = []

    def run(self, url):  # pragma: no cover
        if not self.protector:
            scraper = cloudscraper.create_scraper()
            try:
                self.protector = scraper.get_tokens(url)
            except Exception as e:
                print('CF error! %s' % e, file=stderr)
        return self.protector
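
# Illustrative usage (assumption, not from the original file): cloudscraper's
# get_tokens() returns a (cookies, user_agent) pair that can be replayed later:
# tokens, user_agent = CloudFlareProtect().run('http://example.org')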

View File

@@ -0,0 +1,15 @@
# cli chapters parser
class ChapterHelper:
    chapters = ''

    def __init__(self, chapters: str):
        self.chapters = chapters
        if isinstance(self.chapters, str):
            self.chapters = self.chapters.split(' ')

    def get_chapters(self, urls):
        # keep only the urls whose index was listed on the cli
        chapters = []
        for i, url in enumerate(urls):
            if str(i) in self.chapters:  # cli arguments are strings
                chapters.append(url)
        return chapters
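

# Illustrative usage (assumption, not from the original file):
if __name__ == '__main__':
    helper = ChapterHelper('0 2')
    print(helper.get_chapters(['url0', 'url1', 'url2']))  # -> ['url0', 'url2']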

View File

@@ -0,0 +1,41 @@
from lxml.html import document_fromstring
from purifier.purifier import HTMLPurifier


class Static:
    @staticmethod
    def _clear_html(body):
        purifier = HTMLPurifier({
            'div': ['*'], 'span': ['*'],
            'img': ['*'], 'a': ['*'],
            'h1': ['*'], 'h2': ['*'],
            'h3': ['*'], 'h4': ['*'],
            'h5': ['*'], 'h6': ['*'],
        })
        return purifier.feed(body)

    @staticmethod
    def document_fromstring(body, selector: str = None, idx: int = None):  # pragma: no cover
        result = document_fromstring(body)  # todo
        if isinstance(selector, str):
            result = result.cssselect(selector)
        if isinstance(idx, int):
            result = result[idx]
        return result

    @staticmethod
    def _set_if_not_none(var, key, value):  # pragma: no cover
        if value is not None:
            var[key] = value

    @staticmethod
    def __test_ascii(i):
        o = ord(i)
        _ = 39 < o < 127
        _ = _ and o not in [42, 47, 92, 94]
        return _ or o > 161

    @staticmethod
    def remove_not_ascii(value):
        return "".join(i for i in value if i == '_' or Static.__test_ascii(i))

View File

@@ -0,0 +1,48 @@
from os import chmod
from sys import platform
from zipfile import ZipFile
from requests import get
from manga_py.fs import is_file, dirname, path_join, get_util_home_path
class WebDriver:
driver_version = '2.40'
@staticmethod
def is_win():
return ~platform.find('win32')  # bitwise-not trick: truthy when 'win32' is found in sys.platform
def download_driver(self):
url_prefix = 'https://chromedriver.storage.googleapis.com/'
url = '/chromedriver_linux64.zip'
if ~platform.find('darwin'):
url = '/chromedriver_mac64.zip'
if self.is_win():
url = '/chromedriver_win32.zip'
path = path_join(get_util_home_path(), 'driver.zip')
with open(path, 'wb') as driver:
driver.write(get(url_prefix + self.driver_version + url).content)
with ZipFile(path) as file:
file.extractall(dirname(self._driver_path()))
def _driver_path(self):
if self.is_win():
driver = 'chromedriver.exe'
else:
driver = 'chromedriver'
return path_join(get_util_home_path(), driver)
def get_driver(self):
from selenium import webdriver # need, if captcha detected
driver_path = self._driver_path()
if not is_file(driver_path):
self.download_driver()
self.is_win() or chmod(driver_path, 0o755)
driver = webdriver.Chrome(executable_path=driver_path)
driver.set_window_size(500, 600)
return driver
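
A sketch of the intended flow; the chromedriver binary is downloaded once and cached in the util home directory (the URL below is illustrative):

wd = WebDriver()
driver = wd.get_driver()            # fetches chromedriver on first run
driver.get('https://example.com/')  # illustrative URL, e.g. a captcha page
driver.quit()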

View File

@@ -0,0 +1,95 @@
import sys
from argparse import ArgumentParser
from getpass import getpass
from os import name as os_name
from progressbar import ProgressBar
from manga_py.fs import check_free_space, get_temp_path
from manga_py.parser import Parser
class Cli: # pragma: no cover
args = None
parser = None
_info = None
__progress_bar = None
def __init__(self, args: ArgumentParser, info=None):
self.args = args.parse_args()
self.parser = Parser(args)
self._info = info
space = self.args.min_free_space
if not check_free_space(get_temp_path(), space) or not check_free_space(self.args.destination, space):
raise OSError('No space left on device')
def start(self):
try:
self.parser.init_provider(
progress=self.progress,
log=self.print,
quest=self.quest,
quest_password=self.quest_password,
info=self._info,
)
except AttributeError as e:
print(e, file=sys.stderr)
print('Please check that the domain you entered is supported by manga-py: ', file=sys.stderr)
print('- https://manga-py.com/manga-py/#resources-list', file=sys.stderr)
print('- https://manga-py.github.io/manga-py/#resources-list (alternative)', file=sys.stderr)
print('- https://yuru-yuri.github.io/manga-py/ (deprecated)', file=sys.stderr)
print('Make sure that the URL you entered is correct\n\nTrace:', file=sys.stderr)
raise e
self.parser.start()
self.__progress_bar and self.__progress_bar.value > 0 and self.__progress_bar.finish()
self.args.quiet or self.print(' ')
def __init_progress(self, items_count: int, re_init: bool):
if re_init or not self.__progress_bar:
if re_init:
self.__progress_bar.finish()
bar = ProgressBar()
self.__progress_bar = bar(range(items_count))
self.__progress_bar.init()
def progress(self, items_count: int, current_item: int, re_init: bool = False):
if not items_count:
return
if not self.args.no_progress and not self.args.print_json:
current_val = 0
if self.__progress_bar:
current_val = self.__progress_bar.value
self.__init_progress(items_count, re_init and current_val > 0)
self.__progress_bar.update(current_item)
def print(self, text, **kwargs):
if os_name == 'nt':
text = str(text).encode().decode(sys.stdout.encoding, 'ignore')
self.args.quiet or print(text, **kwargs)
def _single_quest(self, variants, title):
self.print(title)
for v in variants:
self.print(v)
return input()
def _multiple_quest(self, variants, title):
self.print('Accept - blank line + enter')
self.print(title)
for v in variants:
self.print(v)
result = []
while True:
_ = input().strip()
if not len(_):
return result
result.append(_)
def quest(self, variants: enumerate, title: str, select_type=0): # 0 = single, 1 = multiple
if select_type:
return self._multiple_quest(variants, title)
return self._single_quest(variants, title)
def quest_password(self, title):
return getpass(title)

View File

@@ -0,0 +1,338 @@
'''
manga-py module for CLI and its options.
'''
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter, RawDescriptionHelpFormatter
from manga_py.meta import __version__
class DescriptionDefaultsHelpFormatter(ArgumentDefaultsHelpFormatter,
RawDescriptionHelpFormatter):
'''
Formatter for the --help CLI output, combining two features:
render the program's description raw,
and show the options' default values.
'''
def _image_args(args_parser): # pragma: no cover
args = args_parser.add_argument_group('Image options')
args.add_argument(
'-E',
'--not-change-files-extension',
action='store_true',
help=(
'Save downloaded files to archive "as is".'
)
)
args.add_argument(
'-W',
'--no-webp',
action='store_true',
help=(
'Convert `*.webp` images to `*.jpg` format.'
)
)
def _debug_args(args_parser): # pragma: no cover
args = args_parser.add_argument_group('Debug / Simulation options')
args.add_argument(
'-h',
'--help',
action='help',
help=(
'Show this help and exit.'
)
)
args.add_argument(
'-j',
'--print-json',
action='store_true',
help=(
'Print information about the results in the JSON format (after completion).'
)
)
args.add_argument(
'-l',
'--simulate',
action='store_true',
help=(
'Simulate running %(prog)s: '
'1) do not download files and '
'2) do not write anything to disk.'
)
)
args.add_argument(
'-i',
'--show-current-chapter-info',
action='store_true',
help=(
'Show current processing chapter info.'
)
)
args.add_argument(
'-b',
'--debug',
action='store_true',
help=(
'Debug %(prog)s.'
)
)
args.add_argument(
'-q',
'--quiet',
action='store_true',
help=(
'Don\'t show any messages.'
)
)
def _downloading_args(args_parser): # pragma: no cover
args = args_parser.add_argument_group('Downloading options')
args.add_argument(
'-s',
'--skip-volumes',
metavar='COUNT',
type=int,
help=(
'Skip the first %(metavar)s volumes.'
),
default=0
)
args.add_argument(
'-m',
'--max-volumes',
metavar='COUNT',
type=int,
default=0,
help=(
'Download at most %(metavar)s volumes. '
'E.g.: `--max-volumes 2` will download at most 2 volumes. '
'If %(metavar)s is `0` (zero), all available volumes are downloaded.'
)
)
args.add_argument(
'-a',
'--user-agent',
type=str,
help=(
'Set a user-agent. '
'Does not work with protected sites.'
)
)
args.add_argument(
'-x',
'--proxy',
type=str,
help=(
'Set an HTTP proxy.'
)
)
args.add_argument(
'-e',
'--reverse-downloading',
action='store_true',
help=(
'Download manga volumes in reverse order. '
'By default, manga is downloaded in ascending order '
'(i.e. volume 00, volume 01, volume 02...). '
'If `--reverse-downloading` is given, the manga is downloaded in descending order '
'(i.e. volume 99, volume 98, volume 97...).'
)
args.add_argument(
'-w',
'--rewrite-exists-archives',
action='store_true',
help=(
'(Re)download a manga volume even if it already exists in the destination directory. '
'Your manga files can be overwritten, so be careful.'
)
)
args.add_argument(
'-t',
'--max-threads',
type=int,
default=None,
help=(
'Set the maximum number of threads, i.e. MAX_THREADS, available to manga-py. '
'Threads run in pseudo-parallel while downloading the manga images.'
)
)
args.add_argument(
'-f',
'--zero-fill',
action='store_true',
help=(
'Append a `-0` (dash-and-zero) suffix to every downloaded manga volume filename. '
'E.g. from `vol_001.zip` to `vol_001-0.zip`. '
'This keeps filenames consistent between: '
'1) normal manga volumes (e.g. vol_006.zip) and '
'2) abnormal manga volumes (e.g. vol_006-5.zip). '
'An abnormal manga volume is a re-released volume, such as one with: '
'extra chapters, '
'bonuses, '
'updates, '
'corrected typos, '
'or corrected spelling errors.'
)
)
args.add_argument(
'-g',
'--with-manga-name',
action='store_true',
help=(
'Prepend the manga name to every downloaded manga volume filename. '
'E.g. from `vol_001.zip` to `manga_name-vol_001.zip`.'
)
)
args.add_argument(
'-o',
'--override-archive-name',
metavar='ARCHIVE_NAME',
type=str,
default='',
dest='override_archive_name',
help=(
'Prepend %(metavar)s to every downloaded manga volume filename. '
'E.g. from `vol_001.zip` to `%(metavar)s-vol_001.zip`.'
)
)
args.add_argument(
'-c',
'--min-free-space',
metavar='MB',
type=int,
default=100,
help=(
'Abort when the free disk space drops below the minimum, i.e. MB. '
'Specify the value in megabytes (MB).'
)
)
def _reader_args(args_parser): # pragma: no cover
args = args_parser.add_argument_group('Archive options')
args.add_argument(
'-z',
'--cbz',
action='store_true',
help=(
'Make `*.cbz` archives (for reader).'
)
)
args.add_argument(
'-r',
'--rename-pages',
action='store_true',
help=(
'Normalize image filenames. '
'E.g. from `0_page_1.jpg` to `0001.jpg`.'
)
)
def get_cli_arguments() -> ArgumentParser: # pragma: no cover
'''
Build the manga-py CLI argument parser with all of its options.
'''
args_parser = ArgumentParser(
add_help=False,
formatter_class=DescriptionDefaultsHelpFormatter,
prog="manga-py",
description=(
'%(prog)s is the universal manga downloader (for your offline reading).\n '
'Site: https://manga-py.com/manga-py/\n '
'Source-code: https://github.com/manga-py/manga-py\n '
'Version: ' + __version__
),
epilog=(
'So, that is how %(prog)s can be executed to download your favourite mangas.\n'
'Enjoy! 😉'
)
)
args = args_parser.add_argument_group('General options')
args.add_argument(
'url',
metavar='URL',
type=str,
help=(
'%(metavar)s, i.e. the link to the manga to download.'
)
)
args.add_argument(
'-v',
'--version',
action='version',
version=__version__,
help=(
'Show %(prog)s\'s version number and exit.'
)
)
args.add_argument(
'-n',
'--name',
metavar='NAME',
type=str,
default='',
help=(
'Rename the manga to %(metavar)s; the folder it is saved to locally is renamed as well.'
)
)
args.add_argument(
'-d',
'--destination',
metavar='PATH',
type=str,
default='Manga',
help=(
'Destination folder to where the manga will be saved locally. '
'The path will be `./%(metavar)s/manga_name/`.'
)
)
args.add_argument(
'-P',
'--no-progress',
action='store_true',
help=(
'Don\'t show progress bar.'
)
)
_image_args(args_parser)
_reader_args(args_parser)
_downloading_args(args_parser)
_debug_args(args_parser)
return args_parser
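
A sketch of how the parser is consumed; the URL and flags below are illustrative:

parser = get_cli_arguments()
args = parser.parse_args(['https://example.com/manga/1', '--cbz', '-d', 'Manga'])
print(args.url, args.cbz, args.destination)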

View File

@@ -0,0 +1,8 @@
from .ac_qq_com import AcQqComCrypt
from .base_lib import BaseLib
from .kissmanga_com import KissMangaComCrypt
from .mangago_me import MangaGoMe
from .mangarock_com import MangaRockComCrypt
from .manhuagui_com import ManhuaGuiComCrypt
from .puzzle import Puzzle
from .sunday_webry_com import SundayWebryCom

View File

@@ -0,0 +1,38 @@
class AcQqComCrypt:
_provider = None
_site_key = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/="
def __init__(self, provider):
self._provider = provider
def decode(self, data):
data = self._provider.re.sub('[^A-Za-z0-9%+/=]', '', data)
a = ''
e = 0
while e < len(data) - 4:
e += 1
b = self._site_key.find(data[e])
e += 1
d = self._site_key.find(data[e])
e += 1
f = self._site_key.find(data[e])
e += 1
g = self._site_key.find(data[e])
b = b << 2 | d >> 4
d = (d & 15) << 4 | f >> 2
h = (f & 3) << 6 | g
a += chr(b)
if f != 64:
a += chr(d)
if g != 64:
a += chr(h)
return self._protect(a)
def _protect(self, data):
try:
data = self._provider.re.search('({.+}})', data).group(1)
return self._provider.json.loads(data)
except Exception:
return {}

View File

@@ -0,0 +1,35 @@
/*
CryptoJS v3.1.2
code.google.com/p/crypto-js
(c) 2009-2013 by Jeff Mott. All rights reserved.
code.google.com/p/crypto-js/wiki/License
*/
var CryptoJS=CryptoJS||function(u,p){var d={},l=d.lib={},s=function(){},t=l.Base={extend:function(a){s.prototype=this;var c=new s;a&&c.mixIn(a);c.hasOwnProperty("init")||(c.init=function(){c.$super.init.apply(this,arguments)});c.init.prototype=c;c.$super=this;return c},create:function(){var a=this.extend();a.init.apply(a,arguments);return a},init:function(){},mixIn:function(a){for(var c in a)a.hasOwnProperty(c)&&(this[c]=a[c]);a.hasOwnProperty("toString")&&(this.toString=a.toString)},clone:function(){return this.init.prototype.extend(this)}},
r=l.WordArray=t.extend({init:function(a,c){a=this.words=a||[];this.sigBytes=c!=p?c:4*a.length},toString:function(a){return(a||v).stringify(this)},concat:function(a){var c=this.words,e=a.words,j=this.sigBytes;a=a.sigBytes;this.clamp();if(j%4)for(var k=0;k<a;k++)c[j+k>>>2]|=(e[k>>>2]>>>24-8*(k%4)&255)<<24-8*((j+k)%4);else if(65535<e.length)for(k=0;k<a;k+=4)c[j+k>>>2]=e[k>>>2];else c.push.apply(c,e);this.sigBytes+=a;return this},clamp:function(){var a=this.words,c=this.sigBytes;a[c>>>2]&=4294967295<<
32-8*(c%4);a.length=u.ceil(c/4)},clone:function(){var a=t.clone.call(this);a.words=this.words.slice(0);return a},random:function(a){for(var c=[],e=0;e<a;e+=4)c.push(4294967296*u.random()|0);return new r.init(c,a)}}),w=d.enc={},v=w.Hex={stringify:function(a){var c=a.words;a=a.sigBytes;for(var e=[],j=0;j<a;j++){var k=c[j>>>2]>>>24-8*(j%4)&255;e.push((k>>>4).toString(16));e.push((k&15).toString(16))}return e.join("")},parse:function(a){for(var c=a.length,e=[],j=0;j<c;j+=2)e[j>>>3]|=parseInt(a.substr(j,
2),16)<<24-4*(j%8);return new r.init(e,c/2)}},b=w.Latin1={stringify:function(a){var c=a.words;a=a.sigBytes;for(var e=[],j=0;j<a;j++)e.push(String.fromCharCode(c[j>>>2]>>>24-8*(j%4)&255));return e.join("")},parse:function(a){for(var c=a.length,e=[],j=0;j<c;j++)e[j>>>2]|=(a.charCodeAt(j)&255)<<24-8*(j%4);return new r.init(e,c)}},x=w.Utf8={stringify:function(a){try{return decodeURIComponent(escape(b.stringify(a)))}catch(c){throw Error("Malformed UTF-8 data");}},parse:function(a){return b.parse(unescape(encodeURIComponent(a)))}},
q=l.BufferedBlockAlgorithm=t.extend({reset:function(){this._data=new r.init;this._nDataBytes=0},_append:function(a){"string"==typeof a&&(a=x.parse(a));this._data.concat(a);this._nDataBytes+=a.sigBytes},_process:function(a){var c=this._data,e=c.words,j=c.sigBytes,k=this.blockSize,b=j/(4*k),b=a?u.ceil(b):u.max((b|0)-this._minBufferSize,0);a=b*k;j=u.min(4*a,j);if(a){for(var q=0;q<a;q+=k)this._doProcessBlock(e,q);q=e.splice(0,a);c.sigBytes-=j}return new r.init(q,j)},clone:function(){var a=t.clone.call(this);
a._data=this._data.clone();return a},_minBufferSize:0});l.Hasher=q.extend({cfg:t.extend(),init:function(a){this.cfg=this.cfg.extend(a);this.reset()},reset:function(){q.reset.call(this);this._doReset()},update:function(a){this._append(a);this._process();return this},finalize:function(a){a&&this._append(a);return this._doFinalize()},blockSize:16,_createHelper:function(a){return function(b,e){return(new a.init(e)).finalize(b)}},_createHmacHelper:function(a){return function(b,e){return(new n.HMAC.init(a,
e)).finalize(b)}}});var n=d.algo={};return d}(Math);
(function(){var u=CryptoJS,p=u.lib.WordArray;u.enc.Base64={stringify:function(d){var l=d.words,p=d.sigBytes,t=this._map;d.clamp();d=[];for(var r=0;r<p;r+=3)for(var w=(l[r>>>2]>>>24-8*(r%4)&255)<<16|(l[r+1>>>2]>>>24-8*((r+1)%4)&255)<<8|l[r+2>>>2]>>>24-8*((r+2)%4)&255,v=0;4>v&&r+0.75*v<p;v++)d.push(t.charAt(w>>>6*(3-v)&63));if(l=t.charAt(64))for(;d.length%4;)d.push(l);return d.join("")},parse:function(d){var l=d.length,s=this._map,t=s.charAt(64);t&&(t=d.indexOf(t),-1!=t&&(l=t));for(var t=[],r=0,w=0;w<
l;w++)if(w%4){var v=s.indexOf(d.charAt(w-1))<<2*(w%4),b=s.indexOf(d.charAt(w))>>>6-2*(w%4);t[r>>>2]|=(v|b)<<24-8*(r%4);r++}return p.create(t,r)},_map:"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/="}})();
(function(u){function p(b,n,a,c,e,j,k){b=b+(n&a|~n&c)+e+k;return(b<<j|b>>>32-j)+n}function d(b,n,a,c,e,j,k){b=b+(n&c|a&~c)+e+k;return(b<<j|b>>>32-j)+n}function l(b,n,a,c,e,j,k){b=b+(n^a^c)+e+k;return(b<<j|b>>>32-j)+n}function s(b,n,a,c,e,j,k){b=b+(a^(n|~c))+e+k;return(b<<j|b>>>32-j)+n}for(var t=CryptoJS,r=t.lib,w=r.WordArray,v=r.Hasher,r=t.algo,b=[],x=0;64>x;x++)b[x]=4294967296*u.abs(u.sin(x+1))|0;r=r.MD5=v.extend({_doReset:function(){this._hash=new w.init([1732584193,4023233417,2562383102,271733878])},
_doProcessBlock:function(q,n){for(var a=0;16>a;a++){var c=n+a,e=q[c];q[c]=(e<<8|e>>>24)&16711935|(e<<24|e>>>8)&4278255360}var a=this._hash.words,c=q[n+0],e=q[n+1],j=q[n+2],k=q[n+3],z=q[n+4],r=q[n+5],t=q[n+6],w=q[n+7],v=q[n+8],A=q[n+9],B=q[n+10],C=q[n+11],u=q[n+12],D=q[n+13],E=q[n+14],x=q[n+15],f=a[0],m=a[1],g=a[2],h=a[3],f=p(f,m,g,h,c,7,b[0]),h=p(h,f,m,g,e,12,b[1]),g=p(g,h,f,m,j,17,b[2]),m=p(m,g,h,f,k,22,b[3]),f=p(f,m,g,h,z,7,b[4]),h=p(h,f,m,g,r,12,b[5]),g=p(g,h,f,m,t,17,b[6]),m=p(m,g,h,f,w,22,b[7]),
f=p(f,m,g,h,v,7,b[8]),h=p(h,f,m,g,A,12,b[9]),g=p(g,h,f,m,B,17,b[10]),m=p(m,g,h,f,C,22,b[11]),f=p(f,m,g,h,u,7,b[12]),h=p(h,f,m,g,D,12,b[13]),g=p(g,h,f,m,E,17,b[14]),m=p(m,g,h,f,x,22,b[15]),f=d(f,m,g,h,e,5,b[16]),h=d(h,f,m,g,t,9,b[17]),g=d(g,h,f,m,C,14,b[18]),m=d(m,g,h,f,c,20,b[19]),f=d(f,m,g,h,r,5,b[20]),h=d(h,f,m,g,B,9,b[21]),g=d(g,h,f,m,x,14,b[22]),m=d(m,g,h,f,z,20,b[23]),f=d(f,m,g,h,A,5,b[24]),h=d(h,f,m,g,E,9,b[25]),g=d(g,h,f,m,k,14,b[26]),m=d(m,g,h,f,v,20,b[27]),f=d(f,m,g,h,D,5,b[28]),h=d(h,f,
m,g,j,9,b[29]),g=d(g,h,f,m,w,14,b[30]),m=d(m,g,h,f,u,20,b[31]),f=l(f,m,g,h,r,4,b[32]),h=l(h,f,m,g,v,11,b[33]),g=l(g,h,f,m,C,16,b[34]),m=l(m,g,h,f,E,23,b[35]),f=l(f,m,g,h,e,4,b[36]),h=l(h,f,m,g,z,11,b[37]),g=l(g,h,f,m,w,16,b[38]),m=l(m,g,h,f,B,23,b[39]),f=l(f,m,g,h,D,4,b[40]),h=l(h,f,m,g,c,11,b[41]),g=l(g,h,f,m,k,16,b[42]),m=l(m,g,h,f,t,23,b[43]),f=l(f,m,g,h,A,4,b[44]),h=l(h,f,m,g,u,11,b[45]),g=l(g,h,f,m,x,16,b[46]),m=l(m,g,h,f,j,23,b[47]),f=s(f,m,g,h,c,6,b[48]),h=s(h,f,m,g,w,10,b[49]),g=s(g,h,f,m,
E,15,b[50]),m=s(m,g,h,f,r,21,b[51]),f=s(f,m,g,h,u,6,b[52]),h=s(h,f,m,g,k,10,b[53]),g=s(g,h,f,m,B,15,b[54]),m=s(m,g,h,f,e,21,b[55]),f=s(f,m,g,h,v,6,b[56]),h=s(h,f,m,g,x,10,b[57]),g=s(g,h,f,m,t,15,b[58]),m=s(m,g,h,f,D,21,b[59]),f=s(f,m,g,h,z,6,b[60]),h=s(h,f,m,g,C,10,b[61]),g=s(g,h,f,m,j,15,b[62]),m=s(m,g,h,f,A,21,b[63]);a[0]=a[0]+f|0;a[1]=a[1]+m|0;a[2]=a[2]+g|0;a[3]=a[3]+h|0},_doFinalize:function(){var b=this._data,n=b.words,a=8*this._nDataBytes,c=8*b.sigBytes;n[c>>>5]|=128<<24-c%32;var e=u.floor(a/
4294967296);n[(c+64>>>9<<4)+15]=(e<<8|e>>>24)&16711935|(e<<24|e>>>8)&4278255360;n[(c+64>>>9<<4)+14]=(a<<8|a>>>24)&16711935|(a<<24|a>>>8)&4278255360;b.sigBytes=4*(n.length+1);this._process();b=this._hash;n=b.words;for(a=0;4>a;a++)c=n[a],n[a]=(c<<8|c>>>24)&16711935|(c<<24|c>>>8)&4278255360;return b},clone:function(){var b=v.clone.call(this);b._hash=this._hash.clone();return b}});t.MD5=v._createHelper(r);t.HmacMD5=v._createHmacHelper(r)})(Math);
(function(){var u=CryptoJS,p=u.lib,d=p.Base,l=p.WordArray,p=u.algo,s=p.EvpKDF=d.extend({cfg:d.extend({keySize:4,hasher:p.MD5,iterations:1}),init:function(d){this.cfg=this.cfg.extend(d)},compute:function(d,r){for(var p=this.cfg,s=p.hasher.create(),b=l.create(),u=b.words,q=p.keySize,p=p.iterations;u.length<q;){n&&s.update(n);var n=s.update(d).finalize(r);s.reset();for(var a=1;a<p;a++)n=s.finalize(n),s.reset();b.concat(n)}b.sigBytes=4*q;return b}});u.EvpKDF=function(d,l,p){return s.create(p).compute(d,
l)}})();
CryptoJS.lib.Cipher||function(u){var p=CryptoJS,d=p.lib,l=d.Base,s=d.WordArray,t=d.BufferedBlockAlgorithm,r=p.enc.Base64,w=p.algo.EvpKDF,v=d.Cipher=t.extend({cfg:l.extend(),createEncryptor:function(e,a){return this.create(this._ENC_XFORM_MODE,e,a)},createDecryptor:function(e,a){return this.create(this._DEC_XFORM_MODE,e,a)},init:function(e,a,b){this.cfg=this.cfg.extend(b);this._xformMode=e;this._key=a;this.reset()},reset:function(){t.reset.call(this);this._doReset()},process:function(e){this._append(e);return this._process()},
finalize:function(e){e&&this._append(e);return this._doFinalize()},keySize:4,ivSize:4,_ENC_XFORM_MODE:1,_DEC_XFORM_MODE:2,_createHelper:function(e){return{encrypt:function(b,k,d){return("string"==typeof k?c:a).encrypt(e,b,k,d)},decrypt:function(b,k,d){return("string"==typeof k?c:a).decrypt(e,b,k,d)}}}});d.StreamCipher=v.extend({_doFinalize:function(){return this._process(!0)},blockSize:1});var b=p.mode={},x=function(e,a,b){var c=this._iv;c?this._iv=u:c=this._prevBlock;for(var d=0;d<b;d++)e[a+d]^=
c[d]},q=(d.BlockCipherMode=l.extend({createEncryptor:function(e,a){return this.Encryptor.create(e,a)},createDecryptor:function(e,a){return this.Decryptor.create(e,a)},init:function(e,a){this._cipher=e;this._iv=a}})).extend();q.Encryptor=q.extend({processBlock:function(e,a){var b=this._cipher,c=b.blockSize;x.call(this,e,a,c);b.encryptBlock(e,a);this._prevBlock=e.slice(a,a+c)}});q.Decryptor=q.extend({processBlock:function(e,a){var b=this._cipher,c=b.blockSize,d=e.slice(a,a+c);b.decryptBlock(e,a);x.call(this,
e,a,c);this._prevBlock=d}});b=b.CBC=q;q=(p.pad={}).Pkcs7={pad:function(a,b){for(var c=4*b,c=c-a.sigBytes%c,d=c<<24|c<<16|c<<8|c,l=[],n=0;n<c;n+=4)l.push(d);c=s.create(l,c);a.concat(c)},unpad:function(a){a.sigBytes-=a.words[a.sigBytes-1>>>2]&255}};d.BlockCipher=v.extend({cfg:v.cfg.extend({mode:b,padding:q}),reset:function(){v.reset.call(this);var a=this.cfg,b=a.iv,a=a.mode;if(this._xformMode==this._ENC_XFORM_MODE)var c=a.createEncryptor;else c=a.createDecryptor,this._minBufferSize=1;this._mode=c.call(a,
this,b&&b.words)},_doProcessBlock:function(a,b){this._mode.processBlock(a,b)},_doFinalize:function(){var a=this.cfg.padding;if(this._xformMode==this._ENC_XFORM_MODE){a.pad(this._data,this.blockSize);var b=this._process(!0)}else b=this._process(!0),a.unpad(b);return b},blockSize:4});var n=d.CipherParams=l.extend({init:function(a){this.mixIn(a)},toString:function(a){return(a||this.formatter).stringify(this)}}),b=(p.format={}).OpenSSL={stringify:function(a){var b=a.ciphertext;a=a.salt;return(a?s.create([1398893684,
1701076831]).concat(a).concat(b):b).toString(r)},parse:function(a){a=r.parse(a);var b=a.words;if(1398893684==b[0]&&1701076831==b[1]){var c=s.create(b.slice(2,4));b.splice(0,4);a.sigBytes-=16}return n.create({ciphertext:a,salt:c})}},a=d.SerializableCipher=l.extend({cfg:l.extend({format:b}),encrypt:function(a,b,c,d){d=this.cfg.extend(d);var l=a.createEncryptor(c,d);b=l.finalize(b);l=l.cfg;return n.create({ciphertext:b,key:c,iv:l.iv,algorithm:a,mode:l.mode,padding:l.padding,blockSize:a.blockSize,formatter:d.format})},
decrypt:function(a,b,c,d){d=this.cfg.extend(d);b=this._parse(b,d.format);return a.createDecryptor(c,d).finalize(b.ciphertext)},_parse:function(a,b){return"string"==typeof a?b.parse(a,this):a}}),p=(p.kdf={}).OpenSSL={execute:function(a,b,c,d){d||(d=s.random(8));a=w.create({keySize:b+c}).compute(a,d);c=s.create(a.words.slice(b),4*c);a.sigBytes=4*b;return n.create({key:a,iv:c,salt:d})}},c=d.PasswordBasedCipher=a.extend({cfg:a.cfg.extend({kdf:p}),encrypt:function(b,c,d,l){l=this.cfg.extend(l);d=l.kdf.execute(d,
b.keySize,b.ivSize);l.iv=d.iv;b=a.encrypt.call(this,b,c,d.key,l);b.mixIn(d);return b},decrypt:function(b,c,d,l){l=this.cfg.extend(l);c=this._parse(c,l.format);d=l.kdf.execute(d,b.keySize,b.ivSize,c.salt);l.iv=d.iv;return a.decrypt.call(this,b,c,d.key,l)}})}();
(function(){for(var u=CryptoJS,p=u.lib.BlockCipher,d=u.algo,l=[],s=[],t=[],r=[],w=[],v=[],b=[],x=[],q=[],n=[],a=[],c=0;256>c;c++)a[c]=128>c?c<<1:c<<1^283;for(var e=0,j=0,c=0;256>c;c++){var k=j^j<<1^j<<2^j<<3^j<<4,k=k>>>8^k&255^99;l[e]=k;s[k]=e;var z=a[e],F=a[z],G=a[F],y=257*a[k]^16843008*k;t[e]=y<<24|y>>>8;r[e]=y<<16|y>>>16;w[e]=y<<8|y>>>24;v[e]=y;y=16843009*G^65537*F^257*z^16843008*e;b[k]=y<<24|y>>>8;x[k]=y<<16|y>>>16;q[k]=y<<8|y>>>24;n[k]=y;e?(e=z^a[a[a[G^z]]],j^=a[a[j]]):e=j=1}var H=[0,1,2,4,8,
16,32,64,128,27,54],d=d.AES=p.extend({_doReset:function(){for(var a=this._key,c=a.words,d=a.sigBytes/4,a=4*((this._nRounds=d+6)+1),e=this._keySchedule=[],j=0;j<a;j++)if(j<d)e[j]=c[j];else{var k=e[j-1];j%d?6<d&&4==j%d&&(k=l[k>>>24]<<24|l[k>>>16&255]<<16|l[k>>>8&255]<<8|l[k&255]):(k=k<<8|k>>>24,k=l[k>>>24]<<24|l[k>>>16&255]<<16|l[k>>>8&255]<<8|l[k&255],k^=H[j/d|0]<<24);e[j]=e[j-d]^k}c=this._invKeySchedule=[];for(d=0;d<a;d++)j=a-d,k=d%4?e[j]:e[j-4],c[d]=4>d||4>=j?k:b[l[k>>>24]]^x[l[k>>>16&255]]^q[l[k>>>
8&255]]^n[l[k&255]]},encryptBlock:function(a,b){this._doCryptBlock(a,b,this._keySchedule,t,r,w,v,l)},decryptBlock:function(a,c){var d=a[c+1];a[c+1]=a[c+3];a[c+3]=d;this._doCryptBlock(a,c,this._invKeySchedule,b,x,q,n,s);d=a[c+1];a[c+1]=a[c+3];a[c+3]=d},_doCryptBlock:function(a,b,c,d,e,j,l,f){for(var m=this._nRounds,g=a[b]^c[0],h=a[b+1]^c[1],k=a[b+2]^c[2],n=a[b+3]^c[3],p=4,r=1;r<m;r++)var q=d[g>>>24]^e[h>>>16&255]^j[k>>>8&255]^l[n&255]^c[p++],s=d[h>>>24]^e[k>>>16&255]^j[n>>>8&255]^l[g&255]^c[p++],t=
d[k>>>24]^e[n>>>16&255]^j[g>>>8&255]^l[h&255]^c[p++],n=d[n>>>24]^e[g>>>16&255]^j[h>>>8&255]^l[k&255]^c[p++],g=q,h=s,k=t;q=(f[g>>>24]<<24|f[h>>>16&255]<<16|f[k>>>8&255]<<8|f[n&255])^c[p++];s=(f[h>>>24]<<24|f[k>>>16&255]<<16|f[n>>>8&255]<<8|f[g&255])^c[p++];t=(f[k>>>24]<<24|f[n>>>16&255]<<16|f[g>>>8&255]<<8|f[h&255])^c[p++];n=(f[n>>>24]<<24|f[g>>>16&255]<<16|f[h>>>8&255]<<8|f[k&255])^c[p++];a[b]=q;a[b+1]=s;a[b+2]=t;a[b+3]=n},keySize:8});u.AES=p._createHelper(d)})();

View File

@@ -0,0 +1,7 @@
/*
CryptoJS v3.1.2
code.google.com/p/crypto-js
(c) 2009-2013 by Jeff Mott. All rights reserved.
code.google.com/p/crypto-js/wiki/License
*/
CryptoJS.pad.ZeroPadding={pad:function(a,c){var b=4*c;a.clamp();a.sigBytes+=b-(a.sigBytes%b||b)},unpad:function(a){for(var c=a.words,b=a.sigBytes-1;!(c[b>>>2]>>>24-8*(b%4)&255);)b--;a.sigBytes=b+1}};

View File

@@ -0,0 +1,126 @@
import base64
import codecs
import gzip
import zlib
from binascii import unhexlify
from struct import pack, unpack
from Crypto.Cipher import AES
from Crypto.Hash import SHA256, MD5
from execjs import compile
class BaseLib:
@staticmethod
def decode_escape(data): # pragma: no cover
if isinstance(data, str):
data = data.encode()
try:
data = codecs.escape_decode(data)
return data[0]
except Exception:
return ''
@staticmethod
def encode_hex(data): # pragma: no cover
return codecs.decode(data, 'hex')
@staticmethod
def to_sha_256(data): # pragma: no cover
if isinstance(data, str):
data = data.encode()
sha = SHA256.new()
sha.update(data)
return sha.digest()
@staticmethod
def decrypt_aes(iv, key, data, mode: int = AES.MODE_CBC): # pragma: no cover
aes = AES.new(key, mode, iv)
return aes.decrypt(data)
@staticmethod
def base64decode(data, altchars=None, validate=False): # pragma: no cover
return base64.b64decode(data, altchars, validate)
@staticmethod
def base64encode(data, altchars=None): # pragma: no cover
return base64.b64encode(data, altchars)
@staticmethod
def exec_js(source, js): # pragma: no cover
return compile(source).eval(js)
@staticmethod
def gunzip(data): # pragma: no cover
return gzip.decompress(data)
@staticmethod
def gzip(data, lvl: int = 9): # pragma: no cover
return gzip.compress(data, lvl)
@staticmethod
def zlib_d(data, **kwargs): # pragma: no cover
return zlib.decompress(data, **kwargs)
@staticmethod
def zlib_c(data, **kwargs): # pragma: no cover
return zlib.compress(data, **kwargs)
@staticmethod
def md5(string): # pragma: no cover
if isinstance(string, str):
string = string.encode()
_ = MD5.new()
_.update(string)
return _
@staticmethod
def pack(fmt, *args): # pragma: no cover
return pack(fmt, *args)
@staticmethod
def unpack(fmt, string): # pragma: no cover
return unpack(fmt, string)
@staticmethod
def pack_auto(int_list) -> bytes:
"""
:param int_list: list
:return: bytes
"""
base_frm = '{}B'.format(len(int_list))
return pack(base_frm, *int_list)
@staticmethod
def unpack_auto(string) -> list:
"""
:param string: str
:return: list
"""
if isinstance(string, str):
string = string.encode()
return list(string)
@staticmethod
def str2hex(string):
hex_str = ''
if isinstance(string, bytes):
string = string.decode()
for char in string:
# emit fixed two-digit pairs; hex().lstrip('0x') dropped leading zeros and broke pairing
hex_str += format(ord(char), '02x')
return hex_str
@staticmethod
def hex2str(string):
clear_str = ''
if isinstance(string, bytes):
string = string.decode()
for counter in range(0, len(string), 2):
hex_char = string[counter] + string[counter + 1]
clear_str += unhexlify(hex_char).decode()  # unhexlify returns bytes, not str
return clear_str
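
With the fixes above, the byte and hex helpers round-trip; a small sanity check using only standard-library behaviour:

data = [82, 73, 70, 70]  # b'RIFF'
assert BaseLib.unpack_auto(BaseLib.pack_auto(data)) == data
assert BaseLib.str2hex('RIFF') == '52494646'
assert BaseLib.hex2str('52494646') == 'RIFF'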

View File

@@ -0,0 +1,16 @@
from .base_lib import BaseLib
from sys import stderr
class KissMangaComCrypt(BaseLib):
def decrypt(self, iv, key, data):
iv = self.encode_hex(iv)
key = self.to_sha_256(key)
data = self.base64decode(data)
try:
return self.decrypt_aes(iv, key, data)
except Exception as e:
print(e, file=stderr)
return False

View File

@@ -0,0 +1,68 @@
from pathlib import Path
from .base_lib import BaseLib
from .puzzle import Puzzle
class MangaGoMe(BaseLib):
_key = 'e10adc3949ba59abbe56e057f20f883e'
_iv = '1234567890abcdef1234567890abcdef'
# https://codepen.io/1271/pen/mKYLrG
def decrypt(self, data):
scripts = [
'aes.js',
'aes_zp.js',
]
script = ''
path = Path(__file__).resolve().parent
for i in scripts:
with open(str(path.joinpath(i)), 'r') as f:
script += f.read()
decrypted = self.exec_js(script,
'CryptoJS.AES.decrypt("%s",CryptoJS.enc.Hex.parse("%s"),{iv:CryptoJS.enc.Hex.parse("%s"),padding:CryptoJS.pad.ZeroPadding}).toString(CryptoJS.enc.Utf8)' % (
data, self._key, self._iv))
order_js = """function replacePos(e,r,i){return e.substr(0,r)+i+e.substring(r+1,e.length)}function dorder(e,r){for(j=r.length-1;j>=0;j--)for(i=e.length-1;i-r[j]>=0;i--)i%2!=0&&(temp=e[i-r[j]],e=replacePos(e=replacePos(e,i-r[j],e[i]),i,temp));return e}"""
code = decrypted[19] + decrypted[23] + decrypted[31] + decrypted[39]
decrypted = decrypted[:19] + decrypted[20:23] + decrypted[24:31] + decrypted[32:39] + decrypted[40:]
return self.exec_js(order_js, 'dorder("%s","%s")' % (decrypted, code))
@staticmethod
def puzzle(_path, _dst, url):
values = {
'60a2b0ed56cd458c4633d04b1b76b7e9': '18a72a69a64a13a1a43a3aa42a23a66a26a19a51a54a78a34a17a31a35a15a58a29a61a48a73a74a44a52a60a24a63a20a32a7a45a53a75a55a62a59a41a76a68a2a36a21a10a38a33a71a40a67a22a4a50a80a65a27a37a47a70a14a28a16a6a56a30a57a5a11a79a9a77a46a39a25a49a8a12',
'400df5e8817565e28b2e141c533ed7db': '61a74a10a45a3a37a72a22a57a39a25a56a52a29a70a60a67a41a63a55a27a28a43a18a5a9a8a40a17a48a44a79a38a47a32a73a4a6a13a34a33a49a2a42a50a76a54a36a35a14a58a7a69a46a16a30a21a11aa51a53a77a26a31a1a19a20a80a24a62a68a59a66a75a12a64a78a71a15a65a23',
'84ba0d8098f405b14f4dbbcc04c93bac': '61a26a35a16a55a10a72a37a2a60a66a65a33a44a7a28a70a62a32a56a30a40a58a15a74a47aa36a78a75a11a6a77a67a39a23a9a31a64a59a13a24a80a14a38a45a21a63a19a51a17a34a50a46a5a29a73a8a57a69a48a68a49a71a41a12a52a18a79a76a54a42a22a4a1a3a53a20a25a43a27',
'56665708741979f716e5bd64bf733c33': '23a7a41a48a57a27a69a36a76a62a40a75a26a2a51a6a10a65a43a24a1aa20a71a28a30a13a38a79a78a72a14a49a55a56a58a25a70a12a80a3a66a11a39a42a17a15a54a45a34a74a31a8a61a46a73a63a22a64a19a77a50a9a59a37a68a52a18a32a16a33a60a67a21a44a53a5a35a4a29a47',
'37abcb7424ce8df47ccb1d2dd9144b49': '67a45a39a72a35a38a61a11a51a60a13a22a31a25a75a30a74a43a69a50a6a26a16a49a77a68a59a64a17a56a18a1a10a54a44a62a53a80a5a23a48a32a29a79a24a70a28a58a71a3a52a42a55a9a14a36a73a34a2a27a57a0a21a41a33a37a76a8a40a65a7a20a12a19a47a4a78a15a63a66a46',
'874b83ba76a7e783d13abc2dabc08d76': '26a59a42a43a4a20a61a28a12a64a37a52a2a77a34a13a46a74a70a0a44a29a73a66a55a38a69a67a62a9a63a6a54a79a21a33a8a58a40a47a71a49a22a50a57a78a56a25a17a15a36a16a48a32a5a10a14a80a24a72a76a45a3a53a23a41a60a11a65a19a27a51a68a35a31a1a75a39a30a7a18',
'930b87ad89c2e2501f90d0f0e92a6b97': '9a29a49a67a62a40a28a50a64a77a46a31a16a73a14a45a51a44a7a76a22a78a68a37a74a69a25a65a41a11a52aa18a36a10a38a12a15a2a58a48a8a27a75a20a4a80a61a55a42a13a43a47a39a35a60a26a30a63a66a57a33a72a24a71a34a23a3a70a54a56a32a79a5a21a6a59a53a17a1a19',
'1269606c6c3d8bb6508426468216d6b1': '49a15a0a60a14a26a34a69a61a24a35a4a77a80a70a40a39a6a68a17a41a56a28a46a79a16a21a1a37a42a44a58a78a18a52a73a32a9a12a50a8a13a20a19a67a36a45a75a48a10a65a7a38a66a3a2a43a27a29a31a72a74a55a23a54a22a59a57a11a62a47a53a30a5a64a25a76a71a51a33a63',
'33a3b21bb2d14a09d15f995224ae4284': '30a59a35a34a42a8a10a56a70a64a48a69a26a18a6a16a54a24a73a79a68a33a32a2a63a53a31a14a17a57a41a80a76a40a60a12a43a29a39a4a77a58a66a36a38a52a13a19a0a75a28a55a25a61a71a11a67a49a23a45a5a15a1a50a51a9a44a47a65a74a72a27a7a37a46a20a22a62a78a21a3',
'9ae6640761b947e61624671ef841ee78': '62a25a21a75a42a61a73a59a23a19a66a38a71a70a6a55a3a16a43a32a53a37a41a28a49a63a47a17a7a30a78a46a20a67a56a79a65a14a69a60a8a52a22a9a24a2a4a13a36a27a0a18a33a12a44a5a76a26a29a40a1a11a64a48a39a51a80a72a68a10a58a35a77a54a34a74a57a31a50a45a15',
'a67e15ed870fe4aab0a502478a5c720f': '8a12a59a52a24a13a37a21a55a56a41a71a65a43a40a66a11a79a67a44a33a20a72a2a31a42a29a34a58a60a27a48a28a15a35a51a76a80a0a63a69a53a39a46a64a50a75a1a57a9a62a74a18a16a73a14a17a6a19a61a23a38a10a3a32a26a36a54a4a30a45a47a70a22a7a68a49a77a5a25a78',
'b6a2f75185754b691e4dfe50f84db57c': '47a63a76a58a37a4a56a21a1a48a62a2a36a44a34a42a23a9a60a72a11a74a70a20a77a16a15a35a69a0a55a46a24a6a32a75a68a43a41a78a31a71a52a33a67a25a80a30a5a28a40a65a39a14a29a64a3a53a49a59a12a66a38a27a79a45a18a22a8a61a50a17a51a10a26a13a57a19a7a54a73',
'db99689c5a26a09d126c7089aedc0d86': '57a31a46a61a55a41a26a2a39a24a75a4a45a13a23a51a15a8a64a37a72a34a12a3a79a42a80a17a62a49a19a77a48a68a78a65a14a10a29a16a20a76a38a36a54a30a53a40a33a21a44a22a32a5a1a7a70a67a58a0a71a74a43a66a6a63a35a56a73a9a27a25a59a47a52a11a50a18a28a60a69',
'd320d2647d70c068b89853e1a269c609': '77a38a53a40a16a3a20a18a63a9a24a64a50a61a45a59a27a37a8a34a11a55a79a13a47a68a12a22a46a33a1a69a52a54a31a23a62a43a0a2a35a28a57a36a51a78a70a5a32a75a41a30a4a80a19a21a42a71a49a10a56a74a17a7a25a6a14a73a29a44a48a39a60a58a15a66a67a72a65a76a26',
'c587e77362502aaedad5b7cddfbe3a0d': '50aa59a70a68a30a56a10a49a43a45a29a23a28a61a15a40a71a14a44a32a34a17a26a63a76a75a33a74a12a11a21a67a31a19a80a7a64a8a3a51a53a38a18a6a42a27a9a52a20a41a60a1a22a77a16a54a47a79a24a78a2a46a37a73a65a36a35a39a5a4a25a72a13a62a55a57a58a69a66a48',
'f4ab0903149b5d94baba796a5cf05938': '40a37a55a73a18a42a15a59a50a13a22a63a52a58a6a80a47a17a38a71a74a70a30a11a10a19a0a31a36a21a51a68a1a3a14a66a45a2a79a7a76a75a8a67a20a78a25a69a43a28a35a60a4a23a65a54a34a9a5a39a27a57a26a33a12a24a46a72a56a44a49a61a64a29a53a48a32a62a41a16a77',
'f5baf770212313f5e9532ec5e6103b61': '55a69a78a75a38a25a20a60a6a80a46a5a48a18a23a24a17a67a64a70a63a57a22a10a49a19a8a16a11a12a61a76a34a27a54a73a44a0a56a3a15a29a28a13a4a2a7a77a74a35a37a26a30a58a9a71a50a1a43a79a47a32a14a53a52a66a72a59a68a31a42a45a62a51a40a39a33a65a41a36a21',
'e2169a4bfd805e9aa21d3112d498d68c': '54a34a68a69a26a20a66a1a67a74a22a39a63a70a5a37a75a15a6a14a62a50a46a35a44a45a28a8a40a25a29a76a51a77a17a47a0a42a2a9a48a27a13a64a58a57a18a30a80a23a61a36a60a59a71a32a7a38a41a78a12a49a43a79a24a31a52a19a3a53a72a10a73a11a33a16a4a55a65a21a56',
'1796550d20f64decb317f9b770ba0e78': '37a55a39a79a2a53a75a1a30a32a3a13a25a49a45a5a60a62a71a78a63a24a27a33a19a64a67a57a0a8a54a9a41a61a50a73a7a65a58a51a15a14a43a4a35a77a68a72a34a80a22a17a48a10a70a46a40a28a20a74a52a23a38a76a42a18a66a11a59a6a69a31a56a16a47a21a12a44a36a29a26',
'bf53be6753a0037c6d80ca670f5d12d5': '55a41a18a19a4a13a36a12a56a69a64a80a30a39a57a50a48a26a46a73a17a52a49a66a11a25a61a51a68a24a70a7a67a53a43a8a29a75a65a42a38a58a9a28a0a78a54a31a22a5a15a3a79a77a59a23a45a40a47a44a6a2a1a35a14a62a63a76a20a16a32a21a71a10a74a60a34a37a33a72a27',
'6c41ff7fbed622aa76e19f3564e5d52a': '40a3a13a59a68a34a66a43a67a14a26a46a8a24a33a73a69a31a2a57a10a51a62a77a74a41a47a35a64a52a15a53a6a80a76a50a28a75a56a79a17a45a25a49a48a65a78a27a9a63a12a55a32a21a58a38a0a71a44a30a61a36a16a23a20a70a22a37a4a19a7a60a11a5a18a39a1a54a72a29a42',
}
ik = '18a72a69a64a13a1a43a3aa42a23a66a26a19a51a54a78a34a17a31a35a15a58a29a61a48a73a74a44a52a60a24a63a20a32a7a45a53a75a55a62a59a41a76a68a2a36a21a10a38a33a71a40a67a22a4a50a80a65a27a37a47a70a14a28a16a6a56a30a57a5a11a79a9a77a46a39a25a49a8a12'
for k in values:
if ~url.find(k):
ik = values[k]
matrix = {}
for n, i in enumerate(ik.split('a')):
if len(i) > 0:
m = int(i)
else:
m = 0
matrix[n] = m
puzzle = Puzzle(9, 9, matrix, 0)
puzzle.de_scramble(_path, _dst)

View File

@@ -0,0 +1,22 @@
from .base_lib import BaseLib
class MangaRockComCrypt(BaseLib):
def decrypt(self, string):
if isinstance(string, str):
string = string.encode()
n = len(string) + 7
tmp = self.pack_auto([82, 73, 70, 70])
tmp += self.pack_auto([
n & 0xff,
(n >> 8) & 0xff,
(n >> 16) & 0xff,
(n >> 24) & 0xff,
])
tmp += self.pack_auto([87, 69, 66, 80, 86, 80, 56])
for i in range(len(string)):
tmp += self.pack('1B', string[i] ^ 101)
return tmp
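
The decrypt above rebuilds a WebP file: each payload byte is XOR-ed with 101, and a `RIFF`/`WEBPVP8` container header with a little-endian size field is prepended. An equivalent, more explicit sketch:

def decrypt_explicit(payload: bytes) -> bytes:
    # Equivalent to MangaRockComCrypt.decrypt, written with explicit byte ops.
    n = len(payload) + 7
    header = b'RIFF' + n.to_bytes(4, 'little') + b'WEBPVP8'
    return header + bytes(b ^ 101 for b in payload)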

View File

@@ -0,0 +1,24 @@
from .base_lib import BaseLib
class ManhuaGuiComCrypt(BaseLib):
def decrypt(self, js, default=''):
# try:
return self.exec_js(self.js_gz_b64_data(), js)
# except Exception:
# return default
def js_gz_b64_data(self): # FIXME
data = """eJzNVdtymzAQ/RWHhwwqawp2mqbI60yaNr0lvaXXeJgMARFIiEQlYTt1+fcKfOt03Olj8iAQR7tn
j3YO0jiSneOzUy1zfolpxWOdC26T2djgAue4m0pxc5hF8lAkDHKc1bRZ5jhLWCxuSsmUOjIhTyPF
dneCFYsgM8l0JXmHV0WBKPYtK7CsZtIAAXfPPVu4BeOXOoN+D1aZfJW5JgMD5qm9lY9EaGbm2QhJ
hbQbMRo9qgdLMqodhzQhI+HGRviBtjUJUdcL1naJh7VtHTw9fPb86MXLV6/fHJ+8fff+w8fTT5+/
fP32/Sy6iBOWXmb51XVxw0X5QypdjSfT25+e3+vvPNp9vPfEeYgWrEpwQmpSw7m3bkEOHPS8mxIU
RFACgwJiSHEUQoY7UJkxxj4kaFkwadApmvgi0LZHoBQqb4gCDjlP2DTw51uWZrty0KfSQZ+kIxmi
bPEIPWB4EunMLcXE7kGPQIE+LbaQUVLi1DXU21N3yQvr6XCIPniIa2R7215/YBNrklEbsNViWkwg
+oV2OfT2/cAjDwooBgNTTU1yHWd2RGaxsUTH9GOTtL27kBajMLrohWTRNW3V+ZvV+bv3Q14vmHvW
supGZzrqhxiDMmPilpXK7JhQ2v4ZC/JhTpYZmy0xvkNLxHgfTZGOKscJ29ZDjFXXh6zbvbce+a/a
pWU6E/dK5Ny2LFIbf5jqmSma/eWFsakE6SgOSYLNi7JCscZP8RZiRf44wWmCylHL084j9cKBSZPf
alJOsl42Jk2aLXe7/ypb1zVd8tc2oYvrppRCC31bMleVRR7jhgtleWW5m24gW2e5Im2yNjk1/Q2+
WUKy"""
return self.zlib_d(self.base64decode(data)).decode()

View File

@@ -0,0 +1,66 @@
from PIL import Image
class Puzzle:
need_copy_orig = False
__x = 0
__y = 0
__multiply = 1
__matrix = None
__image_src = None
__image_dst = None
__block_width = None
__block_height = None
def __init__(self, x: int, y: int, matrix: dict, multiply: int = 1):
self.__x = x
self.__y = y
self.__matrix = matrix
self.__multiply = multiply
def de_scramble(self, path_src: str, path_dst: str):
self.__image_src = Image.open(path_src, 'r')
self._process()
self.__image_dst.save(path_dst)
self.__image_src.close()
self.__image_dst.close()
def _process(self):
self.__image_dst = Image.new(self.__image_src.mode, self.__image_src.size)
self._calc_block_size()
self._check_copy_orig_image()
self._solve_matrix()
def _check_copy_orig_image(self):
if self.need_copy_orig:
self.__image_dst.paste(self.__image_src)
def _calc_block_size(self):
if self.__multiply <= 1:
self.__block_width = int(self.__image_src.size[0] / self.__x)
self.__block_height = int(self.__image_src.size[1] / self.__y)
else:
self.__block_width = self.__multiply * int(self.__image_src.size[0] / self.__y / self.__multiply)
self.__block_height = self.__multiply * int(self.__image_src.size[1] / self.__x / self.__multiply)
def _src_rect(self, i):
row = int(i / self.__x)
col = i - row * self.__x
x = col * self.__block_width
y = row * self.__block_height
return x, y, x + self.__block_width, y + self.__block_height
def _dst_rect(self, i):
row = int(self.__matrix[i] / self.__x)
col = self.__matrix[i] - row * self.__y
x = col * self.__block_width
y = row * self.__block_height
return x, y, x + self.__block_width, y + self.__block_height
def _solve_matrix(self):
for i in range(self.__x * self.__y):
src_rect = self._src_rect(i)
dst_rect = self._dst_rect(i)
region = self.__image_src.crop(src_rect)
self.__image_dst.paste(region, dst_rect)
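
Usage sketch: the matrix maps each source block index to its destination index, so an identity matrix reproduces the input (the paths below are illustrative):

matrix = {i: i for i in range(81)}  # identity permutation for a 9x9 grid
Puzzle(9, 9, matrix, 0).de_scramble('scrambled.png', 'plain.png')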

View File

@@ -0,0 +1,180 @@
from PIL import Image
# DON'T LOOK TOO CLOSELY. IT WORKS!
class MatrixSunday:
__image_src = None
__image_dst = None
def de_scramble(self, path_src: str, path_dst: str, data: list):
self.__image_src = Image.open(path_src, 'r')
self.__process(data)
self.__image_dst.save(path_dst)
self.__image_src.close()
self.__image_dst.close()
def __process(self, data: list):
size_src = self.__image_src.size
self.__image_dst = Image.new(self.__image_src.mode, size_src)
for i in data:
x, y = i['srcX'] + i['width'], i['srcY'] + i['height']
dx, dy = i['destX'] + i['width'], i['destY'] + i['height']
c1 = i['srcX'] < size_src[0]
c2 = i['srcX'] + i['width'] >= 0
c3 = i['srcY'] < size_src[1]
c4 = i['srcY'] + i['height'] >= 0
if c1 and c2 and c3 and c4:
region = self.__image_src.crop((i['destX'], i['destY'], dx, dy))
self.__image_dst.paste(region, (i['srcX'], i['srcY'], x, y))
class SundayWebryCom: # pragma: no cover
_result = None
def solve_by_img(self, src: str, element_width: int, element_height: int, n: int):
img = Image.open(src)
sizes = img.size
img.close()
return self.solve(*sizes, element_width, element_height, n)
def solve(self, width: int, height: int, element_width: int, element_height: int, n: int):
e = width
t = height
r = element_width
i = element_height
y = int(e / r)
g = int(t / i)
f = e % r
b = t % i
self._result = []
s = y - 43 * n % y
if s % y == 0:
s = (y - 4) % y
a = g - 47 * n % g
if a % g == 0:
a = (g - 4) % g
if 0 == a:
a = g - 1
self.def1(f, b, s, r, a, i)
self.def2(y, i, n, a, s, f, r, g, b)
if f > 0:
self.def3(g, n, s, a, y, b, i, r, f)
self.def4(y, g, n, r, f, s, a, i, b)
return self._result
def def1(self, f, b, s, r, a, i):
if f > 0 and b > 0:
o = s * r
u = a * i
self._result.append({
'srcX': o,
'srcY': u,
'destX': o,
'destY': u,
'width': f,
'height': b,
# 'debug': 1
})
def def2(self, y, i, n, a, s, f, r, g, b):
for l in range(y):
d = self._calc_x_x(l, y, n)
h = self._calc_y_x(d, s, a, g, n)
c = self._calc_pos_rest(d, s, f, r)
p = h * i
o = self._calc_pos_rest(l, s, f, r)
u = a * i
self._result.append({
'srcX': o,
'srcY': u,
'destX': c,
'destY': p,
'width': r,
'height': b,
# 'debug': 2
})
def def3(self, g, n, s, a, y, b, i, r, f):
for m in range(g):
h = self._calc_y_y(m, g, n)
d = self._calc_x_y(h, s, a, y, n)
p = self._calc_pos_rest(h, a, b, i)
u = self._calc_pos_rest(m, a, b, i)
self._result.append({
'srcX': s * r,
'srcY': u,
'destX': d * r,
'destY': p,
'width': f,
'height': i,
# 'debug': 3
})
def def4(self, y, g, n, r, f, s, a, i, b):
for l in range(y):
for m in range(g):
d = (l + 29 * n + 31 * m) % y
h = (m + 37 * n + 41 * d) % g
c = d * r + (f if d >= self._calc_x_y(h, s, a, y, n) else 0)
p = h * i + (b if h >= self._calc_y_x(d, s, a, g, n) else 0)
o = l * r + (f if l >= s else 0)
u = m * i + (b if m >= a else 0)
self._result.append({
'srcX': o,
'srcY': u,
'destX': c,
'destY': p,
'width': r,
'height': i,
# 'debug': 4
})
@staticmethod
def _calc_pos_rest(e, t, r, i):
m = 0
if e >= t:
m = r
return e * i + m
@staticmethod
def _calc_x_x(e, t, r):
return (e + 61 * r) % t
@staticmethod
def _calc_x_y(e, t, r, i, n):
o = (n % 2 == 1)
if (e < r and o) or (e >= r and not o):
a = i - t
s = t
else:
a = t
s = 0
return (e + 67 * n + t + 71) % a + s
@staticmethod
def _calc_y_x(e, t, r, i, n):
o = (n % 2 == 1)
if (e < t and o) or (e >= t and not o):
a = r
s = 0
else:
a = i - r
s = r
return (e + 53 * n + 59 * r) % a + s
@staticmethod
def _calc_y_y(e, t, r):
return (e + 73 * r) % t

View File

@@ -0,0 +1,77 @@
from typing import Optional, Tuple
from PIL import Image
WIDTH = 256
HEIGHT = 257
KEY = 42016
class VizComMatrix:
@classmethod
def solve_image(cls, path: str, metadata: dict) -> Optional[Image.Image]:
orig = Image.open(path) # type: Image.Image
new_size = (orig.size[0] - 90, orig.size[1] - 140)
ref = Image.new(orig.mode, new_size) # type: Image.Image
ref.paste(orig)
exif = orig._getexif()
if exif and KEY in exif:  # _getexif() may return None when the image has no EXIF
key = [int(i, 16) for i in exif[KEY].split(':')]
width, height = exif[WIDTH], exif[HEIGHT]
else:
key = []
width, height = metadata['width'], metadata['height']
small_width = int(width / 10)
small_height = int(height / 15)
cls.paste(ref, orig, (
0, small_height + 10,
small_width, height - 2 * small_height,
), (
0, small_height,
small_width, height - 2 * small_height,
))
cls.paste(ref, orig, (
0, 14 * (small_height + 10),
width, orig.height - 14 * (small_height + 10),
), (
0, 14 * small_height,
width, orig.height - 14 * (small_height + 10),
))
cls.paste(ref, orig, (
9 * (small_width + 10), small_height + 10,
small_width + (width - 10 * small_width), height - 2 * small_height,
), (
9 * small_width, small_height,
small_width + (width - 10 * small_width), height - 2 * small_height,
))
for i, j in enumerate(key):
cls.paste(ref, orig, (
(i % 8 + 1) * (small_width + 10), (int(i / 8) + 1) * (small_height + 10),
small_width, small_height,
), (
(j % 8 + 1) * small_width, (int(j / 8) + 1) * small_height,
small_width, small_height,
))
return ref
@classmethod
def paste(cls, ref: Image.Image, orig: Image.Image, orig_box: Tuple, ref_box: Tuple):
ref.paste(orig.crop((
int(orig_box[0]), int(orig_box[1]),
int(orig_box[0] + orig_box[2]), int(orig_box[1] + orig_box[3]),
)), (
int(ref_box[0]), int(ref_box[1]),
int(ref_box[0] + ref_box[2]), int(ref_box[1] + ref_box[3]),
))
solve = VizComMatrix().solve_image
__all__ = ['solve']

View File

@@ -0,0 +1,209 @@
import tempfile
from json import loads as json_loads
from os import name as os_name, getpid, makedirs, walk
from pathlib import Path
from shutil import move
from shutil import rmtree
__dir_name__ = '.PyMangaDownloader'
def mark_as_hidden(_path: str):
try:
from ctypes import windll
windll.kernel32.SetFileAttributesW(_path, 2)
except Exception:
pass
def get_temp_path(*args) -> str:
temp = 'temp_%s' % getpid()
return path_join(tempfile.gettempdir(), __dir_name__, temp, *args)
def root_path() -> str:
# manga_py/fs.py -> repository root (two levels up)
file = Path(__file__).resolve()
return str(file.parent.parent)
def get_util_home_path() -> str:
if os_name == 'nt':
home = path_join(str(Path.home()), 'AppData', 'Roaming', __dir_name__)
else:
home = path_join(str(Path.home()), __dir_name__)
make_dirs(home)
return str(home)
def make_dirs(directory):
is_dir(directory) or makedirs(directory)
def remove_file_query_params(name, save_path: bool = True) -> str:
if name is None:
raise AttributeError
file_path = dirname(name)
name = basename(name)
position = name.find('?')
if position == 0:
name = 'image.png' # fake image name
elif position > 0:
name = name[:position]
return str(path_join(file_path, name) if save_path else name)
def is_file(_path) -> bool:
return Path(_path).is_file()
def is_dir(_path) -> bool:
return Path(_path).is_dir()
def basename(_path) -> str:
return str(Path(_path).name)
def dirname(_path) -> str:
return str(Path(_path).parent)
def path_join(_path, *args) -> str:
return str(Path(_path).joinpath(*args))
def unlink(_path, allow_not_empty=False):
if is_dir(_path):
if allow_not_empty:
rmtree(_path)
else:
Path(_path).rmdir()
elif is_file(_path):
Path(_path).unlink()
def os_stat(_path):
if is_file(_path):
return Path(_path).stat()
return None
def file_size(_path):
"""
:param _path:
:return:
:rtype: int
"""
data = os_stat(_path)
if data:
return data.st_size
return None
def rename(_from, _to):
if is_file(_from) or is_dir(_from):
is_dir(dirname(_to)) or makedirs(dirname(_to))
move(_from, _to)
def storage(_path) -> str:
_path = get_temp_path('storage', _path)
make_dirs(dirname(_path))
return str(_path)
def listing(_path) -> dict:
"""
:param _path:
:return: {'directories': [], 'files': []}
"""
# walk() yields (dirpath, dirnames, filenames) tuples; only the top level is needed
_dirname, _dirnames, _filenames = next(walk(_path))
return {'directories': _dirnames, 'files': _filenames}
def __get_info(_path):
try:
with open(path_join(_path, 'info.json'), 'r') as r:
return json_loads(r.read())
except FileNotFoundError:
return None
def get_info(_path) -> dict:
"""
listing subdirectories and reading info.json files
:param _path: [{..}, {..}, {..}]
:return:
"""
result = {}
for d in listing(_path)['directories']:
directory = path_join(_path, d)
info = __get_info(directory)
if info is not None:
result[directory] = info
return result
def __dirname(_path) -> str:
if not is_dir(_path):
_path = __dirname(dirname(_path))
return str(_path)
def _disk_stat_posix(_path) -> dict:
import os
st = os.statvfs(_path)
free = st.f_bavail * st.f_frsize
total = st.f_blocks * st.f_frsize
used = (st.f_blocks - st.f_bfree) * st.f_frsize
return {'total': total, 'used': used, 'free': free}
def _disk_stat_win(_path) -> dict:
import ctypes
_, total, free = ctypes.c_ulonglong(), ctypes.c_ulonglong(), ctypes.c_ulonglong()
fun = ctypes.windll.kernel32.GetDiskFreeSpaceExA
ret = fun(_path, ctypes.byref(_), ctypes.byref(total), ctypes.byref(free))
if ret == 0:
fun = ctypes.windll.kernel32.GetDiskFreeSpaceExW
ret = fun(_path, ctypes.byref(_), ctypes.byref(total), ctypes.byref(free))
if ret == 0:
raise ctypes.WinError()
used = total.value - free.value
return {'total': total.value, 'used': used, 'free': free.value}
def get_disk_stat(_path) -> dict:
import os
_path = __dirname(_path)
if hasattr(os, 'statvfs'): # POSIX
return _disk_stat_posix(_path)
elif os.name == 'nt': # Windows
return _disk_stat_win(_path)
else:
raise NotImplementedError('Platform not supported')
def check_free_space(_path: str, min_size: int = 100, percent: bool = False) -> bool:
"""
min_size = 10 # percent = True
min_size = 10 # percent = False (default)
:param _path:
:param min_size:
:param percent:
:return:
"""
_stat = get_disk_stat(_path)
if percent:
_free = _stat['free'] / _stat['total']
if (_free * 100) < min_size:
return False
return True
else:
_free = _stat['free'] / (2 << 19)  # bytes per megabyte: 2 << 19 == 1048576
if _free < min_size:
return False
return True
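
A sketch mirroring how the CLI consumes this helper at startup (the `Manga` destination is illustrative):

from manga_py.fs import check_free_space, get_temp_path

for target in (get_temp_path(), 'Manga'):  # 'Manga' is the default destination
    if not check_free_space(target, 100):
        raise OSError('No space left on device')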

View File

@@ -0,0 +1,121 @@
from sys import stderr
from time import sleep
import requests
from manga_py.fs import get_temp_path, make_dirs, remove_file_query_params, basename, path_join, dirname, file_size
from .multi_threads import MultiThreads
from .request import Request
from .url_normalizer import normalize_uri
class Http(Request):
count_retries = 20
has_error = False
mute = False
def __init__(
self,
allow_webp=True,
referer='',
user_agent=None,
proxies=None,
cookies=None,
kwargs=None
):
super().__init__()
self.__set_param('allow_webp', allow_webp)
self.__set_param('referer', referer)
self.__set_param('user_agent', user_agent)
self.__set_param('proxies', proxies)
self.__set_param('cookies', cookies)
self.__set_param('kwargs', kwargs)
def __set_param(self, name, value):
if value is not None:
self_val = getattr(self, name)
_type = type(self_val)
if self_val is not None and not isinstance(value, _type):
raise AttributeError('{} type not {}'.format(name, _type))
setattr(self, name, value)
def _download(self, file_name, url, method):
with open(file_name, 'wb') as out_file:
response = self.requests(url, method=method, timeout=60, allow_redirects=True)
response = self.requests(url, method=method, timeout=60, allow_redirects=True)
if response.status_code >= 400:
self.debug and print('\nERROR! Code {}\nUrl: {}\n'.format(
response.status_code,
url,
))
sleep(2)
if response.status_code == 403:
response = requests.request(method=method, url=url, timeout=60, allow_redirects=True)
if response.status_code < 400:
out_file.write(response.content)
response.close()
def _safe_downloader(self, url, file_name, method='get') -> bool:
try:
make_dirs(dirname(file_name))
url = self.normalize_uri(url)
self._download(file_name, url, method)
except OSError as ex:
self.debug and print(ex)
return False
return True
def _download_one_file_helper(self, url, dst, callback: callable = None, success_callback: callable = None,
callback_args=()):
r = 0
while r < self.count_retries:
if self._safe_downloader(url, dst):
size = file_size(dst)
if size is None or size < 64:  # broken or empty download; file_size() may return None
return None
callable(success_callback) and success_callback(dst, *callback_args)
return True
r += 1
mode = 'Retry'
if r >= self.count_retries:
mode = 'Skip image'
callable(callback) and callback(text=mode)
return False
def download_file(self, url: str,
dst: str = None,
idx=-1,
callback: callable = None,
success_callback: callable = None,
callback_args=()) -> bool:
if not dst:
name = basename(remove_file_query_params(url))
dst = path_join(get_temp_path(), name)
result = self._download_one_file_helper(url, dst, callback, success_callback, callback_args)
if result is None and not self.mute:
self.has_error = True # issue 161
self.debug and print('\nWarning: 0 bit image downloaded, please check for redirection or broken content', file=stderr)
if ~idx:
self.debug and print('Broken url: %s\nPage idx: %d' % (url, (1 + idx)), file=stderr)
return result
def normalize_uri(self, uri, referer=None):
if not referer:
referer = self.referer
if isinstance(uri, str):
return normalize_uri(uri.strip(), referer)
return uri
def multi_download_get(self, urls, dst: str = None, callback: callable = None):
threading = MultiThreads()
for idx, url in enumerate(urls):
threading.add(self.download_file, (url, dst, idx))
threading.start(callback)
def get_redirect_url(self, url, **kwargs):
location = self.requests(url=url, method='head', **kwargs)
url = location.headers.get('Location', url)
return self.normalize_uri(url)
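
A minimal usage sketch; the URL and destination path are illustrative:

http = Http(referer='https://example.com/')
ok = http.download_file(
    'https://example.com/chapter/page-001.png',  # illustrative image URL
    dst='Manga/page-001.png',
)
print('saved' if ok else 'failed or empty')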

View File

@@ -0,0 +1,53 @@
import requests
from lxml.html import document_fromstring
class AutoProxy:
checked_url = 'https://httpbin.org/ip'
@staticmethod
def __strip(text):
return text.text_content().strip(' \n\t\r\0')
def _s(self, item):
td = item.cssselect('td')
proxy = self.__strip(td[4]) # proxy type
https = self.__strip(td[6]) # https (yes|no)
if (
proxy == 'anonymous'
or proxy == 'elite proxy'
) and https == 'yes':
return self.__strip(td[0]) + ':' + self.__strip(td[1])
return None
def _test_proxy(self, url):
proxies = {
'http': url,
'https': url,
}
try:
requests.head(url=self.checked_url, proxies=proxies, timeout=6)
except Exception:
return False
return proxies
def _change_checked_url(self, checked_url):
if checked_url:
self.checked_url = checked_url
def auto_proxy(self, checked_url=None):
self._change_checked_url(checked_url)
url = 'https://www.us-proxy.org'
items = document_fromstring(requests.get(url).text)
items = items.cssselect('#proxylisttable tbody tr')
for n, i in enumerate(items):
proxy = self._s(i)
test = False
if proxy:
test = self._test_proxy(proxy)
if test:
return test
return None
auto_proxy = AutoProxy().auto_proxy
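
A sketch pairing the module-level shortcut with the `Request` class from request.py elsewhere in this commit; the import path is assumed:

proxies = auto_proxy()          # returns a proxies dict, or None if nothing usable
if proxies:
    request = Request()         # assumes Request from request.py is importable
    request.set_proxy(proxies)  # dict with 'http' and 'https' keys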

View File

@@ -0,0 +1,33 @@
import random
import time
from manga_py.crypt.base_lib import BaseLib
from .request import Request
class GoogleDCP:
host = 'proxy.googlezip.net'
authkey = 'ac4500dd3b7579186c1b0620614fdb1f7d61f944'
http = None
def __init__(self, http: Request):
self.http = http
def randint(self):
return random.randint(0, 999999999)
def _build_header(self):
timestamp = int(time.time())
md5 = BaseLib.md5('{}{}{}'.format(timestamp, self.authkey, timestamp))
return 'Chrome-Proxy: ps={}-{}-{}-{}, sid={}, c=win, b=3029, p=110'.format(
int(time.time()),
self.randint(),
self.randint(),
self.randint(),
BaseLib.str2hex(md5.hexdigest())
)
def set_proxy(self):
self.http.proxies['http'] = self.host
self.http.headers = self._build_header()
return self.http

View File

@@ -0,0 +1,33 @@
from threading import Thread
class MultiThreads:
threads = None
max_threads = 2
to_run = None
def __init__(self):
self.threads = []
self.to_run = []
try:
import multiprocessing
self.max_threads = multiprocessing.cpu_count()
except Exception:
pass
def add(self, target: callable, args: tuple):
self.threads.append(Thread(target=target, args=args))
def _run_processes(self, callback: callable = None, n: int = None):
for t in self.to_run:
if not n:
t.join()
callback is not None and callback()
def start(self, callback: callable = None):
for n, t in enumerate(self.threads): # starting all threads
t.start()
self.to_run.append(t)
self._run_processes(callback, (n + 1) % self.max_threads)
self._run_processes(callback)
self.threads = []
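
Usage sketch: queue one callable per job, then `start()` joins them in batches of `max_threads` (the job arguments below are illustrative):

threads = MultiThreads()
for idx, url in enumerate(['/p1', '/p2', '/p3']):  # illustrative job arguments
    threads.add(print, (idx, url))                 # any callable plus an args tuple
threads.start()                                    # optional callback fires as batches finish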

View File

@@ -0,0 +1,171 @@
import requests
from .url_normalizer import normalize_uri
class Request:
__redirect_base_url = ''
_headers = None
referer = ''
proxies = None
allow_webp = True
user_agent = '{} {} {} {}'.format(
'Mozilla/5.0 (Windows NT 10.0; Win64; x64)',
'AppleWebKit/537.36 (KHTML, like Gecko)',
'Chrome/60.0.3112.101',
'Safari/537.36'
)
default_lang = 'ru-RU,ru;q=0.8,en-US;q=0.5,en;q=0.3'
cookies = None
kwargs = None
debug = False
response = None
_history = None
allow_send_referer = True
def __init__(self):
self.proxies = {}
self.cookies = {}
self._history = []
def __patch_headers(self, headers):
if isinstance(self._headers, dict):
for i in self._headers:
headers[i] = self._headers[i]
return headers
def _get_cookies(self, cookies=None):
return cookies if cookies else self.cookies
def _prepare_redirect_base_url(self, url):
if not self.__redirect_base_url:
self.__redirect_base_url = url
def _get_kwargs(self):
kwargs = {}
if self.kwargs:
kwargs = self.kwargs
return kwargs
def __update_cookies(self, r):
_ = r.cookies.get_dict()
for c in _:
self.cookies[c] = _[c]
def __redirect_helper(self, r, url, method):
proxy = None
location = normalize_uri(r.headers['location'], self.__redirect_base_url)
if r.status_code == 303:
method = 'get'  # 303 See Other: repeat the request as GET at the new location
elif r.status_code == 305:
# 305 Use Proxy: retry the original url through the advertised proxy
proxy = {
'http': r.headers['location'],
'https': r.headers['location'],
}
location = url
return proxy, location, method
def _requests_helper(
self, method, url, headers=None, data=None,
max_redirects=10, **kwargs
) -> requests.Response:
self._prepare_redirect_base_url(url)
headers = self.__patch_headers(headers)
args = {
'url': url,
'headers': headers,
'data': data,
}
self.__set_defaults(args, kwargs)
self.__set_defaults(args, self._get_kwargs())
args.setdefault('allow_redirects', False)
r = getattr(requests, method)(**args)
self.__update_cookies(r)
if r.is_redirect and method != 'head':
if max_redirects < 1:
self.debug and print(self._history)
raise AttributeError('Too many redirects')
self._history.append(url)
proxy, location, method = self.__redirect_helper(r, url, method)
if proxy:
kwargs['proxies'] = proxy
return self._requests_helper(
method=method, url=location, headers=headers,
data=data, max_redirects=(max_redirects - 1),
**kwargs
)
return r
@staticmethod
def __set_defaults(args_orig: dict, args_vars: dict):
for idx in args_vars:
args_orig.setdefault(idx, args_vars[idx])
def requests(
self, url: str, headers: dict = None, cookies: dict = None,
data=None, method='get', files=None, timeout=None, **kwargs
) -> requests.Response:
if not isinstance(headers, dict):
headers = {}
self._history = []
cookies = self._get_cookies(cookies)
headers.setdefault('User-Agent', self.user_agent)
if self.allow_send_referer and self.referer:
headers.setdefault('Referer', self.referer)
headers.setdefault('Accept-Language', self.default_lang)
if self.allow_webp:
headers['Accept'] = 'text/html,application/xhtml+xml,application/xml;q=1.0,image/webp,image/apng,*/*;q=1.0'
kwargs.setdefault('proxies', self.proxies)
self.response = self._requests_helper(
method=method, url=url, headers=headers, cookies=cookies,
data=data, files=files, timeout=timeout,
**kwargs
)
return self.response
def get(self, url: str, headers: dict = None, cookies: dict = None, **kwargs) -> str:
response = self.requests(
url=url,
headers=headers,
cookies=cookies,
method='get',
**kwargs
)
text = response.text
response.close()
return text
def post(self, url: str, headers: dict = None, cookies: dict = None, data: dict = None, files=None, **kwargs) -> str:
response = self.requests(
url=url,
headers=headers,
cookies=cookies,
method='post',
data=data,
files=files,
**kwargs
)
text = response.text
response.close()
return text
def reset_proxy(self):
self.proxies = {}
def set_proxy(self, proxy):
self.reset_proxy()
if isinstance(proxy, dict):
self.proxies['http'] = proxy.get('http', None)
self.proxies['https'] = proxy.get('https', None)
elif isinstance(proxy, str):
self.proxies['http'] = proxy
def get_base_cookies(self, url: str):
"""
:param url:
:return:
"""
response = self.requests(url=url, method='head')
response.close()
return response.cookies
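A short usage sketch for Request (assuming it is exported from the manga_py.http package; the file path is not shown in this hunk). get() and post() return response bodies as text, cookies collected across redirects persist on the instance, and get_base_cookies() issues a HEAD request:

```python
from manga_py.http import Request  # assumed export

http = Request()
http.set_proxy('http://127.0.0.1:8080')  # a bare string sets the http proxy only
http.referer = 'https://example.org/'
html = http.get('https://example.org/manga/title', headers={'X-Requested-With': 'XMLHttpRequest'})
jar = http.get_base_cookies('https://example.org/')  # HEAD request; returns the response cookie jar
http.reset_proxy()
```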

View File

@@ -0,0 +1,25 @@
import json
import webbrowser
from packaging import version
from requests import get
from manga_py.meta import __version__, __repo_name__
def check_version():
api_url = 'https://api.github.com/repos/' + __repo_name__ + '/releases/latest'
api_content = json.loads(get(api_url).text)
tag_name = api_content.get('tag_name', None)
if tag_name and version.parse(tag_name) > version.parse(__version__):
download_addr = api_content['assets'][0]
return tag_name, download_addr['browser_download_url']
return ()
def download_update():
pass
def open_browser(url):
webbrowser.open(url)
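check_version() returns an empty tuple when the running version is current, or a (tag, download_url) pair when a newer GitHub release exists; download_update() is still a stub. A usage sketch:

```python
update = check_version()
if update:
    tag_name, url = update
    print('manga-py %s is available' % tag_name)
    open_browser(url)  # opens the release asset in the default browser
```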

View File

@@ -0,0 +1,73 @@
from urllib.parse import urlparse
class UrlNormalizer:
@staticmethod
def _parse_scheme(parse, base_parse):
if not parse.scheme:
uri = base_parse.scheme
else:
uri = parse.scheme
return uri + '://'
@staticmethod
def _parse_netloc(parse, base_parse):
if not parse.netloc:
uri = base_parse.netloc
else:
uri = parse.netloc
return uri
@staticmethod
def _test_path_netloc(parse):
if parse.path.find('://') == 0:
return urlparse('http' + parse.path).path
return parse.path
@staticmethod
def __parse_rel_path(parse, base_parse):
path = ''
if base_parse.path.rfind('/') > 0:
path = base_parse.path[0:base_parse.path.rfind('/')]
return path.rstrip('/') + '/' + parse.path.lstrip('/')
@staticmethod
def _parse_path(parse, base_parse):
if parse.netloc:
return parse.path
_path = UrlNormalizer._test_path_netloc(parse)
if _path:
if _path.find('/') == 0:
return _path
else:
return UrlNormalizer.__parse_rel_path(parse, base_parse)
else:
return base_parse.path
@staticmethod
def _parse_query(parse):
if parse.query:
return '?' + parse.query
return ''
@staticmethod
def _parse_fragment(parse):
if parse.fragment:
return '#' + parse.fragment
return ''
@staticmethod
def url_helper(url: str, base_url: str) -> str:
parse = urlparse(url)
base_parse = urlparse(base_url)
un = UrlNormalizer
scheme = un._parse_scheme(parse, base_parse)
netloc = un._parse_netloc(parse, base_parse)
path = un._parse_path(parse, base_parse)
query = un._parse_query(parse)
fragment = un._parse_fragment(parse)
return scheme + netloc + path + query + fragment
normalize_uri = UrlNormalizer.url_helper
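normalize_uri() resolves a possibly-relative URL against a base URL, falling back to the base's scheme, netloc and path. Two illustrative calls (the expected outputs follow from the code above):

```python
print(normalize_uri('/chapter/2', 'https://example.org/manga/title'))
# https://example.org/chapter/2
print(normalize_uri('page-2.html', 'https://example.org/manga/chapter/'))
# https://example.org/manga/chapter/page-2.html
```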

View File

@@ -0,0 +1,2 @@
class WebSocket:
pass

View File

@@ -0,0 +1,110 @@
import imghdr
from os import path
from PIL import Image as PilImage, ImageChops
class Image:
_image = None # type: PilImage
src_path = None # type: str
def __init__(self, src_path):
"""
:param src_path:
"""
if not path.isfile(src_path):
raise AttributeError('Image not found')
self.src_path = src_path
self.__open(src_path)
@property
def image(self) -> PilImage:
return self._image
@image.setter
def image(self, image: PilImage):
self._image = image
def __open(self, _path):
"""
:param _path:
:return:
"""
if self.image is None:
self._image = PilImage.open(_path)
def gray(self, dest_path: str):
"""
:param dest_path:
:return:
"""
try:
image = self.image.convert('LA')
except (ValueError, OSError):
image = self.image.convert('L')
if dest_path is not None:
image.save(dest_path)
return image
def convert(self, dest_path: str, quality: int = 95):
"""
see http://pillow.readthedocs.io/en/3.4.x/handbook/image-file-formats.html
:param dest_path:
:param quality:
:return:
"""
self.image.save(dest_path, quality=quality)
return dest_path
def crop_manual_with_offsets(self, offsets, dest_path: str):
"""
:param offsets:
:param dest_path:
:return:
"""
left, upper, right, lower = offsets
width, height = self.image.size
image = self.image.crop((
left,
upper,
width - right,
height - lower
))
image.save(dest_path)
def crop_manual(self, sizes: tuple, dest_path: str):
"""
:param sizes: The crop rectangle, as a (left, upper, right, lower)-tuple.
:param dest_path:
:return:
"""
self.image.crop(sizes).save(dest_path)
def crop_auto(self, dest_path: str):
"""
:param dest_path:
:return:
"""
bg = PilImage.new(
self.image.mode,
self.image.size,
self.image.getpixel((0, 0))
)
diff = ImageChops.difference(self.image, bg)
diff = ImageChops.add(diff, diff, 2.0, -100)
bbox = diff.getbbox()
if bbox:
crop = self.image.crop(bbox)
if dest_path:
crop.save(dest_path)
def close(self):
self.image is not None and self.image.close()
@staticmethod
def real_extension(_path):
img = imghdr.what(_path)
if img:
return '.' + img
return None
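A brief usage sketch for the Image wrapper (file names are illustrative): crop_auto() trims any uniform border matching the top-left pixel, convert() re-encodes via Pillow, and real_extension() infers the type from file contents with imghdr:

```python
img = Image('page_001.png')              # raises AttributeError if the file is missing
img.crop_auto('page_001_trimmed.png')    # trim borders matching the (0, 0) pixel colour
img.convert('page_001.jpg', quality=90)  # re-encode via Pillow (source must be RGB for JPEG)
img.close()
print(Image.real_extension('page_001.jpg'))  # '.jpeg' (imghdr name), or None
```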

View File

@@ -0,0 +1,125 @@
from argparse import Namespace
from datetime import datetime
from sys import argv
from typing import Union
from manga_py import meta
class Info:
__doc__ = """
--print-json argument helper
{
'site': 'https://example.org/kumo-desu-ga-nani-ka',
'downloader': [
'https://manga-py.com/manga-py/',
'https://github.com/manga-py/manga-py',
'https://github.com/yuru-yuri/manga-py',
'https://yuru-yuri.github.io/manga-py',
],
'version': '1.1.4',
'delta': '0:00:00.003625',
'start': '2018-06-08 17:22:24.419565',
'end': '2018-06-08 17:22:24.423190',
'user_agent': 'Mozilla/5.0',
'cookies': {'cf_clearance': 'ec-1528654923-86400', '__cfduid': '21528654914'},
'args': {
'_raw_params': 'manga-py --cbz https://example.org/kumo-desu-ga-nani-ka',
'url': 'https://example.org/kumo-desu-ga-nani-ka',
'name': None,
'destination': None,
'no-progress': False,
'cbz': False,
'skip-volumes': None,
'max-volumes': None,
'user-agent': None,
'proxy': None,
'reverse-downloading': None,
'rewrite-exists-archives': None,
'no-multi-threads': None,
},
'error': False,
'error_msg': '',
'volumes': [
{
'name': 'Kumo desu ga, nani ka? - 0',
'path': 'Manga/kumo-desu-ga-nani-ka/vol_000.zip',
},
{
'name': 'Kumo desu ga, nani ka? - 1',
'path': 'Manga/kumo-desu-ga-nani-ka/vol_001.zip',
},
],
}
"""
_data = None
_start_time = None
@staticmethod
def _dt(dt, fmt: str = '%A, %d. %B %Y %H:%M:%S'):
return dt.strftime(fmt)
def __init__(self, args: Union[Namespace, dict]): # see manga_py.cli arguments
_args = args if isinstance(args, dict) else args.__dict__
_args['_raw_params'] = ' '.join(argv)
self._data = {
'site': _args.get('url'),
'downloader': meta.__downloader_uri__,
'version': meta.__version__,
'delta': None,
'init': self._dt(datetime.now()),
'start': None,
'end': None,
'user_agent': None,
'cookies': None,
'args': _args,
'return_code': 0,
'error': False,
'error_msg': None,
'volumes': [],
}
self._volumes = []
def set_ua(self, ua):
self._data['user_agent'] = ua
def set_error(self, e, rc: int = 1):
self._data['return_code'] = rc
self._data['error'] = e
def start(self):
self._start_time = datetime.now()
def set_cookies(self, cookies):
self._data['cookies'] = cookies
def set_volumes(self, volumes: list):
self._data['volumes'] = volumes
def set_last_volume_error(self, error_message):
try:
self._data['volumes'][-1]['error'] = True
self._data['volumes'][-1]['error_message'] = error_message
except IndexError:
pass
def add_volume(self, url: str, path: str, files: list = None):
volume = {
'url': url,
'path': path,
'error': False,
'error_message': '',
}
if files is not None:
volume['files'] = files
volume['num_files'] = len(files)
self._data['volumes'].append(volume)
def get(self):
self._data['delta'] = str(datetime.now() - self._start_time)
self._data['start'] = self._dt(self._start_time)
self._data['end'] = self._dt(datetime.now())
return self._data
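A minimal sketch of the Info lifecycle, using an argparse Namespace as manga_py.cli does (field values are illustrative). start() must be called before get(), since get() computes the delta from the recorded start time:

```python
from argparse import Namespace

info = Info(Namespace(url='https://example.org/manga/title', cbz=False))
info.start()
info.add_volume('https://example.org/manga/title/chapter-1', 'Manga/title/vol_001.zip')
info.set_last_volume_error('download failed')  # flags the volume added above
report = info.get()
print(report['delta'], report['volumes'][0]['error'])  # e.g. 0:00:00.000042 True
```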

View File

@@ -0,0 +1,3 @@
__version__ = '1.11.0'
__repo_name__ = 'manga-py/manga-py'
__downloader_uri__ = 'https://github.com/%s' % __repo_name__

View File

@@ -0,0 +1,45 @@
from argparse import ArgumentParser
from loguru import logger
from .info import Info
from .providers import get_provider
class Parser:
params = None
provider = None
def __init__(self, args):
self.params = {}
self.args = args
self._add_params(args)
def _add_params(self, params: ArgumentParser = None):
if params is None:
params = self.args.parse_args()
else:
params = params.parse_args()
self.params = params.__dict__
def init_provider(
self,
progress: callable = None,
log: callable = None,
quest: callable = None,
info: Info = None,
quest_password: callable = None,
):
provider = get_provider(self.params.get('url', ''))
if isinstance(provider, bool):
raise AttributeError('Provider not found')
self.provider = provider(info) # provider __init__
self.provider.set_progress_callback(None if self.params['quiet'] else progress)
self.provider.set_log_callback(log)
self.provider.set_quest_callback(quest)
self.provider.set_quest_password_callback(quest_password)
@logger.catch
def start(self):
self.provider.process(self.params['url'], self.params)
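A hedged wiring sketch for Parser: it expects an ArgumentParser whose parsed namespace carries at least url and quiet (the full argument set lives in manga_py.cli, which is not part of this hunk):

```python
from argparse import ArgumentParser

cli = ArgumentParser()
cli.add_argument('url')
cli.add_argument('--quiet', action='store_true')

parser = Parser(cli)  # parse_args() consumes sys.argv
parser.init_provider(progress=None, log=print, quest=None)
parser.start()  # runs provider.process(url, params) under @logger.catch
```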

View File

@@ -0,0 +1,274 @@
import json
import re
from abc import ABC
from sys import stderr
from .base_classes import (
Abstract,
Archive,
Base,
Callbacks,
# TODO
CloudFlareProtect,
Static
)
from .fs import (
get_temp_path,
is_file,
basename,
remove_file_query_params,
path_join,
unlink,
file_size,
)
from .http import MultiThreads
from .info import Info
from .meta import __downloader_uri__
from .meta import __version__
class Provider(Base, Abstract, Static, Callbacks, ABC):
_volumes_count = 0
_archive = None
_zero_fill = False
_with_manga_name = False
_info = None
_simulate = False
_volume = None
_show_chapter_info = False
__debug = False
_override_name = ''
def __init__(self, info: Info = None):
super().__init__()
self.re = re
self.json = json
self._params['temp_directory'] = get_temp_path()
self._info = info
def _params_parser(self, params):
# image params
self._set_if_not_none(self._image_params, 'crop_blank', params.get('crop_blank', False))
self._set_if_not_none(
self._image_params, 'crop',
(params.get('xt', 0),
params.get('xr', 0),
params.get('xb', 0),
params.get('xl', 0)),
)
self._image_params['no_webp'] = params.get('no_webp', False)
# downloading params
self._set_if_not_none(self._params, 'destination', params.get('destination', None))
self._zero_fill = params.get('zero_fill')
self._with_manga_name = params.get('with_manga_name')
self._simulate = params.get('simulate')
self._show_chapter_info = params.get('show_current_chapter_info', False)
self.__debug = params.get('debug', False)
self._override_name = self._params.get('override_archive_name')
if self._with_manga_name and self._override_name:
raise RuntimeError('Conflicting parameters: use either --with-manga-name or --override-archive-name, not both')
def process(self, url, params=None): # Main method
self._params['url'] = url
params = params if isinstance(params, dict) else {}
self._params_parser(params)
for i in params:
self._params.setdefault(i, params[i])
proxy = params.get('proxy', None)
if proxy is not None:
self._storage['proxies'] = {
'http': proxy,
'https': proxy,
}
self.prepare_cookies()
self._storage['manga_name'] = self.get_manga_name()
self._storage['main_content'] = self.content
self._storage['chapters'] = self._prepare_chapters(self.get_chapters())
if not self._params.get('reverse_downloading', False):
self._storage['chapters'] = self._storage['chapters'][::-1]
self._storage['init_cookies'] = self._storage['cookies']
self._info and self._info.set_ua(self.http().user_agent)
self.loop_chapters()
def _check_archive(self):
# check
_path = self.get_archive_path()
not_allow_archive = not self._params.get('rewrite_exists_archives', False)
return not_allow_archive and is_file(_path)
def _download_chapter(self):
if not self._simulate:
try:
self.before_download_chapter()
self._storage['files'] = self.get_files()
self.loop_files()
except Exception as e:
# Main debug here
if self.__debug:
raise e
self.log([e], file=stderr)
self._info.set_last_volume_error(e)
def loop_chapters(self):
volumes = self._storage['chapters']
_min = self._params.get('skip_volumes', 0)
_max = self._params.get('max_volumes', 0)
count = 0 # count downloaded chapters
for idx, __url in enumerate(volumes):
self.chapter_id = idx
if idx < _min or (count >= _max > 0) or self._check_archive():
continue
count += 1
self._info.add_volume(self.chapter_for_json(), self.get_archive_path())
self._download_chapter()
def loop_files(self):
if isinstance(self._storage['files'], list):
if self._show_chapter_info:
self.log('\n\nCurrent chapter url: %s\n' % (self.chapter,))
if len(self._storage['files']) == 0:
# see Std
self.log('Error processing file: %s' % self.get_archive_name(), file=stderr)
return
self._archive = Archive()
self._archive.not_change_files_extension = self._params.get('not_change_files_extension', False)
self._archive.no_webp = self._image_params.get('no_webp', False)
self._call_files_progress_callback()
self._multi_thread_save(self._storage['files'])
self.make_archive()
def _save_file_params_helper(self, url, idx):
if url is None:
_url = self.http().normalize_uri(self.get_current_file())
else:
_url = url
_url = self.before_file_save(_url, idx)
filename = remove_file_query_params(basename(_url))
_path = self.remove_not_ascii(self._image_name(idx, filename))
_path = get_temp_path(_path)
return _path, idx, _url
def save_file(self, idx=None, callback=None, url=None, in_arc_name=None):
_path, idx, _url = self._save_file_params_helper(url, idx)
if not is_file(_path) or file_size(_path) < 32:
self.http().download_file(_url, _path, idx)
self.after_file_save(_path, idx)
self._archive.add_file(_path)
callable(callback) and callback()
return _path
def get_archive_path(self):
if self._override_name:
_path = "{}_{}".format(self._override_name, str(self.normal_arc_name(self.get_chapter_index().split('-'))))
else:
# see Std
_path = remove_file_query_params(self.get_archive_name())
_path = self.remove_not_ascii(_path)
if not _path:
_path = str(self.chapter_id)
name = self._params.get('name', '')
if not len(name):
name = self._storage['manga_name']
additional_data_name = ''
if self.http().has_error:
additional_data_name = 'ERROR.'
self.http().has_error = False
return path_join(
self._params.get('destination', 'Manga'),
name,
_path + '.%s%s' % (additional_data_name, self._archive_type())
) \
.replace('?', '_') \
.replace('"', '_') \
.replace('>', '_') \
.replace('<', '_') \
.replace('|', '_') # Windows...
def make_archive(self):
_path = self.get_archive_path()
info = 'Site: {}\nDownloader: {}\nVersion: {}'.format(self.get_url(), __downloader_uri__, __version__)
# """
# make book info
# """
# if self._params['cbz']:
# self._archive.add_book_info(self._arc_meta_info())
self._archive.add_info(info)
try:
self._archive.make(_path)
except OSError as e:
self.log('')
self.log(e)
self.log(e, file=stderr)
self._info.set_last_volume_error(str(e))
unlink(_path)
raise e
def html_fromstring(self, url, selector: str = None, idx: int = None):
params = {}
if isinstance(url, dict):
params = url['params']
url = url['url']
return self.document_fromstring(self.http_get(url, **params), selector, idx)
def _multi_thread_callback(self):
self._call_files_progress_callback()
self._storage['current_file'] += 1
def _multi_thread_save(self, files):
threading = MultiThreads()
# hack
self._storage['current_file'] = 0
if self._params.get('max_threads', None) is not None:
threading.max_threads = int(self._params.get('max_threads'))
for idx, url in enumerate(files):
threading.add(self.save_file, (idx, self._multi_thread_callback, url, None))
threading.start()
def cf_protect(self, url):
"""
WARNING! This function replaces cookies!
:param url: str
:return:
"""
cf = CloudFlareProtect()
params = cf.run(url)
if len(params):
self.update_cookies(params[0])
self.update_ua(params[1])
self._params['cf-protect'] = True
def update_ua(self, ua):
self._storage['user_agent'] = ua
self.http().user_agent = ua
self._info and self._info.set_ua(ua)
def update_cookies(self, cookies):
for k in cookies:
self._storage['cookies'][k] = cookies[k]
self.http().cookies[k] = cookies[k]
@property
def content(self):
content = self._storage.get('main_content', None)
if content is None:
content = self.get_main_content()
return content

View File

@@ -0,0 +1,12 @@
from .rawdevart_com import RawDevArtCom
class FirstKissMangaCom(RawDevArtCom):
_chapter_selector = r'/manga/[^/]+/chapter-(\d+(?:-\d+)?)'
def get_files(self):
parser = self.html_fromstring(self.chapter)
return self._images_helper(parser, '.page-break img[data-src]', attr='data-src')
main = FirstKissMangaCom

View File

@@ -0,0 +1,24 @@
from .authrone_com import AuthroneCom
from .helpers.std import Std
class ThreeAsqInfo(AuthroneCom, Std):
_ch_selector = '.mng_det ul.lst > li > a.lst'
def get_chapter_index(self) -> str:
return self.re.search(
r'\.info/[^/]+/([^/]+)',
self.chapter
).group(1).replace('.', '-')
def get_main_content(self):
return self._get_content('{}/{}/')
def get_manga_name(self) -> str:
return self._get_name(r'\.info/([^/]+)')
def get_files(self):
return list(set(super().get_files())) # remove doubles
main = ThreeAsqInfo

View File

@@ -0,0 +1,69 @@
from manga_py.provider import Provider
from .helpers.std import Std
class SevenSamaCom(Provider, Std):
def get_archive_name(self) -> str:
self._vol_fill = True
name = self.re.sub('[^a-zA-Z0-9]+', '_', self.chapter['chapter_name'])
return self.normal_arc_name([
self.chapter['number'],
str(self.chapter_id),
name
])
def get_chapter_index(self) -> str:
return self.chapter_id
def get_main_content(self):
return self._get_content('{}/manga/{}')
def get_manga_name(self) -> str:
return self._get_name('/manga/([^/]+)')
def get_chapters(self):
idx = self.re.search(r'/manga/.+?/(\d+)', self.get_url()).group(1)
chapters = []
for i in range(1, 1000):
content = self.http_get('{}/series/chapters_list.json?page={}&id_serie={}'.format(
self.domain, i, idx
), {'x-requested-with': 'XMLHttpRequest'})
data = self.json.loads(content)
if data['chapters'] is False:
break
chapters += self.__prepare_chapters(data['chapters'])
return chapters
@staticmethod
def __prepare_chapters(items):
chapters = []
for i in items:
for k, j in i['releases'].items():
chapter = i.copy()
chapter['release'] = j
chapter['release_id'] = k
chapters.append(chapter)
return chapters
def get_files(self):
url = self.chapter_for_json()
content = self.http_get('{}{}'.format(self.domain, url))
api_key = self.re.search(r'this\.page\.identifier\s*=\s*[\'"](.+)[\'"]', content).group(1)
url = '{}/leitor/pages/{}.json?key={}'.format(
self.domain, self.chapter['release']['id_release'], api_key
)
images = self.json.loads(self.http_get(url, {'x-requested-with': 'XMLHttpRequest'}))
return images['images']
def get_cover(self) -> str:
return self._cover_from_content('.cover img.cover')
def book_meta(self) -> dict:
pass
def chapter_for_json(self) -> str:
return self.chapter['release']['link']
main = SevenSamaCom

View File

@@ -0,0 +1,67 @@
from manga_py.provider import Provider
from .helpers import eight_muses_com
from .helpers.std import Std
class EightMusesCom(Provider, Std):
_chapters = None
chapter_selector = '.gallery a.c-tile[href^="/comics/"]'
helper = None
_images_path = 'image/fl'
def get_chapter_index(self) -> str:
re = self.re.compile(r'/(?:album|picture)/([^/]+/[^/]+(?:/[^/]+)?)')
ch = self.chapter
if isinstance(ch, list) and len(ch) > 0:
ch = ch[0]
if isinstance(ch, dict):
ch = ch.get('href')
idx = re.search(ch).group(1)
return '-'.join(idx.split('/'))
def get_main_content(self):
return self.http_get(self.get_url())
def get_manga_name(self) -> str:
return self._get_name('/album/([^/]+)')
def get_chapters(self):
chapters = self._elements(self.chapter_selector)
return self.helper.chapters(chapters)
def _parse_images(self, images) -> list:
return ['{}/{}/{}'.format(
self.domain,
self._images_path,
i.get('value')
) for i in images if i.get('value')]
@staticmethod
def _sort(items: dict) -> list:
items = [items[i] for i in sorted(items, key=lambda x: int(x)) if len(items[i]) > 5]
return list(set(items))
def get_files(self):
images = {}
_n = self.http().normalize_uri
for n, i in enumerate(self.chapter):
if n % 4 < 2:
img = self.html_fromstring(_n(i.get('href')), '#imageName,#imageNextName')
images[str(n)] = img[0]
images[str(n + 2)] = img[1]
return self._parse_images(self._sort(images))
def get_cover(self) -> str:
pass
def prepare_cookies(self):
self._chapters = []
self._base_cookies()
self.helper = eight_muses_com.EightMusesCom(self)
def book_meta(self) -> dict:
# todo meta
pass
main = EightMusesCom

View File

@@ -0,0 +1,63 @@
# All providers
For a template example, see `_template.py`.
## Functions:
```python
from manga_py.provider import Provider
# from .helpers.std import Std
class _Template(Provider):
# class _Template(Provider, Std): # extends utils
def get_archive_name(self) -> str:
pass
def get_chapter_index(self) -> str:
pass
def get_main_content(self): # call once
# return self._get_content('{}/manga/{}')
pass
def prepare_cookies(self): # for sites with cookie protection
# self._storage['proxies'] = auto_proxy()
# self._storage['cookies'] = self.http().get_base_cookies(self.get_url()).get_dict() # base cookies
pass
def get_manga_name(self) -> str:
# return self._get_name('/manga/([^/]+)')
return ''
def get_chapters(self): # call once
# return self._elements('a.chapter')
return []
def get_files(self): # called for each volume in the loop
return []
def get_cover(self) -> str:
# return self._cover_from_content('.cover img')
pass
def book_meta(self) -> dict:
"""
:see http://acbf.wikia.com/wiki/Meta-data_Section_Definition
:return {
'author': str,
'title': str,
'annotation': str,
'keywords': str,
'cover': str,
'rating': str,
}
"""
pass
main = _Template
```

View File

@@ -0,0 +1,18 @@
from .rawdevart_com import RawDevArtCom
class ThreeAsqOrg(RawDevArtCom):
def get_chapter_index(self) -> str:
return self.chapter.split('/')[-2]
def get_files(self):
parser = self.html_fromstring(self.chapter)
return self._images_helper(parser, 'img.wp-manga-chapter-img')
@property
def chapter(self):
return super().chapter + '?style=list'
main = ThreeAsqOrg

View File

@@ -0,0 +1,877 @@
import re
import importlib
providers_list = {
'1stkissmanga_com': [
r'1stkissmanga\.com/manga/.',
],
'3asq_info': [
r'3asq\.info/.',
],
'_3asq_org': [
r'3asq\.org/.',
],
'7sama_com': [
r'7sama\.com/manga/.',
],
# '8muses_com': [
# r'8muses\.com/comics/album/.',
# ],
'ac_qq_com': [
r'ac\.qq\.com/Comic.+?/id/\d',
],
'acomics_ru': [
r'acomics\.ru/~.',
],
'adulto_seinagi_org': [
r'adulto\.seinagi\.org/(series|read)/.',
r'xanime-seduccion\.com/(series|read)/.',
r'twistedhelscans\.com/(series|read)/.',
r'reader\.evilflowers\.com/(series|read)/.',
],
'allhentai_ru': [
r'allhentai\.ru/.',
],
'animextremist_com': [
r'animextremist\.com/mangas-online/.',
],
'antisensescans_com': [
r'antisensescans\.com/online/(series|read)/.',
],
'asmhentai_com': [
r'asmhentai\.com/(g|gallery)/\d',
],
'atfbooru_ninja': [
r'atfbooru\.ninja/posts.',
],
# 'authrone_com': [
# r'authrone\.com/manga/.',
# ],
'bato_to': [
r'bato\.to/(series|chapter)/\d',
],
'blogtruyen_com': [
r'blogtruyen\.com/.',
],
'bns_shounen_ai_net': [
r'bns\.shounen-ai\.net/read/(series|read)/.',
],
'boredomsociety_xyz': [
r'boredomsociety\.xyz/(titles/info|reader)/\d',
],
'cdmnet_com_br': [
r'cdmnet\.com\.br/titulos/.',
],
'chochox_com': [
r'chochox\.com/.',
],
'choutensei_260mb_net': [
r'choutensei\.260mb\.net/(series|read)/.',
],
'comicextra_com': [
r'comicextra\.com/.',
],
# 'comico_co_id_content': [
# r'comico\.co\.id/content\?contentId=\d',
# ],
'comico_co_id_titles': [
r'comico\.co\.id/titles/\d',
],
'comic_webnewtype_com': [
r'comic\.webnewtype\.com/contents/.',
],
'comico_jp': [
r'comico\.jp(?:/challenge)?/(detail|articleList).+titleNo.',
],
'comicsandmanga_ru': [
r'comicsandmanga\.ru/online-reading/.',
],
'comicvn_net': [
r'comicvn\.net/truyen-tranh-online/.',
],
'cycomi_com': [
r'cycomi\.com/fw/cycomibrowser/chapter/title/\d',
],
'danbooru_donmai_us': [
r'danbooru\.donmai\.us/posts.',
],
'darkskyprojects_org': [
r'darkskyprojects\.org/biblioteca/.',
],
'dejameprobar_es': [
r'dejameprobar\.es/slide/.',
r'menudo-fansub\.com/slide/.',
r'npscan\.mangaea\.net/slide/.',
r'snf\.mangaea\.net/slide/.',
r'yuri-ism\.net/slide/.',
],
'desu_me': [
r'desu\.me/manga/.',
],
'digitalteam1_altervista_org': [
r'digitalteam1\.altervista\.org/reader/read/.',
],
# 'dm5_com': [
# r'dm5\.com/manhua-.',
# ],
'doujins_com': [
r'doujins\.com/gallery/.',
r'doujin-moe\.us/gallery/.',
],
'e_hentai_org': [
r'e-hentai\.org/g/\d',
],
'exhentai_org': [
r'exhentai\.org/g/\d',
],
'fanfox_net': [
r'fanfox\.net/manga/.',
],
'freeadultcomix_com': [
r'freeadultcomix\.com/.',
],
'freemanga_to': [
r'freemanga\.to/(manga|chapter)/.',
],
'funmanga_com': [
r'funmanga\.com/.',
],
'gmanga_me': [
r'gmanga\.me/mangas/.',
],
'gomanga_co': [
r'gomanga\.co/reader/.',
r'jaiminisbox\.com/reader/.',
r'kobato\.hologfx\.com/reader/.',
# r'atelierdunoir\.org/reader/.',
r'seinagi\.org/reader/.',
],
'goodmanga_net': [
r'goodmanga\.net/.',
],
'helveticascans_com': [
r'helveticascans\.com/r/(series|read)/.',
],
'hakihome_com': [
r'hakihome\.com/.',
],
'hatigarmscans_eu': [
r'hatigarmscans\.eu/hs/(series|read).',
r'hatigarmscans\.net/hs/(series|read).',
r'hatigarmscans\.net/manga/.',
],
'heavenmanga_biz': [
r'heavenmanga\.\w+/.',
],
'hentai2read_com': [
r'hentai2read\.com/.',
],
'hentai_cafe': [
r'hentai\.cafe/.',
],
'hentai_chan_me': [
r'hentai-chan\.me/(related|manga|online)/.', # todo
],
'hentai_image_com': [
r'hentai-image\.com/image/.',
],
'hentaihand_com': [
r'hentaihand\.com/comic/\d',
],
'hentaifox_com': [
r'hentaifox\.com/.',
],
'hentaihere_com': [
r'hentaihere\.com/m/.',
],
'hentaiporns_net': [
r'hentaiporns\.net/.'
],
'hentairead_com': [
r'hentairead\.com/.',
],
'hitomi_la': [
r'hitomi\.la/(galleries|reader)/.',
],
'hgamecg_com': [
r'hgamecg\.com/index/category/\d',
],
'hitmanga_eu': [
r'hitmanga\.eu/.',
r'mymanga\.io/.',
],
'hocvientruyentranh_com': [
r'hocvientruyentranh\.com/(manga|chapter)/.',
],
'hoducomics_com': [
r'hoducomics\.com/webtoon/list/\d',
r'hodu1\.com/webtoon/list/\d',
],
'hotchocolatescans_com': [
r'hotchocolatescans\.com/fs/(series|read)/.',
r'mangaichiscans\.mokkori\.fr/fs/(series|read)/.',
r'taptaptaptaptap\.net/fs/(series|read)/.',
],
'riceballicious_info': [
r'riceballicious\.info/fs/reader/(series|read)/.',
],
'rocaca_com': [
r'rocaca\.com/manga/.',
],
'inmanga_com': [
r'inmanga\.com/ver/manga/.',
],
'isekaiscan_com': [
r'isekaiscan\.com/manga/.',
],
'japscan_com': [
r'japscan\.to/.',
],
'jurnalu_ru': [
r'jurnalu\.ru/online-reading/.',
],
'kissmanga_com': [
r'kissmanga\.com/Manga/.',
],
'komikcast_com': [
r'komikcast\.com/.',
],
'komikid_com': [
r'komikid\.com/manga/.',
r'mangazuki\.co/manga/.',
r'mangaforest\.com/manga/.',
r'mangadenizi\.com/.',
r'mangadoor\.com/manga/.',
r'manga\.fascans\.com/manga/.',
r'mangadesu\.net/manga/.',
r'mangahis\.com/manga/.',
r'cmreader\.info/manga/.',
r'rawmangaupdate\.com/manga/.',
r'mangaraw\.online/manga/.',
r'manhua-tr\.com/manga/.',
r'manga-v2\.mangavadisi\.org/manga/.',
r'universoyuri\.com/manga/.',
r'digitalteam1\.altervista\.org/manga/.',
# r'sosscanlation\.com/manga/.',
r'komikgue\.com/manga/.',
r'onma\.me/manga/.',
],
'kumanga_com': [
r'kumanga\.com/manga/\d',
],
'lector_kirishimafansub_com': [
r'lector\.kirishimafansub\.com/(lector/)?(series|read)/.',
],
'leitor_net': [
r'leitor\.net/manga/.',
],
'leomanga_com': [
r'leomanga\.com/manga/.',
],
'leviatanscans_com': [
r'leviatanscans\.com/comics/\d'
],
'lhtranslation_com': [
r'read\.lhtranslation\.com/(truyen|manga)-.',
r'lhtranslation\.net/(truyen|manga)-.',
],
'lolibooru_moe': [
r'lolibooru\.moe/post.',
],
'lolivault_net': [
r'lolivault\.net/online/(series|read).',
],
'luscious_net': [
r'luscious\.net/.+/album/.',
r'luscious\.net/albums/.',
],
'mangapark_org': [
r'mangapark\.org/(series|chapter)/', # is different!
],
'mang_as': [
r'mang\.as/manga/.',
],
'manga_ae': [
r'mangaae\.com/.',
],
'manga_fox_com': [
r'manga-fox\.com/.',
r'manga-here\.io/.',
],
'manga_mexat_com': [
r'manga\.mexat\.com/category/.',
],
'manga_online_biz': [
r'manga-online\.biz/.',
],
'manga_online_com_ua': [
r'manga-online\.com\.ua/.+html',
],
'manga_room_com': [
r'manga-room\.com/manga/.',
],
'manga_sh': [
r'manga\.sh/comics/.',
],
'manga_tube_me': [
r'manga-tube\.me/series/.',
],
'mangaarabteam_com': [
r'mangaarabteam\.com/.',
],
'manga_tr_com': [
r'manga-tr\.com/(manga|id)-.',
],
'mangabat_com': [
r'mangabat\.com/(manga|chapter)/.',
],
'mangabb_co': [
r'mangabb\.co/.',
],
'mangabox_me': [
r'mangabox\.me/reader/.',
],
'mangachan_me': [
r'mangachan\.me/(related|manga|online)/.',
r'yaoichan\.me/(manga|online).',
],
'mangachan_me_download': [
r'mangachan\.me/download/.',
r'hentai-chan\.me/download/.',
r'yaoichan\.me/download/.',
],
'mangacanblog_com': [
r'mangacanblog\.com/.',
],
'mangaclub_ru': [
r'mangaclub\.ru/.',
],
'mangadeep_com': [
r'mangadeep\.com/.',
r'manga99\.com/.',
],
'mangadex_org': [
r'mangadex\.org/manga/.',
],
'mangadex_com': [
r'mangadex\.com/(title|chapter)/.',
r'mangadex\.org/(title|chapter)/.',
],
'mangadex_info': [
r'mangadex\.info/manga/.',
],
'mangaeden_com': [
r'mangaeden\.com/[^/]+/[^/]+-manga/.',
r'perveden\.com/[^/]+/[^/]+-manga/.',
],
'mangafans_us': [ # MangaNeloCom
r'mangafans\.us/manga/.',
r'mangahot\.org/read-manga/.',
],
# 'mangaforall_com': [
# r'mangaforall\.com/m/.',
# ],
'mangafreak_net_download': [
r'mangafreak\.net/Manga/.',
],
'mangafull_org': [
r'mangafull\.org/manga/.',
],
# 'mangago_me': [
# r'mangago\.me/read-manga/.',
# ],
'mangahasu_se': [
r'mangahasu\.se/.',
],
'mangaheaven_club': [
r'mangaheaven\.club/read-manga/.',
],
'mangaheaven_xyz': [
r'mangaheaven\.xyz/manga/.',
],
'mangahere_cc': [
r'mangahere\.co/manga/.',
r'mangahere\.cc/manga/.',
],
'mangahi_net': [
r'mangahi\.net/.',
],
'mangaid_me': [
r'mangaid\.co/manga/.',
r'mangaid\.net/manga/.',
r'mangaid\.me/manga/.',
],
'mangahome_com': [
r'mangahome\.com/manga/.',
],
'mangahub_io': [
r'mangahub\.io/(manga|chapter)/.',
# r'mangareader\.site/(manga|chapter)/.',
r'mangakakalot\.fun/(manga|chapter)/.',
r'mangahere\.onl/(manga|chapter)/.',
],
'mangahub_ru': [
r'mangahub\.ru/.',
],
'mangaindo_web_id': [
r'mangaindo\.web\.id/.',
],
'mangainn_net': [
r'mangainn\.net/.',
],
'mangajinnofansub_com': [ # normal
r'mangajinnofansub\.com/lector/(series|read)/.',
],
'mangakakalot_com': [
r'mangakakalot\.com/(manga|chapter)/.',
],
'mangakatana_com': [
r'mangakatana\.com/manga/.',
],
'mangaku_web_id': [
# r'mangaku\.web\.id/.',
r'mangaku\.in/.',
],
'mangalib_me': [
r'mangalib\.me/.',
],
'mangalife_us': [
r'mangalife\.us/(read-online|manga)/.',
],
'mangamew_com': [
r'mangamew\.com/(\w+-)?manga/.',
],
'mangamew_com_vn': [
r'mangamew\.com/(\w+-)?truyen/.',
],
'manganelo_com': [
r'manganelo\.com/(manga|chapter)/.',
],
'mangaon_net': [
r'mangaon\.net/(manga-info|read-online)/.',
],
'mangaonline_com_br': [
r'mangaonline\.com\.br/.',
],
'mangaonline_today': [
r'mangaonline\.today/.',
],
'mangaonlinehere_com': [
r'mangaonlinehere\.com/(manga-info|read-online)/.',
],
'mangapanda_com': [
r'mangapanda\.com/.',
],
'mangapark_me': [
r'mangapark\.me/manga/.',
],
'mangareader_net': [
r'mangareader\.net/.',
],
'mangareader_site': [
r'mangareader\.site',
],
'mangareader_xyz': [
r'mangareader\.xyz/manga/.',
r'mangareader\.xyz/.+?/chapter-\d',
# r'mangafox\.cc/manga/.',
# r'mangafox\.cc/.+?/chapter-\d',
],
'mangarock_com': [
r'mangarock\.com/manga/.',
],
'mangarussia_com': [
r'mangarussia\.com/(manga|chapter)/.',
],
'mangasaurus_com': [
r'mangasaurus\.com/(manga|view).',
],
'mangaseeonline_us': [
r'mangaseeonline\.us/(read-online|manga)/.',
],
'mangashiro_net': [
r'mangashiro\.net/.',
],
'mangasupa_com': [
r'mangasupa\.com/(manga|chapter)/.',
],
'mangasushi_net': [
r'mangasushi\.net/manga/.',
],
'mangatail_com': [
r'mangasail\.com/(manga|chapter|node|content)/.',
r'mangasail\.co/(manga|chapter|node|content)/.',
r'mangatail\.me/(manga|chapter|node|content)/.',
],
'mangatown_com': [
r'mangatown\.com/manga/.',
],
'mangatrue_com': [
r'mangatrue\.com/manga/.',
r'mangaall\.com/manga/.',
],
'mangawindow_net': [
r'mangawindow\.net/(series|chapter)/\d', # is different!
],
'mangax_net': [
r'mangax\.net/\w/.',
],
'mangazuki_me': [
r'mangazuki\.me/manga/.',
r'mangazuki\.info/manga/.',
r'mangazuki\.online/manga/.',
],
'manhuagui_com': [
r'manhuagui\.com/comic/\d',
],
'manhuatai_com': [
r'manhuatai\.com/.',
],
'manhwa_co': [
r'manhwa\.co/.',
],
# 'manhwahentai_com': [
# r'manhwahentai\.com/manhwa/.'
# ],
'merakiscans_com': [
r'merakiscans\.com/manga/.',
],
'mintmanga_com': [
r'mintmanga\.com/.',
],
'mngcow_co': [
r'mngcow\.co/.',
],
'mngdoom_com': [
r'mangadoom\.co/.',
r'mngdoom\.com/.',
],
'mymangalist_org': [
r'mymangalist\.org/(read|chapter)-',
],
'myreadingmanga_info': [
r'myreadingmanga\.info/.',
],
'neumanga_tv': [
r'neumanga\.tv/manga/.',
],
'nhentai_net': [
r'nhentai\.net/g/.',
],
'niadd_com': [
r'niadd\.com/manga/.',
],
'nightow_net': [
r'nightow\.net/online/\?manga=.',
],
'nineanime_com': [
r'nineanime\.com/manga/.+\.html'
],
'ninemanga_com': [
r'ninemanga\.com/(manga|chapter).',
r'addfunny\.com/(manga|chapter).',
],
'noranofansub_com': [
r'noranofansub\.com(/lector)?/(series/|read/)?.',
],
'nozominofansub_com': [ # mangazuki_co
r'nozominofansub\.com/public(/index\.php)?/manga/.',
r'godsrealmscan\.com/public(/index\.php)?/manga/.',
],
# 'nude_moon_me': [
# r'nude-moon\.me/\d',
# ],
'otakusmash_com': [
r'otakusmash\.com/.',
r'mrsmanga\.com/.',
r'mentalmanga\.com/.',
r'mangasmash\.com/.',
r'omgbeaupeep\.com/comics/.',
],
'otscans_com': [
r'otscans\.com/foolslide/(series|read)/.',
],
'pecintakomik_com_manga': [
r'pecintakomik\.com/manga/.',
],
'pecintakomik_com': [
r'pecintakomik\.com/.',
],
'plus_comico_jp_manga': [
r'plus\.comico\.jp/manga/\d',
],
'plus_comico_jp': [
r'plus\.comico\.jp/store/\d',
],
'porncomix_info': [
r'porncomix\.info/.',
],
'psychoplay_co': [
r'psychoplay\.co/(series|read)/.',
],
'puzzmos_com': [
r'puzzmos\.com/manga/.',
],
r'pururin_io': [
r'pururin\.io/(gallery|read)/.',
],
'pzykosis666hfansub_com': [
r'pzykosis666hfansub\.com/online/.',
],
'ravens_scans_com': [
r'ravens-scans\.com(/lector)?/(serie/|read/).',
],
'raw_senmanga_com': [
r'raw\.senmanga\.com/.',
],
'rawdevart_com': [
r'rawdevart\.com/manga/.',
],
'rawlh_com': [
r'lhscan\.net/(truyen|manga|read)-.',
r'rawqq\.com/(truyen|manga|read)-.',
r'rawqv\.com/(truyen|manga|read)-.',
],
'rawneko_com': [
r'rawneko\.com/manga/.',
],
'read_egscans_com': [
r'read\.egscans\.com/.',
],
'read_powermanga_org': [
r'lector\.dangolinenofansub\.com/(series|read)/.',
r'read\.powermanga\.org/(series|read)/.',
# r'read\.yagami\.me/(series|read)/.',
r'reader\.kireicake\.com/(series|read)/.',
r'reader\.shoujosense\.com/(series|read)/.',
r'reader\.whiteoutscans\.com/(series|read)/.',
r'slide\.world-three\.org/(series|read)/.',
r'manga\.animefrontline\.com/(series|read)/.',
r'reader\.s2smanga\.com/(series|read)/.',
r'reader\.seaotterscans\.com/(series|read)/.',
r'reader\.idkscans\.com/(series|read)/.',
r'reader\.thecatscans\.com/(series|read)/.',
r'reader\.deathtollscans\.net/(series|read)/.',
r'lector\.ytnofan\.com/(series|read)/.',
r'reader\.jokerfansub\.com/(series|read)/.',
r'lector\.patyscans\.com/(series|read)/.',
r'truecolorsscans\.miocio\.org/(series|read)/.',
r'reader\.letitgo\.scans\.today/(series|read)/.',
r'reader\.fos-scans\.com/(series|read)/.',
r'reader\.serenade\.moe/(series|read)/.',
r'reader\.vortex-scans\.com/(series|read)/.',
r'reader\.roseliascans\.com/(series|read)/.',
r'reader\.silentsky-scans\.net/(series|read)/.',
r'hoshiscans\.shounen-ai\.net/(series|read)/.',
r'digitalteamreader\.netsons\.org/(series|read)/.',
r'reader\.manga-download\.org/(series|read)/.',
],
'read_yagami_me': [
r'read\.yagami\.me/series/\w',
],
# 'readcomicbooksonline_org_manga': [ # todo #168
# r'readcomicbooksonline\.net/manga/.',
# r'readcomicbooksonline\.org/manga/.',
# ],
# 'readcomicbooksonline_org': [
# r'readcomicbooksonline\.net/.',
# r'readcomicbooksonline\.org/.',
# ],
'comicpunch_net_manga': [
r'comicpunch\.net/asiancomics/.',
],
'comicpunch_net': [
r'comicpunch\.net/.',
],
'reader_championscans_com': [
r'reader\.championscans\.com/(series|read)/.',
],
'reader_imangascans_org': [
r'reader\.imangascans\.org/.',
],
# 'readhentaimanga_com': [
# r'readhentaimanga\.com/.',
# ],
'readcomiconline_to': [
r'readcomiconline\.to/Comic/.',
],
'readcomicsonline_ru': [
r'readcomicsonline\.ru/comic/.',
],
'readmanga_me': [
r'readmanga\.me/.',
],
'readmanga_eu': [
r'readmanga\.eu/manga/\d+/.',
],
'readmng_com': [
r'readmng\.com/.',
],
'readms_net': [
r'readms\.net/(r|manga)/.',
],
'remanga_org': [
r'remanga\.org/manga/.',
],
'santosfansub_com': [
r'santosfansub\.com/Slide/.',
],
'selfmanga_ru': [
r'selfmanga\.ru/.',
],
'senmanga_com': [
r'senmanga\.com/.',
],
'shakai_ru': [
r'shakai\.ru/manga.*?/\d',
],
'shogakukan_co_jp': [
r'shogakukan\.co\.jp/books/\d',
r'shogakukan\.co\.jp/magazines/series/\d',
],
'shogakukan_tameshiyo_me': [
r'shogakukan\.tameshiyo\.me/\d',
],
'siberowl_com': [
r'siberowl\.com/mangas/.',
],
'sleepypandascans_co': [
r'sleepypandascans\.co/(Series|Reader)/.',
],
# 'somanga_net': [
# r'somanga\.net/(leitor|manga)/.',
# r'somangas\.net/(leitor|manga)/.',
# ],
'subapics_com': [
# r'subapics\.com/manga/.',
# r'subapics\.com/.+-chapter-.',
r'mangakita\.net/manga/.',
r'mangakita\.net/.+-chapter-.',
r'komikstation\.com/manga/.',
r'komikstation\.com/.+-chapter-.',
r'mangavy\.com/manga/.',
r'mangavy\.com/.+-chapter-.',
r'mangakid\.net/manga/.',
r'mangakid\.net/.+-chapter-.',
],
'submanga_online': [
r'submanga\.online/manga/.',
],
'sunday_webry_com': [
r'sunday-webry\.com/series/\d',
],
'taadd_com': [
r'taadd\.com/(book|chapter)/.',
],
'tapas_io': [
r'tapas\.io/episode/\d',
r'tapas\.io/series/\w',
],
'tenmanga_com': [
r'tenmanga\.com/(book|chapter)/.',
],
'tmofans_com': [
r'tmofans\.com/library/manga/\d',
],
'translate_webtoons_com': [
r'translate\.webtoons\.com/webtoonVersion\?webtoonNo.',
],
'trashscanlations_com': [
r'trashscanlations\.com/series/.',
],
'tonarinoyj_jp': [
r'tonarinoyj\.jp/episode/.',
],
'toonkor_co': [
r'toonkor\.co/.',
],
'triplesevenscans_com': [
r'sensescans\.com/reader/(series|read)/.',
r'triplesevenscans\.com/reader/(series|read)/.',
r'cm-scans\.shounen-ai\.net/reader/(series|read)/.',
r'yaoislife\.shounen-ai\.net/reader/(series|read)/.',
r'fujoshibitches\.shounen-ai\.net/reader/(series|read)/.',
],
'truyen_vnsharing_site': [
r'truyen\.vnsharing\.site/index/read/.',
],
'truyenchon_com': [
r'truyenchon\.com/truyen/.',
r'nettruyen\.com/truyen-tranh/.',
],
'truyentranhtuan_com': [
r'truyentranhtuan\.com/.',
],
'tsumino_com': [
r'tsumino\.com/Book/Info/\d',
r'tsumino\.com/Read/View/\d',
],
# 'tumangaonline_com': [
# r'tumangaonline\.com/.',
# r'tumangaonline\.me/.',
# ],
'unionmangas_net': [
r'unionmangas\.cc/(leitor|manga)/.',
r'unionmangas\.net/(leitor|manga)/.',
r'unionmangas\.site/(leitor|manga)/.',
],
'viz_com': [
r'viz\.com/shonenjump/chapters/.',
],
'web_ace_jp': [
r'web-ace\.jp/youngaceup/contents/\d',
],
'webtoon_bamtoki_com': [
r'webtoon\.bamtoki\.com/.',
r'webtoon\.bamtoki\.se/.',
],
'webtoons_com': [
r'webtoons\.com/[^/]+/[^/]+/.',
],
'webtoontr_com': [
r'webtoontr\.com/_/.',
],
'westmanga_info': [
r'westmanga\.info/.',
],
'whitecloudpavilion_com': [
r'whitecloudpavilion\.com/manga/free/manga/.',
],
'wiemanga_com': [
r'wiemanga\.com/(manga|chapter)/.',
],
'wmanga_ru': [
r'wmanga\.ru/starter/manga_.',
],
'yande_re': [
r'yande\.re/post.',
],
'zeroscans_com': [
r'zeroscans\.com/manga/.',
r'manhwareader\.com/manga/.',
],
'zingbox_me': [
r'zingbox\.me/.',
],
'zip_read_com': [
r'zip-read\.com/.',
],
'zmanga_net': [
r'zmanga\.net/.',
],
}
def __check_provider(provider, url):
items = [r'\b' + i for i in provider]
reg = '(?:' + '|'.join(items) + ')'
return re.search(reg, url)
def get_provider(url):
fromlist = 'manga_py.providers'
for i in providers_list:
if __check_provider(providers_list[i], url):
provider = importlib.import_module('%s.%s' % (fromlist, i))
return provider.main
return False
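get_provider() tries each provider's patterns (anchored with \b) against the URL and lazily imports the first matching module, returning its main class, or False when nothing matches. A short sketch:

```python
klass = get_provider('https://mangadex.org/title/12345')
if klass is False:
    raise AttributeError('Provider not found')  # mirrors the check in Parser.init_provider
provider = klass()  # the real wiring passes an Info instance; see Parser.init_provider
```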

View File

@@ -0,0 +1,50 @@
from manga_py.provider import Provider
from .helpers.std import Std
class _Template(Provider, Std):
def get_archive_name(self) -> str:
pass
def get_chapter_index(self) -> str:
pass
def get_main_content(self):
pass
def get_manga_name(self) -> str:
return ''
def get_chapters(self):
# return self._elements('a.chapter')
return []
def get_files(self):
return []
def get_cover(self) -> str:
# return self._cover_from_content('.cover img')
pass
def book_meta(self) -> dict:
"""
:see http://acbf.wikia.com/wiki/Meta-data_Section_Definition
return {
'author': str,
'title': str,
'annotation': str,
'keywords': str,
'cover': str,
'rating': str,
}
"""
pass
def chapter_for_json(self) -> str:
# override std param, if needed
# return self.chapter
pass
main = _Template

View File

@@ -0,0 +1,52 @@
from manga_py.crypt import AcQqComCrypt
from manga_py.provider import Provider
from .helpers.std import Std
class AcQqCom(Provider, Std):
_decoder = None
_re = None
def get_chapter_index(self) -> str:
return self.re.search(r'/cid/(\d+)', self.chapter).group(1)
def get_main_content(self):
content = self._storage.get('main_content', None)
if content is not None:
return content
idx = self._get_name(r'/id/(\d+)')
return self.http_get('{}/Comic/comicInfo/id/{}'.format(self.domain, idx))
def get_manga_name(self) -> str:
return self.text_content(self.content, '.works-intro-title strong', 0)
def get_chapters(self):
return self._elements('.chapter-page-all li a')[::-1]
def get_files(self):
content = self.http_get(self.chapter)
data = self._re.search(content).group(1)
data = self._decoder.decode(data)
return [i.get('url') for i in data.get('picture', [])][0:1]
def get_cover(self) -> str:
return self._cover_from_content('.works-cover img')
def prepare_cookies(self):
self._re = self.re.compile(r'var\s+DATA\s*=\s*[\'"](.*?)[\'"]')
self._decoder = AcQqComCrypt(self)
self._base_cookies()
def book_meta(self) -> dict:
result = {
'author': self.text_content(self.content, '.works-intro-digi em'),
'rating': self.text_content(self.content, 'p.ui-left strong'),
'cover': self.get_cover(),
'annotation': self.text_content(self.content, '.works-intro-short'),
'language': 'cn',
}
return result
main = AcQqCom

Some files were not shown because too many files have changed in this diff