#!/usr/bin/env python
# -*- coding: utf-8 -*-

# This file is part of the Calibre-Web (https://github.com/janeczku/calibre-web)
# Copyright (C) 2016-2019 lemmsh cervinko Kennyl matthazinski OzzieIsaacs
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

import logging
import uploader
import os
from flask_babel import gettext as _
import comic

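# Note (context, not from the original header): uploader and comic above, and epub/fb2
# imported further below, are Calibre-Web's own sibling modules rather than PyPI packages;
# they provide the BookMeta container and the per-format metadata readers used in process().
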
try:
    from lxml.etree import LXML_VERSION as lxmlversion
except ImportError:
    lxmlversion = None

__author__ = 'lemmsh'

logger = logging.getLogger("book_formats")

try:
    from wand.image import Image
    from wand import version as ImageVersion
    from wand.exceptions import PolicyError
    use_generic_pdf_cover = False
except (ImportError, RuntimeError) as e:
    logger.warning('cannot import Image, generating pdf covers for pdf uploads will not work: %s', e)
    use_generic_pdf_cover = True

try:
    from PyPDF2 import PdfFileReader
    from PyPDF2 import __version__ as PyPdfVersion
    use_pdf_meta = True
except ImportError as e:
    logger.warning('cannot import PyPDF2, extracting pdf metadata will not work: %s', e)
    use_pdf_meta = False

try:
    import epub
    use_epub_meta = True
except ImportError as e:
    logger.warning('cannot import epub, extracting epub metadata will not work: %s', e)
    use_epub_meta = False

try:
    import fb2
    use_fb2_meta = True
except ImportError as e:
    logger.warning('cannot import fb2, extracting fb2 metadata will not work: %s', e)
    use_fb2_meta = False

# Import PIL's Image under an alias so it does not shadow wand.image.Image imported above
# (both libraries expose a name called "Image").
try:
    from PIL import Image as PILImage
    use_PIL = True
except ImportError:
    use_PIL = False

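# For reference: the functions below build uploader.BookMeta objects. A minimal sketch of
# the structure this module relies on (field names inferred from the keyword arguments
# used in this file; the canonical definition lives in uploader.py):
#
#     BookMeta = namedtuple('BookMeta', 'file_path, extension, title, author, cover, '
#                                       'description, tags, series, series_id, languages')
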
def process(tmp_file_path, original_file_name, original_file_extension):
    """Try to extract metadata from the uploaded file; fall back to default_meta()."""
    meta = None
    try:
        if ".PDF" == original_file_extension.upper():
            meta = pdf_meta(tmp_file_path, original_file_name, original_file_extension)
        if ".EPUB" == original_file_extension.upper() and use_epub_meta is True:
            meta = epub.get_epub_info(tmp_file_path, original_file_name, original_file_extension)
        if ".FB2" == original_file_extension.upper() and use_fb2_meta is True:
            meta = fb2.get_fb2_info(tmp_file_path, original_file_extension)
        if original_file_extension.upper() in ['.CBZ', '.CBT']:
            meta = comic.get_comic_info(tmp_file_path, original_file_name, original_file_extension)
    except Exception as ex:
        logger.warning('cannot parse metadata, using default: %s', ex)

    if meta and meta.title.strip() and meta.author.strip():
        return meta
    else:
        return default_meta(tmp_file_path, original_file_name, original_file_extension)
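
# Usage sketch (illustration only; the path and names below are made up): the upload
# handler saves the incoming file to a temporary location and then calls process() with
# that path plus the original file name and extension, e.g.:
#
#     meta = process('/tmp/calibre_web_upload', 'Some Book', '.epub')
#     # meta.title, meta.author, meta.cover, ... are then used to create the book entry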
2016-06-05 18:52:28 +02:00
|
|
|
|
|
|
|
|
|
|
|
def default_meta(tmp_file_path, original_file_name, original_file_extension):
    return uploader.BookMeta(
        file_path=tmp_file_path,
        extension=original_file_extension,
        title=original_file_name,
        author=u"Unknown",
        cover=None,
        description="",
        tags="",
        series="",
        series_id="",
        languages="")


def pdf_meta(tmp_file_path, original_file_name, original_file_extension):
    if use_pdf_meta:
        pdf = PdfFileReader(open(tmp_file_path, 'rb'), strict=False)
        doc_info = pdf.getDocumentInfo()
    else:
        doc_info = None

    if doc_info is not None:
        author = doc_info.author if doc_info.author else u"Unknown"
        title = doc_info.title if doc_info.title else original_file_name
        subject = doc_info.subject
    else:
        author = u"Unknown"
        title = original_file_name
        subject = ""

    return uploader.BookMeta(
        file_path=tmp_file_path,
        extension=original_file_extension,
        title=title,
        author=author,
        cover=pdf_preview(tmp_file_path, original_file_name),
        description=subject,
        tags="",
        series="",
        series_id="",
        languages="")


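# Cover extraction below tries two strategies: if PIL is available, it looks for an image
# XObject embedded in the first PDF page and writes it out according to its /Filter
# (FlateDecode -> raw bitmap decoded via PIL, DCTDecode -> JPEG bytes written as-is,
# JPXDecode -> JPEG 2000 bytes written as-is); otherwise, or if that fails, it falls back
# to rendering page one at 150 dpi with ImageMagick via wand.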
def pdf_preview(tmp_file_path, tmp_dir):
    if use_generic_pdf_cover:
        return None
    else:
        if use_PIL:
            # Try to pull an embedded image of the first page directly out of the PDF.
            try:
                input1 = PdfFileReader(open(tmp_file_path, 'rb'), strict=False)
                page0 = input1.getPage(0)
                xObject = page0['/Resources']['/XObject'].getObject()

                for obj in xObject:
                    if xObject[obj]['/Subtype'] == '/Image':
                        size = (xObject[obj]['/Width'], xObject[obj]['/Height'])
                        data = xObject[obj]._data  # xObject[obj].getData()
                        if xObject[obj]['/ColorSpace'] == '/DeviceRGB':
                            mode = "RGB"
                        else:
                            mode = "P"
                        if '/Filter' in xObject[obj]:
                            if xObject[obj]['/Filter'] == '/FlateDecode':
                                img = PILImage.frombytes(mode, size, data)
                                cover_file_name = os.path.splitext(tmp_file_path)[0] + ".cover.png"
                                img.save(os.path.join(tmp_dir, cover_file_name))
                                return cover_file_name
                            elif xObject[obj]['/Filter'] == '/DCTDecode':
                                cover_file_name = os.path.splitext(tmp_file_path)[0] + ".cover.jpg"
                                img = open(cover_file_name, "wb")
                                img.write(data)
                                img.close()
                                return cover_file_name
                            elif xObject[obj]['/Filter'] == '/JPXDecode':
                                cover_file_name = os.path.splitext(tmp_file_path)[0] + ".cover.jp2"
                                img = open(cover_file_name, "wb")
                                img.write(data)
                                img.close()
                                return cover_file_name
                        else:
                            img = PILImage.frombytes(mode, size, data)
                            cover_file_name = os.path.splitext(tmp_file_path)[0] + ".cover.png"
                            img.save(os.path.join(tmp_dir, cover_file_name))
                            return cover_file_name
            except Exception as ex:
                logger.warning('Cannot extract cover image with PIL, trying ImageMagick: %s', ex)

        # Fall back to rendering the first page with ImageMagick via wand.
        try:
            cover_file_name = os.path.splitext(tmp_file_path)[0] + ".cover.jpg"
            with Image(filename=tmp_file_path + "[0]", resolution=150) as img:
                img.compression_quality = 88
                img.save(filename=os.path.join(tmp_dir, cover_file_name))
            return cover_file_name
        except PolicyError as ex:
            logger.warning('Pdf extraction forbidden by Imagemagick policy: %s', ex)
            return None
        except Exception as ex:
            logger.warning('Cannot extract cover image, using default: %s', ex)
            return None


def get_versions():
    if not use_generic_pdf_cover:
        IVersion = ImageVersion.MAGICK_VERSION
        WVersion = ImageVersion.VERSION
    else:
        IVersion = _(u'not installed')
        WVersion = _(u'not installed')
    if use_pdf_meta:
        PVersion = 'v' + PyPdfVersion
    else:
        PVersion = _(u'not installed')
    if lxmlversion:
        XVersion = 'v' + '.'.join(map(str, lxmlversion))
    else:
        XVersion = _(u'not installed')
    return {'Image Magick': IVersion, 'PyPdf': PVersion, 'lxml': XVersion, 'Wand Version': WVersion}
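

# Minimal manual smoke test (sketch, illustration only -- not part of the Calibre-Web
# application flow): run this module directly against a book file to exercise the
# metadata extraction above. The argument handling below is made up for this example.
if __name__ == '__main__':
    import sys
    if len(sys.argv) > 1:
        book_path = sys.argv[1]
        book_name, book_extension = os.path.splitext(os.path.basename(book_path))
        print(process(book_path, book_name, book_extension))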