2015-08-02 20:59:11 +02:00
|
|
|
#!/usr/bin/env python
|
|
|
|
# -*- coding: utf-8 -*-
|
|
|
|
|
2019-01-20 19:37:45 +01:00
|
|
|
# This file is part of the Calibre-Web (https://github.com/janeczku/calibre-web)
|
|
|
|
# Copyright (C) 2012-2019 cervinko, idalin, SiphonSquirrel, ouzklcn, akushsky,
|
|
|
|
# OzzieIsaacs, bodybybuddha, jkrehm, matthazinski, janeczku
|
|
|
|
#
|
|
|
|
# This program is free software: you can redistribute it and/or modify
|
|
|
|
# it under the terms of the GNU General Public License as published by
|
|
|
|
# the Free Software Foundation, either version 3 of the License, or
|
|
|
|
# (at your option) any later version.
|
|
|
|
#
|
|
|
|
# This program is distributed in the hope that it will be useful,
|
|
|
|
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
|
|
# GNU General Public License for more details.
|
|
|
|
#
|
|
|
|
# You should have received a copy of the GNU General Public License
|
|
|
|
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
|
|
|
|
|
|
|
|
2019-03-16 16:23:40 +01:00
|
|
|
from cps import config, global_WorkerThread, get_locale, db, mimetypes
|
2016-03-28 21:07:13 +02:00
|
|
|
from flask import current_app as app
|
2017-02-20 19:52:00 +01:00
|
|
|
from tempfile import gettempdir
|
2015-08-02 20:59:11 +02:00
|
|
|
import sys
|
|
|
|
import os
|
2016-03-26 16:12:29 +01:00
|
|
|
import re
|
|
|
|
import unicodedata
|
2018-08-12 09:29:57 +02:00
|
|
|
import worker
|
2018-08-04 10:56:42 +02:00
|
|
|
import time
|
|
|
|
from flask import send_from_directory, make_response, redirect, abort
|
2016-11-09 19:24:33 +01:00
|
|
|
from flask_babel import gettext as _
|
2018-11-03 13:43:38 +01:00
|
|
|
from flask_login import current_user
|
|
|
|
from babel.dates import format_datetime
|
2019-03-16 15:48:09 +01:00
|
|
|
from babel.core import UnknownLocaleError
|
2018-11-18 17:09:13 +01:00
|
|
|
from datetime import datetime
|
2019-03-16 15:48:09 +01:00
|
|
|
from babel import Locale as LC
|
2017-01-30 18:58:36 +01:00
|
|
|
import shutil
|
2017-02-20 19:52:00 +01:00
|
|
|
import requests
|
2019-03-16 15:48:09 +01:00
|
|
|
from sqlalchemy.sql.expression import true, and_, false, text, func
|
|
|
|
from iso639 import languages as isoLanguages
|
|
|
|
from pagination import Pagination
|
2019-03-16 16:23:40 +01:00
|
|
|
from werkzeug.datastructures import Headers
|
2019-03-16 15:48:09 +01:00
|
|
|
|
2017-03-17 00:36:37 +01:00
|
|
|
try:
|
|
|
|
import gdriveutils as gd
|
|
|
|
except ImportError:
|
|
|
|
pass
|
2018-08-24 15:48:09 +02:00
|
|
|
import random
|
2019-02-03 18:32:27 +01:00
|
|
|
from subproc_wrapper import process_open
|
2019-02-06 21:52:24 +01:00
|
|
|
import ub
|
2017-02-20 19:52:00 +01:00
|
|
|
|
2019-03-16 16:23:40 +01:00
|
|
|
try:
|
|
|
|
from urllib.parse import quote
|
|
|
|
except ImportError:
|
|
|
|
from urllib import quote
|
|
|
|
|
2017-02-15 18:09:17 +01:00
|
|
|
try:
|
|
|
|
import unidecode
|
2017-03-31 16:52:25 +02:00
|
|
|
use_unidecode = True
|
2017-11-30 16:49:46 +01:00
|
|
|
except ImportError:
|
2017-03-31 16:52:25 +02:00
|
|
|
use_unidecode = False
|
2016-12-23 09:53:39 +01:00
|
|
|
|
2019-03-16 16:23:40 +01:00
|
|
|
try:
|
|
|
|
import Levenshtein
|
|
|
|
use_levenshtein = True
|
|
|
|
except ImportError:
|
|
|
|
use_levenshtein = False
|
|
|
|
|
2019-03-24 15:58:43 +01:00
|
|
|
try:
|
|
|
|
from functools import reduce
|
|
|
|
except ImportError:
|
|
|
|
pass # We're not using Python 3
|
|
|
|
|
2017-11-30 16:49:46 +01:00
|
|
|
|
2018-07-30 20:12:41 +02:00
|
|
|
def update_download(book_id, user_id):
    """Record that *user_id* downloaded *book_id*; a repeated download of the
    same book by the same user is logged only once."""
    already_logged = ub.session.query(ub.Downloads) \
        .filter(ub.Downloads.user_id == user_id) \
        .filter(ub.Downloads.book_id == book_id).first()
    if already_logged is None:
        ub.session.add(ub.Downloads(user_id=user_id, book_id=book_id))
        ub.session.commit()
|
|
|
|
|
2018-08-31 15:19:48 +02:00
|
|
|
# Convert existing book entry to new format
|
|
|
|
def convert_book_format(book_id, calibrepath, old_book_format, new_book_format, user_id, kindle_mail=None):
    """Queue a conversion of a book from one format to another.

    :param book_id: id of the book in the calibre database
    :param calibrepath: root path of the local calibre library
    :param old_book_format: source format, upper case (e.g. 'EPUB')
    :param new_book_format: target format, upper case
    :param user_id: user requesting the conversion (task attribution)
    :param kindle_mail: if set, the result is e-mailed to this address
    :return: None when the task was queued, otherwise a translated error message
    """
    book = db.session.query(db.Books).filter(db.Books.id == book_id).first()
    data = db.session.query(db.Data).filter(db.Data.book == book.id).filter(db.Data.format == old_book_format).first()
    if not data:
        error_message = _(u"%(format)s format not found for book id: %(book)d", format=old_book_format, book=book_id)
        app.logger.error("convert_book_format: " + error_message)
        return error_message
    if config.config_use_google_drive:
        # fetch the source file from Google Drive into the local library path first
        df = gd.getFileFromEbooksFolder(book.path, data.name + "." + old_book_format.lower())
        if df:
            datafile = os.path.join(calibrepath, book.path, data.name + u"." + old_book_format.lower())
            if not os.path.exists(os.path.join(calibrepath, book.path)):
                os.makedirs(os.path.join(calibrepath, book.path))
            df.GetContentFile(datafile)
        else:
            error_message = _(u"%(format)s not found on Google Drive: %(fn)s",
                              format=old_book_format, fn=data.name + "." + old_book_format.lower())
            return error_message
    file_path = os.path.join(calibrepath, book.path, data.name)
    if os.path.exists(file_path + "." + old_book_format.lower()):
        # read settings and append converter task to queue
        if kindle_mail:
            settings = ub.get_mail_settings()
            settings['subject'] = _('Send to Kindle')  # pretranslate Subject for e-mail
            settings['body'] = _(u'This e-mail has been sent via Calibre-Web.')
            # text = _(u"%(format)s: %(book)s", format=new_book_format, book=book.title)
        else:
            settings = dict()
        # task label shown in the task list, e.g. "EPUB -> MOBI: Title"
        text = (u"%s -> %s: %s" % (old_book_format, new_book_format, book.title))
        settings['old_book_format'] = old_book_format
        settings['new_book_format'] = new_book_format
        global_WorkerThread.add_convert(file_path, book.id, user_id, text, settings, kindle_mail)
        return None
    else:
        error_message = _(u"%(format)s not found: %(fn)s",
                          format=old_book_format, fn=data.name + "." + old_book_format.lower())
        return error_message
|
2015-08-02 20:59:11 +02:00
|
|
|
|
2016-12-23 09:53:39 +01:00
|
|
|
|
2018-07-30 20:12:41 +02:00
|
|
|
def send_test_mail(kindle_mail, user_name):
    """Queue a test e-mail to *kindle_mail* so *user_name* can verify the
    configured SMTP settings."""
    mail_settings = ub.get_mail_settings()
    global_WorkerThread.add_email(_(u'Calibre-Web test e-mail'), None, None, mail_settings,
                                  kindle_mail, user_name, _(u"Test e-mail"),
                                  _(u'This e-mail has been sent via Calibre-Web.'))
    return
|
|
|
|
|
2018-08-24 15:48:09 +02:00
|
|
|
|
|
|
|
# Send registration email or password reset email, depending on parameter resend (False means welcome email)
|
|
|
|
def send_registration_mail(e_mail, user_name, default_password, resend=False):
    """Queue a registration e-mail (or a password-reset e-mail).

    :param e_mail: recipient address
    :param user_name: login name, included in the message body
    :param default_password: generated password, sent in plain text
    :param resend: False sends the full welcome text (new account);
                   True sends only the credentials (password reset)
    """
    text = "Hello %s!\r\n" % user_name
    if not resend:
        text += "Your new account at Calibre-Web has been created. Thanks for joining us!\r\n"
    # Typo fix in the user-visible mail body: 'informations' -> 'information'
    text += "Please log in to your account using the following information:\r\n"
    text += "User name: %s\n" % user_name
    text += "Password: %s\r\n" % default_password
    text += "Don't forget to change your password after first login.\r\n"
    text += "Sincerely\r\n\r\n"
    text += "Your Calibre-Web team"
    global_WorkerThread.add_email(_(u'Get Started with Calibre-Web'), None, None, ub.get_mail_settings(),
                                  e_mail, user_name, _(u"Registration e-mail for user: %(name)s", name=user_name),
                                  text)
    return
|
|
|
|
|
2018-11-18 17:09:13 +01:00
|
|
|
def check_send_to_kindle(entry):
    """
    returns all available book formats for sending to Kindle

    Each list entry is a dict with keys 'format' (capitalized format name),
    'convert' (0 = send as-is, 1 = convert first) and 'text' (UI label).
    Returns None (and logs an error) when the book has no data rows at all.
    """
    if len(entry.data):
        bookformats = list()
        if config.config_ebookconverter == 0:
            # no converter - only for mobi and pdf formats
            for ele in iter(entry.data):
                # Bugfix: compare the format exactly. The previous substring
                # test ('AZW' in ele.format) also matched 'AZW3', offering a
                # non-existent Azw download for AZW3-only books.
                if ele.format == 'MOBI':
                    bookformats.append({'format': 'Mobi', 'convert': 0,
                                        'text': _('Send %(format)s to Kindle', format='Mobi')})
                if ele.format == 'PDF':
                    bookformats.append({'format': 'Pdf', 'convert': 0,
                                        'text': _('Send %(format)s to Kindle', format='Pdf')})
                if ele.format == 'AZW':
                    bookformats.append({'format': 'Azw', 'convert': 0,
                                        'text': _('Send %(format)s to Kindle', format='Azw')})
                if ele.format == 'AZW3':
                    bookformats.append({'format': 'Azw3', 'convert': 0,
                                        'text': _('Send %(format)s to Kindle', format='Azw3')})
        else:
            formats = list()
            for ele in iter(entry.data):
                formats.append(ele.format)
            if 'MOBI' in formats:
                bookformats.append({'format': 'Mobi', 'convert': 0,
                                    'text': _('Send %(format)s to Kindle', format='Mobi')})
            if 'AZW' in formats:
                bookformats.append({'format': 'Azw', 'convert': 0,
                                    'text': _('Send %(format)s to Kindle', format='Azw')})
            if 'AZW3' in formats:
                bookformats.append({'format': 'Azw3', 'convert': 0,
                                    'text': _('Send %(format)s to Kindle', format='Azw3')})
            if 'PDF' in formats:
                bookformats.append({'format': 'Pdf', 'convert': 0,
                                    'text': _('Send %(format)s to Kindle', format='Pdf')})
            # converter available: offer conversion from EPUB when the target
            # format does not already exist
            if config.config_ebookconverter >= 1:
                if 'EPUB' in formats and not 'MOBI' in formats:
                    bookformats.append({'format': 'Mobi', 'convert': 1,
                                        'text': _('Convert %(orig)s to %(format)s and send to Kindle',
                                                  orig='Epub', format='Mobi')})
            if config.config_ebookconverter == 2:
                if 'EPUB' in formats and not 'AZW3' in formats:
                    bookformats.append({'format': 'Azw3', 'convert': 1,
                                        'text': _('Convert %(orig)s to %(format)s and send to Kindle',
                                                  orig='Epub', format='Azw3')})
        return bookformats
    else:
        app.logger.error(u'Cannot find book entry %d', entry.id)
        return None
|
2018-10-01 20:19:29 +02:00
|
|
|
|
2018-08-24 15:48:09 +02:00
|
|
|
|
2018-11-25 11:25:20 +01:00
|
|
|
# Check if a reader is existing for any of the book formats, if not, return empty list, otherwise return
|
|
|
|
# list with supported formats
|
|
|
|
def check_read_formats(entry):
    """Return the lower-cased formats of *entry* that the built-in web reader
    can display; an empty list when none are supported or no data exists."""
    supported = {'TXT', 'PDF', 'EPUB', 'ZIP', 'CBZ', 'TAR', 'CBT', 'RAR', 'CBR'}
    if not entry.data:
        return []
    return [item.format.lower() for item in entry.data if item.format in supported]
|
|
|
|
|
|
|
|
|
2018-08-12 09:29:57 +02:00
|
|
|
# Files are processed in the following order/priority:
|
2018-11-18 17:09:13 +01:00
|
|
|
# 1: If Mobi file is existing, it's directly send to kindle email,
|
|
|
|
# 2: If Epub file is existing, it's converted and send to kindle email,
|
|
|
|
# 3: If Pdf file is existing, it's directly send to kindle email
|
|
|
|
def send_mail(book_id, book_format, convert, kindle_mail, calibrepath, user_id):
    """Send email with attachments"""
    book = db.session.query(db.Books).filter(db.Books.id == book_id).first()

    if convert:
        # returns None if success, otherwise errormessage
        return convert_book_format(book_id, calibrepath, u'epub', book_format.lower(), user_id, kindle_mail)

    wanted = book_format.upper()
    for entry in book.data:
        if entry.format.upper() != wanted:
            continue
        attachment = entry.name + '.' + book_format.lower()
        global_WorkerThread.add_email(_(u"Send to Kindle"), book.path, attachment, ub.get_mail_settings(),
                                      kindle_mail, user_id, _(u"E-mail: %(book)s", book=book.title),
                                      _(u'This e-mail has been sent via Calibre-Web.'))
        return
    return _(u"The requested file could not be read. Maybe wrong permissions?")
|
2016-03-26 16:12:29 +01:00
|
|
|
|
2016-12-23 09:53:39 +01:00
|
|
|
|
2016-04-03 23:52:32 +02:00
|
|
|
def get_valid_filename(value, replace_whitespace=True):
    """
    Returns the given string converted to a string that can be used for a clean
    filename. Limits num characters to 128 max.

    :param value: the candidate filename (must not reduce to empty)
    :param replace_whitespace: also replace the characters *+:\\"/<>? by '_'
        and '|' by ','
    :raises ValueError: when the sanitized result is empty
    """
    # a trailing dot is illegal on some filesystems (e.g. Windows)
    if value[-1:] == u'.':
        value = value[:-1] + u'_'
    value = value.replace("/", "_").replace(":", "_").strip('\0')
    if use_unidecode:
        value = (unidecode.unidecode(value)).strip()
    else:
        value = value.replace(u'§', u'SS')
        value = value.replace(u'ß', u'ss')
        value = unicodedata.normalize('NFKD', value)
        # Fix: raw string — '[\W\s-]' is an invalid escape sequence warning
        # on modern Python; the compiled pattern is unchanged.
        re_slugify = re.compile(r'[\W\s-]', re.UNICODE)
        if isinstance(value, str):  # Python3 str, Python2 unicode
            value = re_slugify.sub('', value).strip()
        else:
            value = unicode(re_slugify.sub('', value).strip())
    if replace_whitespace:
        # *+:\"/<>? are replaced by _
        value = re.sub(r'[\*\+:\\\"/<>\?]+', u'_', value, flags=re.U)
        # pipe has to be replaced with comma
        value = re.sub(r'[\|]+', u',', value, flags=re.U)
    value = value[:128]
    if not value:
        raise ValueError("Filename cannot be empty")
    if sys.version_info.major == 3:
        return value
    else:
        return value.decode('utf-8')
|
2016-03-26 16:12:29 +01:00
|
|
|
|
2017-11-30 16:49:46 +01:00
|
|
|
|
2017-02-15 18:09:17 +01:00
|
|
|
def get_sorted_author(value):
    """Return *value* ('Firstname Lastname') rearranged as 'Lastname, Firstname'.

    Generation suffixes (Jr./Sr., I-III, IV) stay attached after the first
    names. A value already containing a comma is returned unchanged. On any
    unexpected error the original value is returned and the problem is logged.
    """
    try:
        if ',' not in value:
            # Fix: raw strings — the escapes in "^(JR|SR)\.?$" etc. trigger
            # invalid-escape warnings on modern Python; patterns unchanged.
            regexes = [r"^(JR|SR)\.?$", r"^I{1,3}\.?$", r"^IV\.?$"]
            combined = "(" + ")|(".join(regexes) + ")"
            value = value.split(" ")
            if re.match(combined, value[-1].upper()):
                # keep the suffix ("Jr.", "III", ...) behind the first names
                value2 = value[-2] + ", " + " ".join(value[:-2]) + " " + value[-1]
            elif len(value) == 1:
                value2 = value[0]
            else:
                value2 = value[-1] + ", " + " ".join(value[:-1])
        else:
            value2 = value
    except Exception:
        # Fix: missing space before 'failed' in the log message
        app.logger.error("Sorting author " + str(value) + " failed")
        value2 = value
    return value2
|
2016-12-23 09:53:39 +01:00
|
|
|
|
2018-08-04 18:22:43 +02:00
|
|
|
|
2018-07-14 19:40:59 +02:00
|
|
|
# Deletes a book from the local filestorage, returns True if deleting is successful, otherwise false
|
2018-08-04 18:22:43 +02:00
|
|
|
def delete_book_file(book, calibrepath, book_format=None):
    """Delete a book (or a single format of it) from the local file storage.

    :param book: book database object; book.path is '<author>/<title>'
    :param calibrepath: root directory of the calibre library
    :param book_format: if given (upper case), only files with that extension
                        are removed; otherwise the whole title folder is deleted
    :return: True when the folder was deleted, False on error.
        NOTE(review): the format-only branch and the "path is not a directory"
        case fall through and implicitly return None (falsy) — confirm callers
        treat None as failure before relying on the boolean contract.
    """
    # check that path is 2 elements deep, check that target path has no subfolders
    if book.path.count('/') == 1:
        path = os.path.join(calibrepath, book.path)
        if book_format:
            # remove only files of the requested format, keep the folder
            for file in os.listdir(path):
                if file.upper().endswith("."+book_format):
                    os.remove(os.path.join(path, file))
        else:
            if os.path.isdir(path):
                # refuse to delete when unexpected subfolders exist
                if len(next(os.walk(path))[1]):
                    app.logger.error(
                        "Deleting book " + str(book.id) + " failed, path has subfolders: " + book.path)
                    return False
                shutil.rmtree(path, ignore_errors=True)
                return True
    else:
        app.logger.error("Deleting book " + str(book.id) + " failed, book path not valid: " + book.path)
        return False
|
2017-11-30 16:49:46 +01:00
|
|
|
|
|
|
|
|
2019-01-05 21:44:29 +01:00
|
|
|
def update_dir_structure_file(book_id, calibrepath, first_author):
    """Rename the on-disk author/title folders and data files of a book so
    they match the (possibly changed) author and title in the database.

    :param book_id: id of the book in the calibre database
    :param calibrepath: root directory of the local calibre library
    :param first_author: preferred author name for the folder; falls back to
                         the book's first author when None/empty
    :return: False on success, otherwise a translated error message
    """
    localbook = db.session.query(db.Books).filter(db.Books.id == book_id).first()
    path = os.path.join(calibrepath, localbook.path)

    # current and desired folder names; book.path is '<author>/<title>'
    authordir = localbook.path.split('/')[0]
    if first_author:
        new_authordir = get_valid_filename(first_author)
    else:
        new_authordir = get_valid_filename(localbook.authors[0].name)

    titledir = localbook.path.split('/')[1]
    new_titledir = get_valid_filename(localbook.title) + " (" + str(book_id) + ")"

    if titledir != new_titledir:
        try:
            new_title_path = os.path.join(os.path.dirname(path), new_titledir)
            if not os.path.exists(new_title_path):
                os.renames(path, new_title_path)
            else:
                # target folder already exists: merge files into it one by one
                app.logger.info("Copying title: " + path + " into existing: " + new_title_path)
                for dir_name, __, file_list in os.walk(path):
                    for file in file_list:
                        os.renames(os.path.join(dir_name, file),
                                   os.path.join(new_title_path + dir_name[len(path):], file))
            path = new_title_path
            localbook.path = localbook.path.split('/')[0] + '/' + new_titledir
        except OSError as ex:
            app.logger.error("Rename title from: " + path + " to " + new_title_path + ": " + str(ex))
            app.logger.debug(ex, exc_info=True)
            return _("Rename title from: '%(src)s' to '%(dest)s' failed with error: %(error)s",
                     src=path, dest=new_title_path, error=str(ex))
    if authordir != new_authordir:
        try:
            new_author_path = os.path.join(calibrepath, new_authordir, os.path.basename(path))
            os.renames(path, new_author_path)
            localbook.path = new_authordir + '/' + localbook.path.split('/')[1]
        except OSError as ex:
            app.logger.error("Rename author from: " + path + " to " + new_author_path + ": " + str(ex))
            app.logger.debug(ex, exc_info=True)
            return _("Rename author from: '%(src)s' to '%(dest)s' failed with error: %(error)s",
                     src=path, dest=new_author_path, error=str(ex))
    # Rename all files from old names to new names
    if authordir != new_authordir or titledir != new_titledir:
        try:
            new_name = get_valid_filename(localbook.title) + ' - ' + get_valid_filename(new_authordir)
            path_name = os.path.join(calibrepath, new_authordir, os.path.basename(path))
            for file_format in localbook.data:
                os.renames(os.path.join(path_name, file_format.name + '.' + file_format.format.lower()),
                           os.path.join(path_name, new_name + '.' + file_format.format.lower()))
                # keep the database row in sync with the file on disk
                file_format.name = new_name
        except OSError as ex:
            app.logger.error("Rename file in path " + path + " to " + new_name + ": " + str(ex))
            app.logger.debug(ex, exc_info=True)
            return _("Rename file in path '%(src)s' to '%(dest)s' failed with error: %(error)s",
                     src=path, dest=new_name, error=str(ex))
    return False
|
2017-01-30 18:58:36 +01:00
|
|
|
|
2017-03-31 16:52:25 +02:00
|
|
|
|
2019-01-05 21:44:29 +01:00
|
|
|
def update_dir_structure_gdrive(book_id, first_author):
    """Google Drive counterpart of update_dir_structure_file: rename the
    author/title folders and data files on Drive after a metadata change.

    :param book_id: id of the book in the calibre database
    :param first_author: preferred author name for the folder; falls back to
                         the book's first author when None/empty
    :return: False on success, otherwise a translated error message
    """
    error = False
    book = db.session.query(db.Books).filter(db.Books.id == book_id).first()
    path = book.path

    authordir = book.path.split('/')[0]
    if first_author:
        new_authordir = get_valid_filename(first_author)
    else:
        new_authordir = get_valid_filename(book.authors[0].name)
    titledir = book.path.split('/')[1]
    new_titledir = get_valid_filename(book.title) + u" (" + str(book_id) + u")"

    if titledir != new_titledir:
        gFile = gd.getFileFromEbooksFolder(os.path.dirname(book.path), titledir)
        if gFile:
            # rename the title folder in place
            gFile['title'] = new_titledir
            gFile.Upload()
            book.path = book.path.split('/')[0] + u'/' + new_titledir
            path = book.path
            gd.updateDatabaseOnEdit(gFile['id'], book.path)     # only child folder affected
        else:
            error = _(u'File %(file)s not found on Google Drive', file=book.path)  # file not found

    if authordir != new_authordir:
        gFile = gd.getFileFromEbooksFolder(os.path.dirname(book.path), new_titledir)
        if gFile:
            # move the (possibly renamed) title folder under the new author folder
            gd.moveGdriveFolderRemote(gFile, new_authordir)
            book.path = new_authordir + u'/' + book.path.split('/')[1]
            path = book.path
            gd.updateDatabaseOnEdit(gFile['id'], book.path)
        else:
            error = _(u'File %(file)s not found on Google Drive', file=authordir)  # file not found
    # Rename all files from old names to new names

    if authordir != new_authordir or titledir != new_titledir:
        new_name = get_valid_filename(book.title) + u' - ' + get_valid_filename(new_authordir)
        for file_format in book.data:
            gFile = gd.getFileFromEbooksFolder(path, file_format.name + u'.' + file_format.format.lower())
            if not gFile:
                error = _(u'File %(file)s not found on Google Drive', file=file_format.name)  # file not found
                break
            gd.moveGdriveFileRemote(gFile, new_name + u'.' + file_format.format.lower())
            # keep the database row in sync with the file on Drive
            file_format.name = new_name
    return error
|
2017-03-01 23:38:03 +01:00
|
|
|
|
2018-07-14 08:31:52 +02:00
|
|
|
|
2018-08-04 18:22:43 +02:00
|
|
|
def delete_book_gdrive(book, book_format):
    """Move a book (or a single format of it) to the Google Drive trash.

    :param book: book database object; book.path is '<author>/<title>'
    :param book_format: upper-case format to delete, or falsy to delete the
                        whole title folder
    :return: False on success, otherwise a translated error message
    """
    if book_format:
        # locate the data file of the requested format inside the book folder
        target_name = ''
        for entry in book.data:
            if entry.format.upper() == book_format:
                target_name = entry.name + '.' + book_format
        drive_file = gd.getFileFromEbooksFolder(book.path, target_name)
    else:
        # delete the whole title folder below the author folder
        author_folder = os.path.dirname(book.path)
        title_folder = book.path.split('/')[1]
        drive_file = gd.getFileFromEbooksFolder(author_folder, title_folder)
    if not drive_file:
        return _(u'Book path %(path)s not found on Google Drive', path=book.path)  # file not found
    gd.deleteDatabaseEntry(drive_file['id'])
    drive_file.Trash()
    return False
|
2018-07-14 08:31:52 +02:00
|
|
|
|
2018-11-25 11:25:20 +01:00
|
|
|
|
2018-08-24 15:48:09 +02:00
|
|
|
def generate_random_password():
    """Generate a random 8-character password.

    Security fix: passwords are security-sensitive, so draw characters from
    random.SystemRandom (backed by os.urandom) instead of the default
    Mersenne-Twister generator. Choosing with replacement also avoids the
    slight entropy loss of random.sample's no-repeat sampling.
    """
    s = "abcdefghijklmnopqrstuvwxyz01234567890ABCDEFGHIJKLMNOPQRSTUVWXYZ!@#$%&*()?"
    passlen = 8
    rng = random.SystemRandom()  # CSPRNG, available on Python 2 and 3
    return "".join(rng.choice(s) for _ in range(passlen))
|
|
|
|
|
2018-07-14 08:31:52 +02:00
|
|
|
################################## External interface
|
|
|
|
|
2019-01-05 21:44:29 +01:00
|
|
|
def update_dir_stucture(book_id, calibrepath, first_author = None):
    """Dispatch the directory-structure update either to the Google Drive
    implementation or to the local-filesystem one, depending on config."""
    if config.config_use_google_drive:
        return update_dir_structure_gdrive(book_id, first_author)
    return update_dir_structure_file(book_id, calibrepath, first_author)
|
2018-07-14 08:31:52 +02:00
|
|
|
|
2018-11-25 11:25:20 +01:00
|
|
|
|
2018-08-04 18:22:43 +02:00
|
|
|
def delete_book(book, calibrepath, book_format):
    """Delete a book (or one of its formats) from Google Drive or from the
    local library, depending on configuration."""
    if config.config_use_google_drive:
        return delete_book_gdrive(book, book_format)
    return delete_book_file(book, calibrepath, book_format)
|
2018-08-04 10:56:42 +02:00
|
|
|
|
2018-11-25 11:25:20 +01:00
|
|
|
|
2018-08-04 10:56:42 +02:00
|
|
|
def get_book_cover(cover_path):
    """Serve the cover image for a book.

    For Google Drive libraries this redirects to the Drive URL, falling back
    to the bundled generic cover on any failure; for local libraries it serves
    '<calibre_dir>/<cover_path>/cover.jpg' directly.
    """
    if config.config_use_google_drive:
        try:
            if not gd.is_gdrive_ready():
                return send_from_directory(os.path.join(os.path.dirname(__file__), "static"), "generic_cover.jpg")
            path = gd.get_cover_via_gdrive(cover_path)
            if path:
                return redirect(path)
            else:
                app.logger.error(cover_path + '/cover.jpg not found on Google Drive')
                return send_from_directory(os.path.join(os.path.dirname(__file__), "static"), "generic_cover.jpg")
        except Exception as e:
            # Bugfix: Exception has no '.message' attribute on Python 3, which
            # made this handler itself raise AttributeError; use str(e).
            app.logger.error("Error Message: " + str(e))
            app.logger.exception(e)
            # traceback.print_exc()
            return send_from_directory(os.path.join(os.path.dirname(__file__), "static"), "generic_cover.jpg")
    else:
        return send_from_directory(os.path.join(config.config_calibre_dir, cover_path), "cover.jpg")
|
2018-08-04 10:56:42 +02:00
|
|
|
|
2018-11-25 11:25:20 +01:00
|
|
|
|
2018-08-04 10:56:42 +02:00
|
|
|
# saves book cover to gdrive or locally
|
|
|
|
# saves book cover to gdrive or locally
def save_cover(url, book_path):
    """Download a JPEG cover from *url* and store it for the book at *book_path*.

    :return: True when the cover was saved, False when the download is not a
             JPEG image
    """
    img = requests.get(url)
    if img.headers.get('content-type') != 'image/jpeg':
        app.logger.error("Cover is no jpg file, can't save")
        return False

    if config.config_use_google_drive:
        tmpDir = gettempdir()
        tmp_cover = os.path.join(tmpDir, "uploaded_cover.jpg")
        # 'with' guarantees the handle is closed even if the write fails
        with open(tmp_cover, "wb") as f:
            f.write(img.content)
        gd.uploadFileToEbooksFolder(os.path.join(book_path, 'cover.jpg'), tmp_cover)
        app.logger.info("Cover is saved on Google Drive")
        return True

    with open(os.path.join(config.config_calibre_dir, book_path, "cover.jpg"), "wb") as f:
        f.write(img.content)
    app.logger.info("Cover is saved")
    return True
|
2018-08-04 10:56:42 +02:00
|
|
|
|
2018-11-25 11:25:20 +01:00
|
|
|
|
2018-08-04 10:56:42 +02:00
|
|
|
def do_download_file(book, book_format, data, headers):
    """Build a download response for one data file of a book, either via a
    Google Drive download or from the local library directory."""
    book_file = data.name + "." + book_format
    if config.config_use_google_drive:
        start = time.time()
        df = gd.getFileFromEbooksFolder(book.path, book_file)
        app.logger.debug(time.time() - start)
        if not df:
            abort(404)
        return gd.do_gdrive_download(df, headers)
    filename = os.path.join(config.config_calibre_dir, book.path)
    if not os.path.isfile(os.path.join(filename, book_file)):
        # ToDo: improve error handling
        app.logger.error('File not found: %s' % os.path.join(filename, book_file))
    response = make_response(send_from_directory(filename, book_file))
    response.headers = headers
    return response
|
|
|
|
|
2018-07-14 08:31:52 +02:00
|
|
|
##################################
|
|
|
|
|
2017-03-01 23:38:03 +01:00
|
|
|
|
2017-02-21 19:40:22 +01:00
|
|
|
|
2018-10-03 21:58:37 +02:00
|
|
|
|
2017-11-19 18:08:55 +01:00
|
|
|
def check_unrar(unrarLocation):
    """Probe the UnRar binary at *unrarLocation* and extract its version.

    :return: tuple (error, version); error is True when the binary is missing
             or could not be executed, version is the parsed version string or
             a translated error message
    """
    error = False
    # Bugfix: initialize 'version' — previously, a binary that ran but whose
    # output never matched the pattern left 'version' unbound and the final
    # return raised UnboundLocalError. The msgid keeps the historic
    # 'excecuting' spelling so existing translation catalogs still match.
    version = _(u'Error excecuting UnRar')
    if os.path.exists(unrarLocation):
        try:
            if sys.version_info < (3, 0):
                # subprocess wants a bytes path on Python 2
                unrarLocation = unrarLocation.encode(sys.getfilesystemencoding())
            p = process_open(unrarLocation)
            p.wait()
            for lines in p.stdout.readlines():
                if isinstance(lines, bytes):
                    lines = lines.decode('utf-8')
                value = re.search('UNRAR (.*) freeware', lines)
                if value:
                    version = value.group(1)
        except OSError as e:
            error = True
            app.logger.exception(e)
            version = _(u'Error excecuting UnRar')
    else:
        version = _(u'Unrar binary file not found')
        error = True
    return (error, version)
|
2018-08-31 10:47:58 +02:00
|
|
|
|
2018-09-10 10:42:28 +02:00
|
|
|
|
2018-11-25 11:25:20 +01:00
|
|
|
|
2018-11-18 17:09:13 +01:00
|
|
|
def json_serial(obj):
    """JSON serializer for objects not serializable by default json code"""
    if isinstance(obj, datetime):
        return obj.isoformat()
    raise TypeError("Type %s not serializable" % type(obj))
|
2018-10-03 21:58:37 +02:00
|
|
|
|
2018-11-25 11:25:20 +01:00
|
|
|
|
2018-09-19 17:26:52 +02:00
|
|
|
def render_task_status(tasklist):
    """Localize status and type information of the tasks in *tasklist*.

    Only tasks belonging to the current user are rendered, unless the user
    is an admin, who sees every task.  Each rendered task dict is enriched
    in place with 'starttime', 'status' and 'taskMessage' keys.
    Returns the filtered, localized task list.
    """
    # Translated names for the numeric worker status codes.
    status_names = {
        worker.STAT_WAITING: _(u'Waiting'),
        worker.STAT_FAIL: _(u'Failed'),
        worker.STAT_STARTED: _(u'Started'),
        worker.STAT_FINISH_SUCCESS: _(u'Finished'),
    }
    # Translated message prefixes for the numeric task type codes;
    # TASK_CONVERT and TASK_CONVERT_ANY intentionally share the same prefix.
    type_prefixes = {
        worker.TASK_EMAIL: _(u'E-mail: '),
        worker.TASK_CONVERT: _(u'Convert: '),
        worker.TASK_UPLOAD: _(u'Upload: '),
        worker.TASK_CONVERT_ANY: _(u'Convert: '),
    }
    renderedtasklist = list()
    for task in tasklist:
        if task['user'] == current_user.nickname or current_user.role_admin():
            if task['formStarttime']:
                task['starttime'] = format_datetime(task['formStarttime'], format='short', locale=get_locale())
            elif 'starttime' not in task:
                task['starttime'] = ""

            # localize the task status
            if isinstance(task['stat'], int):
                task['status'] = status_names.get(task['stat'], _(u'Unknown Status'))

            # localize the task type
            if isinstance(task['taskType'], int):
                prefix = type_prefixes.get(task['taskType'], _(u'Unknown Task: '))
                task['taskMessage'] = prefix + task['taskMess']

            renderedtasklist.append(task)
    return renderedtasklist
|
2019-03-16 15:48:09 +01:00
|
|
|
|
|
|
|
|
|
|
|
# Language and content filters for displaying in the UI
|
|
|
|
def common_filters():
    """Build the SQLAlchemy filter expression that restricts books to the
    current user's language selection and hides mature content."""
    selected_lang = current_user.filter_language()
    if selected_lang == "all":
        lang_filter = true()
    else:
        lang_filter = db.Books.languages.any(db.Languages.lang_code == selected_lang)
    if current_user.mature_content:
        content_rating_filter = false()
    else:
        content_rating_filter = db.Books.tags.any(db.Tags.name.in_(config.mature_content_tags()))
    return and_(lang_filter, ~content_rating_filter)
|
|
|
|
|
|
|
|
|
|
|
|
# Creates for all stored languages a translated speaking name in the array for the UI
|
|
|
|
def speaking_language(languages=None):
    """Attach a translated, human readable name to every language entry.

    When *languages* is falsy, all languages stored in the Calibre database
    are used.  Returns the (mutated) list of language rows.
    """
    if not languages:
        languages = db.session.query(db.Languages).all()
    ui_locale = get_locale()
    for lang in languages:
        try:
            lang.name = LC.parse(lang.lang_code).get_language_name(ui_locale)
        except UnknownLocaleError:
            # Fall back to the ISO-639-3 English name, run through gettext.
            lang.name = _(isoLanguages.get(part3=lang.lang_code).name)
    return languages
|
|
|
|
|
|
|
|
# checks if domain is in database (including wildcards)
|
|
|
|
# example SELECT * FROM @TABLE WHERE 'abcdefg' LIKE Name;
|
|
|
|
# from https://code.luasoftware.com/tutorials/flask/execute-raw-sql-in-flask-sqlalchemy/
|
|
|
|
def check_valid_domain(domain_text):
    """Count registration table rows whose (wildcard) domain matches the
    domain part of *domain_text* via SQL LIKE.

    Raw-SQL approach from
    https://code.luasoftware.com/tutorials/flask/execute-raw-sql-in-flask-sqlalchemy/
    """
    # Keep only the part after '@' (or the whole string when no '@' present).
    domain = domain_text.split('@', 1)[-1].lower()
    query = "SELECT * FROM registration WHERE :domain LIKE domain;"
    matches = ub.session.query(ub.Registration).from_statement(text(query)).params(domain=domain).all()
    return len(matches)
|
|
|
|
|
|
|
|
|
|
|
|
# Orders all Authors in the list according to authors sort
|
|
|
|
def order_authors(entry):
    """Reorder ``entry.authors`` according to the book's author_sort string.

    Resolves each '&'-separated sort name to an Authors row; if any name
    cannot be resolved, the original author order is left untouched.
    Returns the (possibly mutated) entry.
    """
    sort_authors = entry.author_sort.split('&')
    authors_ordered = list()
    error = False
    for auth in sort_authors:
        # ToDo: How to handle not found authorname
        # strip() alone covers the redundant lstrip().strip() of before
        result = db.session.query(db.Authors).filter(db.Authors.sort == auth.strip()).first()
        if not result:
            error = True
            break
        authors_ordered.append(result)
    if not error:
        entry.authors = authors_ordered
    return entry
|
|
|
|
|
|
|
|
|
|
|
|
# Fill indexpage with all requested data from database
|
|
|
|
def fill_indexpage(page, database, db_filter, order, *join):
    """Query *database* with the given filter and ordering and return the
    tuple ``(entries, random_entries, pagination)`` used to render an
    index page.

    *page* is 1-based; *join* lists optional relationships to outer-join
    before filtering.
    """
    if current_user.show_detail_random():
        randm = db.session.query(db.Books).filter(common_filters())\
            .order_by(func.random()).limit(config.config_random_books)
    else:
        randm = false()
    off = int(int(config.config_books_per_page) * (page - 1))
    # count() lets the database do the counting instead of materializing
    # every matching row just to take len() of the result list
    pagination = Pagination(page, config.config_books_per_page,
                            db.session.query(database).filter(db_filter).filter(common_filters()).count())
    entries = db.session.query(database).join(*join, isouter=True).filter(db_filter).filter(common_filters()).\
        order_by(*order).offset(off).limit(config.config_books_per_page).all()
    for book in entries:
        order_authors(book)  # mutates the entry in place
    return entries, randm, pagination
|
|
|
|
|
|
|
|
|
|
|
|
# read search results from calibre-database and return it (function is used for feed and simple search)
|
|
|
|
def get_search_results(term):
    """Search books by tag, series, author, publisher and title for *term*
    (both as typed and ASCII-transliterated via unidecode) and return the
    matching, user-visible book rows.
    """
    q = list()
    authorterms = re.split("[, ]+", term)
    for authorterm in authorterms:
        q.append(db.Books.authors.any(db.or_(db.Authors.name.ilike("%" + authorterm + "%"),
                                             db.Authors.name.ilike("%" + unidecode.unidecode(authorterm) + "%"))))
    # Register the python lcase implementation as SQL lower() on the raw
    # sqlite connection (side effect needed for case-insensitive matching).
    db.session.connection().connection.connection.create_function("lower", 1, db.lcase)
    # NOTE: a previous revision built an extra db.Books.authors.any(...)
    # clause here and discarded it; that dead statement has been removed.
    return db.session.query(db.Books).filter(common_filters()).filter(
        db.or_(db.Books.tags.any(db.Tags.name.ilike("%" + term + "%")),
               db.Books.series.any(db.Series.name.ilike("%" + term + "%")),
               db.Books.authors.any(and_(*q)),
               db.Books.publishers.any(db.Publishers.name.ilike("%" + term + "%")),
               db.Books.title.ilike("%" + term + "%"),
               db.Books.tags.any(db.Tags.name.ilike("%" + unidecode.unidecode(term) + "%")),
               db.Books.series.any(db.Series.name.ilike("%" + unidecode.unidecode(term) + "%")),
               db.Books.publishers.any(db.Publishers.name.ilike("%" + unidecode.unidecode(term) + "%")),
               db.Books.title.ilike("%" + unidecode.unidecode(term) + "%")
               )).all()
|
2019-03-16 15:48:09 +01:00
|
|
|
|
|
|
|
|
|
|
|
def get_unique_other_books(library_books, author_books):
    """Filter *author_books* (e.g. Goodreads results) down to books not
    already present in *library_books*, so we show fewer duplicates.

    Matching uses the library books' identifiers (ISBN, Goodreads, etc)
    and, when Levenshtein is available, an additional fuzzy title match.
    Note: Not all images will be shown, even though they're available on
    Goodreads.com. See https://www.goodreads.com/topic/show/18213769-goodreads-book-images
    """
    # Comprehensions instead of reduce/map/filter: on Python 3 the old
    # `acc + map(...)` raised TypeError and `not filter(...)` was always
    # False (filter objects are truthy), so the whole filter was broken.
    identifiers = [identifier.val
                   for book in library_books
                   for identifier in book.identifiers]
    other_books = [book for book in author_books
                   if book.isbn not in identifiers and book.gid["#text"] not in identifiers]

    # Fuzzy match book titles
    if use_levenshtein:
        library_titles = [book.title for book in library_books]
        other_books = [author_book for author_book in other_books
                       if not any(
                           # Remove items in parentheses before comparing
                           Levenshtein.ratio(re.sub(r"\(.*\)", "", author_book.title), library_title) > 0.7
                           for library_title in library_titles)]

    return other_books
|
2019-03-16 16:23:40 +01:00
|
|
|
|
|
|
|
|
|
|
|
def get_cc_columns():
    """Return the custom columns to use in the UI, excluding datatypes
    calibre-web cannot render and honouring the configured column pattern."""
    tmpcc = db.session.query(db.Custom_Columns).filter(db.Custom_Columns.datatype.notin_(db.cc_exceptions)).all()
    if config.config_columns_to_ignore:
        # Compile the pattern once instead of once per column.
        pattern = re.compile(config.config_columns_to_ignore)
        # NOTE(review): columns MATCHING the "ignore" pattern are kept, not
        # dropped — behavior preserved from the original, but the setting's
        # name suggests the opposite; confirm intended semantics.
        cc = [col for col in tmpcc if pattern.match(col.label)]
    else:
        cc = tmpcc
    return cc
|
|
|
|
|
|
|
|
def get_download_link(book_id, book_format):
    """Build the download response for the given book and format.

    Records the download for authenticated users, sets the content type and
    a UTF-8 encoded attachment filename, then delegates the file transfer
    to do_download_file().  Aborts with 404 when the book id or the
    requested format does not exist.
    """
    book_format = book_format.split(".")[0]
    book = db.session.query(db.Books).filter(db.Books.id == book_id).first()
    if not book:
        # Unknown book id previously crashed with AttributeError on book.id
        abort(404)
    data = db.session.query(db.Data).filter(db.Data.book == book.id)\
        .filter(db.Data.format == book_format.upper()).first()
    if data:
        # collect downloaded books only for registered user and not for anonymous user
        if current_user.is_authenticated:
            ub.update_download(book_id, int(current_user.id))
        file_name = book.title
        if len(book.authors) > 0:
            file_name = book.authors[0].name + '_' + file_name
        file_name = get_valid_filename(file_name)
        headers = Headers()
        try:
            headers["Content-Type"] = mimetypes.types_map['.' + book_format]
        except KeyError:
            # unknown extension: fall back to a generic binary content type
            headers["Content-Type"] = "application/octet-stream"
        headers["Content-Disposition"] = "attachment; filename*=UTF-8''%s.%s" % (quote(file_name.encode('utf-8')),
                                                                                 book_format)
        return do_download_file(book, book_format, data, headers)
    else:
        abort(404)
|