From 8c22778902af02d4569c4324e929fde433906dcc Mon Sep 17 00:00:00 2001 From: cdhigh Date: Sat, 15 Jun 2024 21:47:54 -0300 Subject: [PATCH] 3.1.1 --- application/lib/dictionary/mdict/mdict.py | 115 +---- application/lib/dictionary/mdict/readmdict.py | 471 +++++++----------- application/lib/dictionary/pystardict.py | 43 +- application/templates/adv_dict.html | 2 +- application/templates/dict.html | 58 ++- application/translations/messages.pot | 125 +++-- .../tr_TR/LC_MESSAGES/messages.mo | Bin 29594 -> 29711 bytes .../tr_TR/LC_MESSAGES/messages.po | 125 +++-- .../translations/zh/LC_MESSAGES/messages.mo | Bin 27850 -> 27958 bytes .../translations/zh/LC_MESSAGES/messages.po | 126 +++-- application/view/reader.py | 12 +- docker/Dockerfile | 4 +- docs/Chinese/reader.md | 11 +- docs/English/reader.md | 13 +- main.py | 2 +- tools/update_req.py | 4 +- 16 files changed, 516 insertions(+), 595 deletions(-) diff --git a/application/lib/dictionary/mdict/mdict.py b/application/lib/dictionary/mdict/mdict.py index 65d7f9c9..834d9d4d 100644 --- a/application/lib/dictionary/mdict/mdict.py +++ b/application/lib/dictionary/mdict/mdict.py @@ -41,20 +41,19 @@ def __init__(self, database='', host=None): self.database = database self.dictionary = None if database in self.databases: - #try: + try: self.dictionary = IndexedMdx(database) - #except Exception as e: - #default_log.warning(f'Instantiate mdict failed: {self.databases[database]}: {e}') + except Exception as e: + default_log.warning(f'Instantiate mdict failed: {self.databases[database]}: {e}') + else: + default_log.warning(f'dict not found: {self.databases[database]}') #返回当前使用的词典名字 def __repr__(self): return 'mdict [{}]'.format(self.databases.get(self.database, '')) def definition(self, word, language=''): - ret = self.dictionary.get(word) if self.dictionary else '' - if isinstance(ret, bytes): - ret = ret.decode(self.dictionary.meta.get('encoding', 'utf-8')) - return ret + return self.dictionary.get(word) if self.dictionary else '' #经过词典树缓存的Mdx class IndexedMdx: @@ -66,62 +65,35 @@ def __init__(self, fname, encoding="", substyle=False, passcode=None): prefix = os.path.splitext(fname)[0] dictName = os.path.basename(prefix) trieName = f'{prefix}.trie' - metaName = f'{prefix}.meta' self.trie = None - self.meta = {} - self.stylesheet = {} - if os.path.exists(trieName) and os.path.exists(metaName): + self.mdx = MDX(fname, encoding, substyle, passcode) + if os.path.exists(trieName): try: - self.trie = marisa_trie.RecordTrie(self.TRIE_FMT) + self.trie = marisa_trie.RecordTrie(self.TRIE_FMT) #type:ignore self.trie.load(trieName) - with open(metaName, 'r', encoding='utf-8') as f: - self.meta = json.loads(f.read()) - if not isinstance(self.meta, dict): - self.meta = {} - self.stylesheet = json.loads(self.meta.get("stylesheet", '{}')) except Exception as e: self.trie = None default_log.warning(f'Failed to load mdict trie data: {dictName}: {e}') - if self.trie and self.meta: - self.fMdx = open(fname, 'rb') + if self.trie: return #重建索引 default_log.info(f"Building trie for {dictName}") - mdx = MDX(fname, encoding, substyle, passcode) - dictIndex = mdx.get_index() - indexList = dictIndex["index_dict_list"] - #[(word, (params,)),...] 
#为了能制作大词典,mdx中这些数据都是64bit的,但是为了节省空间,这里只使用32bit保存(>LLLLLL) - idxBuff = [(item["key_text"].lower(), ( - item["file_pos"], #32bit - item["compressed_size"], #64bit - item["decompressed_size"], #64bit - item["record_start"], #64bit - item["record_end"], #64bit - item["offset"])) #64bit - for item in indexList] - self.trie = marisa_trie.RecordTrie(self.TRIE_FMT, idxBuff) + self.trie = marisa_trie.RecordTrie(self.TRIE_FMT, self.mdx.get_index()) #type:ignore self.trie.save(trieName) - self.meta = dictIndex['meta'] - #mdx内嵌css,键为序号(1-255),值为元祖 (startTag, endTag) - self.stylesheet = json.loads(self.meta.get("stylesheet", '{}')) - with open(metaName, 'w', encoding='utf-8') as f: - f.write(json.dumps(self.meta)) - - self.fMdx = open(fname, 'rb') - - del mdx + del self.trie - self.trie = marisa_trie.RecordTrie(self.TRIE_FMT) + self.trie = marisa_trie.RecordTrie(self.TRIE_FMT) #type:ignore self.trie.load(trieName) - del idxBuff import gc gc.collect() #获取单词释义,不存在则返回空串 def get(self, word): + if not self.trie: + return '' word = word.lower().strip() indexes = self.trie[word] if word in self.trie else None ret = self.get_content_by_Index(indexes) @@ -138,47 +110,21 @@ def __contains__(self, word) -> bool: #通过单词的索引数据,直接读取文件对应的数据块返回释义 #indexes是列表,因为可能有多个单词条目 def get_content_by_Index(self, indexes): - if not indexes: - return '' - - ret = [] - encoding = self.meta.get('encoding', 'utf-8') - for index in indexes: - filePos, compSize, decompSize, startPos, endPos, offset = index - self.fMdx.seek(filePos) - compressed = self.fMdx.read(compSize) - type_ = compressed[:4] #32bit-type, 32bit-adler, data - if type_ == b"\x00\x00\x00\x00": - data = compressed[8:] - elif type_ == b"\x01\x00\x00\x00": - #header = b"\xf0" + pack(">I", decompSize) - data = lzo.decompress(compressed[8:], initSize=decompSize, blockSize=1308672) - elif type_ == b"\x02\x00\x00\x00": - data = zlib.decompress(compressed[8:]) - else: - continue - record = data[startPos - offset : endPos - offset] - ret.append(record.decode(encoding, errors="ignore").strip("\x00")) - - txt = '
'.join(ret) - if self.stylesheet: - txt = self.replace_css(txt) - - #很多人制作的mdx很复杂,可能需要后处理 - return self.post_process(txt) + return self.post_process(self.mdx.get_content_by_Index(indexes)) #对查词结果进行后处理 def post_process(self, content): if not content: return '' - soup = BeautifulSoup(content, 'html.parser') #html.parser不会自动添加body + soup = BeautifulSoup(content, 'html.parser') #html.parser不会自动添加html/body #删除图像 for tag in soup.find_all('img'): tag.extract() - self.inline_css(soup) + self.adjust_css(soup) + #self.inline_css(soup) #碰到稍微复杂一些的CSS文件性能就比较低下,暂时屏蔽对CSS文件的支持 self.remove_empty_tags(soup) body = soup.body @@ -187,9 +133,9 @@ def post_process(self, content): return str(soup) - #将css样式内联到html标签中 - def inline_css(self, soup): - # 首先删除 height 属性 + #调整一些CSS + def adjust_css(self, soup): + #删除 height 属性 for element in soup.find_all(): if element.has_attr('height'): del element['height'] @@ -200,6 +146,8 @@ def inline_css(self, soup): del newStyle['height'] element['style'] = "; ".join(f"{k}: {v}" for k, v in newStyle.items()) + #将外部单独css文件的样式内联到html标签中 + def inline_css(self, soup): link = soup.find('link', attrs={'rel': 'stylesheet', 'href': True}) if not link: return @@ -225,7 +173,7 @@ def inline_css(self, soup): except Exception as e: default_log.warning(f'parse css failed: {self.mdxFilename}: {e}') return - + for rule in cssRules: if rule.type == rule.STYLE_RULE: selector = rule.selectorText @@ -263,16 +211,3 @@ def remove_empty_tags(self, soup, preserve_tags=None): self.remove_empty_tags(tag, preserve_tags) for tag in empty_tags: tag.decompose() - - #替换css,其实这个不是css,算是一种模板替换,不过都这么叫 - def replace_css(self, txt): - txt_list = re.split(r"`\d+`", txt) - txt_tag = re.findall(r"`\d+`", txt) - txt_styled = txt_list[0] - for j, p in enumerate(txt_list[1:]): - style = self.stylesheet[txt_tag[j][1:-1]] - if p and p[-1] == "\n": - txt_styled = txt_styled + style[0] + p.rstrip() + style[1] + "\r\n" - else: - txt_styled = txt_styled + style[0] + p + style[1] - return txt_styled diff --git a/application/lib/dictionary/mdict/readmdict.py b/application/lib/dictionary/mdict/readmdict.py index 59f04951..b5301c9d 100644 --- a/application/lib/dictionary/mdict/readmdict.py +++ b/application/lib/dictionary/mdict/readmdict.py @@ -16,41 +16,14 @@ # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. -import json -import re -import sys -import os - -# zlib compression is used for engine version >=2.0 -import zlib -from io import BytesIO +import os, re, sys, io, json, zlib from struct import pack, unpack from typing import Dict -if __name__ == '__main__': - sys.path.insert(0, os.path.dirname(__file__)) - from .pureSalsa20 import Salsa20 from .ripemd128 import ripemd128 from . 
import lzo -''' -key_block_info,key_block,record_block的开头4个字节是压缩类型 -b'\x00\x00\x00\x00'无压缩 -b'\x01\x00\x00\x00'lzo压缩 -b'\x02\x00\x00\x00'zlib压缩 - -self._encrypt是加密类型 -0 无加密 -1 Salsa20加密,需要提供self._passcode -2 ripemd128加密 -''' - -# 2x3 compatible -if sys.hexversion >= 0x03000000: - unicode = str - - class NumberFmt: """ python struct.unpack format, reference: https://docs.python.org/3/library/struct.html @@ -67,10 +40,10 @@ def _unescape_entities(text): """ unescape offending tags < > " & """ - text = text.replace(b"<", b"<") - text = text.replace(b">", b">") - text = text.replace(b""", b'"') - text = text.replace(b"&", b"&") + text = text.replace("<", "<") + text = text.replace(">", ">") + text = text.replace(""", '"') + text = text.replace("&", "&") return text @@ -109,35 +82,18 @@ def _decrypt_regcode_by_email(reg_code, email): encrypt_key = s20.encryptBytes(reg_code) return encrypt_key - -def _parse_header(header) -> Dict[str, str]: - """ - extract attributes from - """ - tag_list = re.findall(b'(\w+)="(.*?)"', header, re.DOTALL) - tag_dict = {} - for k, v in tag_list: - tag_dict[k] = _unescape_entities(v) - return tag_dict - - class MDict(object): """ Base class which reads in header and key block. It has no public methods and serves only as code sharing base class. """ - def __init__(self, fname, encoding="", passcode=None): self._fname = fname - self._encoding = encoding.upper() + self.encoding = encoding.upper() self._passcode = passcode self.header = self._read_header() - try: - self._key_list = self._read_keys() - except: - print("Try Brutal Force on Encrypted Key Blocks") - self._key_list = self._read_keys_brutal() + self._key_list = None def __len__(self): return self._num_entries @@ -149,9 +105,20 @@ def keys(self): """ Return an iterator over dictionary keys. 
""" - return (key_value for key_id, key_value in self._key_list) + return (key_value for key_id, key_value in self.key_list()) + + #按需加载单词信息列表 + def key_list(self): + if not self._key_list: + try: + self._key_list = self._read_keys() + except: + print("Try Brutal Force on Encrypted Key Blocks") + self._key_list = self._read_keys_brutal() + return self._key_list def _read_number(self, f): + "根据版本不同,在缓冲区f中读取4个字节或8个字节,返回一个整数" return unpack(self._number_format, f.read(self._number_width))[0] def _decode_key_block_info(self, key_block_info_compressed): @@ -164,8 +131,8 @@ def _decode_key_block_info(self, key_block_info_compressed): # decompress key_block_info = zlib.decompress(key_block_info_compressed[8:]) # adler checksum - adler32 = unpack(NumberFmt.be_uint, key_block_info_compressed[4:8])[0] - assert adler32 == zlib.adler32(key_block_info) & 0xFFFFFFFF + #adler32 = unpack(NumberFmt.be_uint, key_block_info_compressed[4:8])[0] + #assert adler32 == zlib.adler32(key_block_info) & 0xFFFFFFFF else: # no compression key_block_info = key_block_info_compressed @@ -183,16 +150,14 @@ def _decode_key_block_info(self, key_block_info_compressed): text_term = 0 while i < len(key_block_info): - # number of entries in current key block - num_entries += unpack( - self._number_format, key_block_info[i : i + self._number_width] - )[0] + #这一块key block包含多少个单词 number of entries in current key block + num_entries += unpack(self._number_format, key_block_info[i : i + self._number_width])[0] i += self._number_width # text head size text_head_size = unpack(byte_format, key_block_info[i : i + byte_width])[0] i += byte_width # text head - if self._encoding != "UTF-16": + if self.encoding != "UTF-16": i += text_head_size + text_term else: i += (text_head_size + text_term) * 2 @@ -200,26 +165,19 @@ def _decode_key_block_info(self, key_block_info_compressed): text_tail_size = unpack(byte_format, key_block_info[i : i + byte_width])[0] i += byte_width # text tail - if self._encoding != "UTF-16": + if self.encoding != "UTF-16": i += text_tail_size + text_term else: i += (text_tail_size + text_term) * 2 # key block compressed size - key_block_compressed_size = unpack( - self._number_format, key_block_info[i : i + self._number_width] - )[0] + key_block_compressed_size = unpack(self._number_format, key_block_info[i : i + self._number_width])[0] i += self._number_width # key block decompressed size - key_block_decompressed_size = unpack( - self._number_format, key_block_info[i : i + self._number_width] - )[0] + key_block_decompressed_size = unpack(self._number_format, key_block_info[i : i + self._number_width])[0] i += self._number_width - key_block_info_list += [ - (key_block_compressed_size, key_block_decompressed_size) - ] - - assert num_entries == self._num_entries + key_block_info_list.append((key_block_compressed_size, key_block_decompressed_size)) + #assert num_entries == self._num_entries return key_block_info_list def _decode_key_block(self, key_block_compressed, key_block_info_list): @@ -237,18 +195,10 @@ def _decode_key_block(self, key_block_compressed, key_block_info_list): if key_block_type == b"\x00\x00\x00\x00": key_block = key_block_compressed[start + 8 : end] elif key_block_type == b"\x01\x00\x00\x00": - if lzo is None: - print("LZO compression is not supported") - break - # decompress key block header = b"\xf0" + pack(NumberFmt.be_uint, decompressed_size) - key_block = lzo.decompress( - key_block_compressed[start + 8 : end], - initSize=decompressed_size, - blockSize=1308672, - ) + key_block = 
lzo.decompress(key_block_compressed[start + 8 : end], + initSize=decompressed_size, blockSize=1308672) elif key_block_type == b"\x02\x00\x00\x00": - # decompress key block key_block = zlib.decompress(key_block_compressed[start + 8 : end]) # extract one single key block into a key list key_list += self._split_key_block(key_block) @@ -269,7 +219,7 @@ def _split_key_block(self, key_block): key_block[key_start_index : key_start_index + self._number_width], )[0] # key text ends with '\x00' - if self._encoding == "UTF-16": + if self.encoding == "UTF-16": delimiter = b"\x00\x00" width = 2 else: @@ -283,7 +233,7 @@ def _split_key_block(self, key_block): i += width key_text = ( key_block[key_start_index + self._number_width : key_end_index] - .decode(self._encoding, errors="ignore") + .decode(self.encoding, errors="ignore") .encode("utf-8") .strip() ) @@ -291,50 +241,44 @@ def _split_key_block(self, key_block): key_list += [(key_id, key_text)] return key_list + #读取文件头,生成一个python字典 def _read_header(self): f = open(self._fname, "rb") - # number of bytes of header text >:big endian,I:unsigned int + #文件开头4个字节是头长度,不包括这4个字节和校验和,大端格式 header_bytes_size = unpack(NumberFmt.be_uint, f.read(4))[0] header_bytes = f.read(header_bytes_size) - # 4 bytes: adler32 checksum of header, in little endian - adler32 = unpack(NumberFmt.le_uint, f.read(4))[0] - assert adler32 == zlib.adler32(header_bytes) & 0xFFFFFFFF + #接下来4个字节,小端格式,adler32校验和 + f.read(4) + #adler32 = unpack(NumberFmt.le_uint, f.read(4))[0] + #assert adler32 == zlib.adler32(header_bytes) & 0xFFFFFFFF # mark down key block offset self._key_block_offset = f.tell() f.close() - # header text in utf-16 encoding ending with '\x00\x00' - header_text = header_bytes[:-2].decode("utf-16").encode("utf-8") - header_tag = _parse_header(header_text) - if not self._encoding: - encoding = header_tag[b"Encoding"] - if sys.hexversion >= 0x03000000: - encoding = encoding.decode("utf-8") - # GB18030 > GBK > GB2312 - if encoding in ["GBK", "GB2312"]: + #头部信息最后两个字节如果为(0x00, 0x00),则为UTF16编码,否则为UTF8编码 + headerEncoding = 'UTF-16' if header_bytes[-2:] == b'\x00\x00' else 'UTF-8' + header_text = header_bytes[:-2].decode(headerEncoding) + header_tag = self._parse_header(header_text) + if not self.encoding: + encoding = header_tag.get("Encoding", self.encoding) + if encoding in ("GBK", "GB2312"): # GB18030 > GBK > GB2312 encoding = "GB18030" - self._encoding = encoding - # 读取标题和描述 - if b"Title" in header_tag: - self._title = header_tag[b"Title"].decode("utf-8") - else: - self._title = "" + self.encoding = encoding or 'UTF-8' - if b"Description" in header_tag: - self._description = header_tag[b"Description"].decode("utf-8") - else: - self._description = "" - pass + self.title = header_tag.get('Title', '') + self.description = header_tag.get('Description', '') + # encryption flag # 0x00 - no encryption # 0x01 - encrypt record block # 0x02 - encrypt key info block - if b"Encrypted" not in header_tag or header_tag[b"Encrypted"] == b"No": - self._encrypt = 0 - elif header_tag[b"Encrypted"] == b"Yes": + encrypt = header_tag.get('Encrypted', '') + if encrypt == "Yes": self._encrypt = 1 + elif encrypt.isdigit(): + self._encrypt = int(encrypt) else: - self._encrypt = int(header_tag[b"Encrypted"]) + self._encrypt = 0 # stylesheet attribute if present takes form of: # style_number # 1-255 @@ -342,15 +286,17 @@ def _read_header(self): # style_end # or '' # store stylesheet in dict in the form of # {'number' : ('style_begin', 'style_end')} - self._stylesheet = {} - if 
header_tag.get(b"StyleSheet"): - lines = header_tag[b"StyleSheet"].decode("utf-8").strip().splitlines() - for i in range(0, len(lines), 3): - self._stylesheet[lines[i]] = (lines[i + 1], lines[i + 2]) - + stylesheet = {} + lines = header_tag.get("StyleSheet", "").splitlines() + for i in range(0, len(lines), 3): + if (i + 2) < len(lines): + k, v1, v2 = lines[i:i+3] + stylesheet[k] = (v1, v2) + self.stylesheet = stylesheet + # before version 2.0, number is 4 bytes integer # version 2.0 and above uses 8 bytes - self._version = float(header_tag[b"GeneratedByEngineVersion"]) + self._version = float(header_tag["GeneratedByEngineVersion"]) if self._version < 2.0: self._number_width = 4 self._number_format = NumberFmt.be_uint @@ -360,55 +306,60 @@ def _read_header(self): return header_tag + #将文件头信息分析为一个python字典 + def _parse_header(self, header) -> Dict[str, str]: + """ + extract attributes from + """ + tag_list = re.findall(r'(\w+)="(.*?)"', header, re.DOTALL) + tag_dict = {} + for k, v in tag_list: + tag_dict[k] = _unescape_entities(v) + return tag_dict + + #文件头后面就是单词信息列表 def _read_keys(self): f = open(self._fname, "rb") f.seek(self._key_block_offset) - # the following numbers could be encrypted - if self._version >= 2.0: - num_bytes = 8 * 5 - else: - num_bytes = 4 * 4 + #词典加密的原理是加密开头几个字节 + num_bytes = (8 * 5) if self._version >= 2.0 else (4 * 4) block = f.read(num_bytes) if self._encrypt & 1: if self._passcode is None: - raise RuntimeError( - "user identification is needed to read encrypted file" - ) + raise RuntimeError("user identification is needed to read encrypted file") regcode, userid = self._passcode - if isinstance(userid, unicode): - userid = userid.encode("utf8") - if self.header[b"RegisterBy"] == b"EMail": + if isinstance(userid, str): + userid = userid.encode("utf-8") + if self.header.get("RegisterBy") == "EMail": encrypted_key = _decrypt_regcode_by_email(regcode, userid) else: encrypted_key = _decrypt_regcode_by_deviceid(regcode, userid) block = _salsa_decrypt(block, encrypted_key) - # decode this block - sf = BytesIO(block) - # number of key blocks - num_key_blocks = self._read_number(sf) - # number of entries - self._num_entries = self._read_number(sf) - # number of bytes of key block info after decompression + sf = io.BytesIO(block) + num_key_blocks = self._read_number(sf) #Key block数量 + self._num_entries = self._read_number(sf) #词典中单词总数 + #2.0版本的key block info是压缩的,这里存放解压后长度 if self._version >= 2.0: - key_block_info_decomp_size = self._read_number(sf) - # number of bytes of key block info + self._read_number(sf) + #key block info字节长度 key_block_info_size = self._read_number(sf) - # number of bytes of key block + #单词块总共的字节长度 key_block_size = self._read_number(sf) - # 4 bytes: adler checksum of previous 5 numbers + # 4 bytes: 前面num_bytes字节的adler校验和 if self._version >= 2.0: - adler32 = unpack(NumberFmt.be_uint, f.read(4))[0] - assert adler32 == (zlib.adler32(block) & 0xFFFFFFFF) + f.read(4) + #adler32 = unpack(NumberFmt.be_uint, f.read(4))[0] + #assert adler32 == (zlib.adler32(block) & 0xFFFFFFFF) # read key block info, which indicates key block's compressed and # decompressed size key_block_info = f.read(key_block_info_size) key_block_info_list = self._decode_key_block_info(key_block_info) - assert num_key_blocks == len(key_block_info_list) + #assert num_key_blocks == len(key_block_info_list) # read key block key_block_compressed = f.read(key_block_size) @@ -517,33 +468,28 @@ def _decode_record_block(self): if record_block_type == b"\x00\x00\x00\x00": record_block = 
record_block_compressed[8:] elif record_block_type == b"\x01\x00\x00\x00": - if lzo is None: - print("LZO compression is not supported") - break - # decompress header = b"\xf0" + pack(NumberFmt.be_uint, decompressed_size) - record_block = lzo.decompress( - record_block_compressed[start + 8 : end], - initSize=decompressed_size, - blockSize=1308672, - ) + record_block = lzo.decompress(record_block_compressed[start + 8 : end], + initSize=decompressed_size, blockSize=1308672) elif record_block_type == b"\x02\x00\x00\x00": - # decompress record_block = zlib.decompress(record_block_compressed[8:]) + else: + record_block = b'' # notice that adler32 return signed value assert adler32 == zlib.adler32(record_block) & 0xFFFFFFFF assert len(record_block) == decompressed_size # split record block according to the offset info from key block - while i < len(self._key_list): - record_start, key_text = self._key_list[i] + keyList = self.key_list() + while i < len(keyList): + record_start, key_text = keyList[i] # reach the end of current record block if record_start - offset >= len(record_block): break # record end index - if i < len(self._key_list) - 1: - record_end = self._key_list[i + 1][0] + if i < len(keyList) - 1: + record_end = keyList[i + 1][0] else: record_end = len(record_block) + offset i += 1 @@ -598,42 +544,36 @@ def get_index(self, check_block=True): record_block_type = record_block_compressed[:4] # 4 bytes: adler32 checksum of decompressed record block adler32 = unpack(NumberFmt.be_uint, record_block_compressed[4:8])[0] + record_block = b'' if record_block_type == b"\x00\x00\x00\x00": _type = 0 if check_block: record_block = record_block_compressed[8:] elif record_block_type == b"\x01\x00\x00\x00": _type = 1 - if lzo is None: - print("LZO compression is not supported") - break - # decompress - header = b"\xf0" + pack(NumberFmt.be_uint, decompressed_size) + #header = b"\xf0" + pack(NumberFmt.be_uint, decompressed_size) if check_block: - record_block = lzo.decompress( - record_block_compressed[start + 8 : end], - initSize=decompressed_size, - blockSize=1308672, - ) + record_block = lzo.decompress(record_block_compressed[start + 8 : end], + initSize=decompressed_size, blockSize=1308672) elif record_block_type == b"\x02\x00\x00\x00": - # decompress _type = 2 if check_block: record_block = zlib.decompress(record_block_compressed[8:]) - + # notice that adler32 return signed value if check_block: assert adler32 == zlib.adler32(record_block) & 0xFFFFFFFF assert len(record_block) == decompressed_size # split record block according to the offset info from key block - while i < len(self._key_list): + keyList = self.key_list() + while i < len(keyList): ### 用来保存索引信息的空字典 index_dict = {} index_dict["file_pos"] = current_pos index_dict["compressed_size"] = compressed_size index_dict["decompressed_size"] = decompressed_size index_dict["record_block_type"] = _type - record_start, key_text = self._key_list[i] + record_start, key_text = keyList[i] index_dict["record_start"] = record_start index_dict["key_text"] = key_text.decode("utf-8") index_dict["offset"] = offset @@ -641,8 +581,8 @@ def get_index(self, check_block=True): if record_start - offset >= decompressed_size: break # record end index - if i < len(self._key_list) - 1: - record_end = self._key_list[i + 1][0] + if i < len(keyList) - 1: + record_end = keyList[i + 1][0] else: record_end = decompressed_size + offset index_dict["record_end"] = record_end @@ -676,13 +616,14 @@ def items(self): """Return a generator which in turn produce tuples in the form of 
(key, value)""" return self._decode_record_block() + #一种模板替换,使用词典内的预定义模板替换释义 def _substitute_stylesheet(self, txt): # substitute stylesheet definition - txt_list = re.split("`\d+`", txt) - txt_tag = re.findall("`\d+`", txt) + txt_list = re.split(r"`\d+`", txt) + txt_tag = re.findall(r"`\d+`", txt) txt_styled = txt_list[0] for j, p in enumerate(txt_list[1:]): - style = self._stylesheet[txt_tag[j][1:-1]] + style = self.stylesheet[txt_tag[j][1:-1]] if p and p[-1] == "\n": txt_styled = txt_styled + style[0] + p.rstrip() + style[1] + "\r\n" else: @@ -722,6 +663,7 @@ def _decode_record_block(self): ### record_start (以下三个为从 record_block 中提取某一调记录需要的参数,可以直接保存) ### record_end ### offset + record_block = b'' for compressed_size, decompressed_size in record_block_info_list: record_block_compressed = f.read(compressed_size) ###### 要得到 record_block_compressed 需要得到 compressed_size (这个可以直接记录) @@ -731,22 +673,12 @@ def _decode_record_block(self): record_block_type = record_block_compressed[:4] # 4 bytes adler checksum of uncompressed content adler32 = unpack(NumberFmt.be_uint, record_block_compressed[4:8])[0] - # no compression if record_block_type == b"\x00\x00\x00\x00": record_block = record_block_compressed[8:] - # lzo compression elif record_block_type == b"\x01\x00\x00\x00": - if lzo is None: - print("LZO compression is not supported") - break - # decompress header = b"\xf0" + pack(NumberFmt.be_uint, decompressed_size) - record_block = lzo.decompress( - record_block_compressed[8:], - initSize=decompressed_size, - blockSize=1308672, - ) - # zlib compression + record_block = lzo.decompress(record_block_compressed[8:], + initSize=decompressed_size, blockSize=1308672) elif record_block_type == b"\x02\x00\x00\x00": # decompress record_block = zlib.decompress(record_block_compressed[8:]) @@ -755,18 +687,18 @@ def _decode_record_block(self): ###### record_block_type ###### 另外还需要校验信息 adler32 # notice that adler32 return signed value - assert adler32 == zlib.adler32(record_block) & 0xFFFFFFFF - - assert len(record_block) == decompressed_size + #assert adler32 == zlib.adler32(record_block) & 0xFFFFFFFF + #assert len(record_block) == decompressed_size # split record block according to the offset info from key block - while i < len(self._key_list): - record_start, key_text = self._key_list[i] + keyList = self.key_list() + while i < len(keyList): + record_start, key_text = keyList[i] # reach the end of current record block if record_start - offset >= len(record_block): break # record end index - if i < len(self._key_list) - 1: - record_end = self._key_list[i + 1][0] + if i < len(keyList) - 1: + record_end = keyList[i + 1][0] else: record_end = len(record_block) + offset i += 1 @@ -775,13 +707,13 @@ def _decode_record_block(self): record = record_block[record_start - offset : record_end - offset] # convert to utf-8 record = ( - record.decode(self._encoding, errors="ignore") + record.decode(self.encoding, errors="ignore") .strip("\x00") .encode("utf-8") ) # substitute styles #############是否替换样式表 - if self._substyle and self._stylesheet: + if self._substyle and self.stylesheet: record = self._substitute_stylesheet(record) yield key_text, record @@ -804,7 +736,9 @@ def _decode_record_block(self): ### def get_index(self, check_block=True): ### 索引列表 - index_dict_list = [] + keyList = self.key_list() + keyListLen = len(keyList) + f = open(self._fname, "rb") f.seek(self._record_block_offset) @@ -816,18 +750,12 @@ def get_index(self, check_block=True): # record block info section record_block_info_list = [] - size_counter 
= 0 for i in range(num_record_blocks): compressed_size = self._read_number(f) decompressed_size = self._read_number(f) - record_block_info_list += [(compressed_size, decompressed_size)] - size_counter += self._number_width * 2 - assert size_counter == record_block_info_size + record_block_info_list.append((compressed_size, decompressed_size)) # actual record block data - offset = 0 - i = 0 - size_counter = 0 ###最后的索引表的格式为 ### key_text(关键词,可以由后面的 keylist 得到) ### file_pos(record_block开始的位置) @@ -837,100 +765,63 @@ def get_index(self, check_block=True): ### record_start (以下三个为从 record_block 中提取某一调记录需要的参数,可以直接保存) ### record_end ### offset + current_pos = f.tell() + f.close() + offset = 0 + i = 0 for compressed_size, decompressed_size in record_block_info_list: - current_pos = f.tell() - record_block_compressed = f.read(compressed_size) ###### 要得到 record_block_compressed 需要得到 compressed_size (这个可以直接记录) ###### 另外还需要记录当前 f 对象的位置 ###### 使用 f.tell() 命令/ 在建立索引是需要 f.seek() # 4 bytes indicates block compression type - record_block_type = record_block_compressed[:4] + #record_block_type = record_block_compressed[:4] # 4 bytes adler checksum of uncompressed content - adler32 = unpack(NumberFmt.be_uint, record_block_compressed[4:8])[0] - # no compression - if record_block_type == b"\x00\x00\x00\x00": - _type = 0 - record_block = record_block_compressed[8:] - # lzo compression - elif record_block_type == b"\x01\x00\x00\x00": - _type = 1 - if lzo is None: - print("LZO compression is not supported") - break - # decompress - header = b"\xf0" + pack(NumberFmt.be_uint, decompressed_size) - if check_block: - record_block = lzo.decompress( - record_block_compressed[8:], - initSize=decompressed_size, - blockSize=1308672, - ) - # zlib compression - elif record_block_type == b"\x02\x00\x00\x00": - # decompress - _type = 2 - if check_block: - record_block = zlib.decompress(record_block_compressed[8:]) - ###### 这里比较重要的是先要得到 record_block, 而 record_block 是解压得到的,其中一共有三种解压方法 - ###### 需要的信息有 record_block_compressed, decompress_size, - ###### record_block_type - ###### 另外还需要校验信息 adler32 - # notice that adler32 return signed value - if check_block: - assert adler32 == zlib.adler32(record_block) & 0xFFFFFFFF - assert len(record_block) == decompressed_size + #adler32 = unpack(NumberFmt.be_uint, record_block_compressed[4:8])[0] # split record block according to the offset info from key block - while i < len(self._key_list): - ### 用来保存索引信息的空字典 - index_dict = {} - index_dict["file_pos"] = current_pos - index_dict["compressed_size"] = compressed_size - index_dict["decompressed_size"] = decompressed_size - index_dict["record_block_type"] = _type - record_start, key_text = self._key_list[i] - index_dict["record_start"] = record_start - index_dict["key_text"] = key_text.decode("utf-8") - index_dict["offset"] = offset - # reach the end of current record block - if record_start - offset >= decompressed_size: + while i < keyListLen: + record_start, key_text = keyList[i] + if record_start - offset >= decompressed_size: # reach the end of current record block break - # record end index - if i < len(self._key_list) - 1: - record_end = self._key_list[i + 1][0] - else: - record_end = decompressed_size + offset - index_dict["record_end"] = record_end + + record_end = keyList[i + 1][0] if i < keyListLen - 1 else (decompressed_size + offset) + index_tuple = (current_pos, compressed_size, decompressed_size, record_start, + record_end, offset) + yield (key_text.decode('utf-8'), index_tuple) i += 1 - #############需要得到 record_block , record_start, 
record_end, - #############offset - if check_block: - record = record_block[record_start - offset : record_end - offset] - # convert to utf-8 - record = ( - record.decode(self._encoding, errors="ignore") - .strip("\x00") - .encode("utf-8") - ) - # substitute styles - #############是否替换样式表 - if self._substyle and self._stylesheet: - record = self._substitute_stylesheet(record) - index_dict_list.append(index_dict) + current_pos += compressed_size offset += decompressed_size - size_counter += compressed_size - # todo: 注意!!! - # assert(size_counter == record_block_size) - f.close - # 这里比 mdd 部分稍有不同,应该还需要传递编码以及样式表信息 - meta = {} - meta["encoding"] = self._encoding - meta["stylesheet"] = json.dumps(self._stylesheet) - meta["title"] = self._title - meta["description"] = self._description - - return {"index_dict_list": index_dict_list, "meta": meta} + return + + #通过单词的索引数据,直接读取文件对应的数据块返回释义 + #indexes是列表,因为可能有多个相同的单词条目 + def get_content_by_Index(self, indexes) -> str: + if not indexes: + return '' + + ret = [] + f = open(self._fname, 'rb') + for index in indexes: + #这6个变量是保存到trie的数据格式,都是32位保存 + filePos, compSize, decompSize, startPos, endPos, offset = index + f.seek(filePos) + compressed = f.read(compSize) + type_ = compressed[:4] #32bit-type, 32bit-adler, data + if type_ == b"\x00\x00\x00\x00": + data = compressed[8:] + elif type_ == b"\x01\x00\x00\x00": + #header = b"\xf0" + pack(">I", decompSize) + data = lzo.decompress(compressed[8:], initSize=decompSize, blockSize=1308672) + elif type_ == b"\x02\x00\x00\x00": + data = zlib.decompress(compressed[8:]) + else: + continue + record = data[startPos - offset : endPos - offset] + ret.append(record) #.strip(b"\x00")) + f.close() + txt = b'
'.join(ret).decode(self.encoding) + return self._substitute_stylesheet(txt) if self.stylesheet else txt if __name__ == "__main__": import sys @@ -1001,7 +892,7 @@ def passcode(s): # read mdict file if ext.lower() == ".mdx": mdx = MDX(args.filename, args.encoding, args.substyle, args.passcode) - if type(args.filename) is unicode: + if isinstance(args.filename, str): bfname = args.filename.encode("utf-8") else: bfname = args.filename @@ -1016,7 +907,7 @@ def passcode(s): mdd_filename = "".join([base, os.path.extsep, "mdd"]) if os.path.exists(mdd_filename): mdd = MDD(mdd_filename, args.passcode) - if type(mdd_filename) is unicode: + if isinstance(mdd_filename, str): bfname = mdd_filename.encode("utf-8") else: bfname = mdd_filename @@ -1044,7 +935,7 @@ def passcode(s): if mdx.header.get("StyleSheet"): style_fname = "".join([base, "_style", os.path.extsep, "txt"]) sf = open(style_fname, "wb") - sf.write(b"\r\n".join(mdx.header["StyleSheet"].splitlines())) + sf.write("\r\n".join(mdx.header["StyleSheet"].splitlines())) sf.close() # write out optional data files if mdd: diff --git a/application/lib/dictionary/pystardict.py b/application/lib/dictionary/pystardict.py index 5aa79c37..b2208984 100644 --- a/application/lib/dictionary/pystardict.py +++ b/application/lib/dictionary/pystardict.py @@ -174,13 +174,13 @@ def __init__(self, dict_prefix, container): idx_filename = f'{dict_prefix}.idx' idx_filename_gz = f'{idx_filename}.gz' trie_filename = f'{dict_prefix}.trie' - fmt_filename = f'{dict_prefix}.fmt' self.trie = None - if os.path.exists(trie_filename) and os.path.exists(fmt_filename): + bytes_size = int(container.ifo.idxoffsetbits / 8) + offset_format = 'L' if bytes_size == 4 else 'Q' + trie_fmt = f">{offset_format}L" + if os.path.exists(trie_filename): try: - with open(fmt_filename, 'r') as f: - fmt = f.read() - self.trie = marisa_trie.RecordTrie(fmt) + self.trie = marisa_trie.RecordTrie(trie_fmt) #type:ignore self.trie.load(trie_filename) except Exception as e: self.trie = None @@ -196,7 +196,7 @@ def __init__(self, dict_prefix, container): except Exception as e: raise Exception('idx file opening error: "{}"'.format(e)) - _file = file.read() + fileContent = file.read() #check file size if file.tell() != container.ifo.idxfilesize: @@ -206,38 +206,33 @@ def __init__(self, dict_prefix, container): file.close() #prepare main dict and parsing parameters - bytes_size = int(container.ifo.idxoffsetbits / 8) - offset_format = 'L' if bytes_size == 4 else 'Q' record_size = str(bytes_size + 4).encode('utf-8') #偏移+数据长 #parse data via regex record_pattern = br'(.+?\x00.{' + record_size + br'})' - matched_records = re.findall(record_pattern, _file, re.DOTALL) #type:ignore + matched_records = re.findall(record_pattern, fileContent, re.DOTALL) #type:ignore #check records count if len(matched_records) != container.ifo.wordcount: raise Exception('words count is incorrect') #unpack parsed records - idxBuff = [] - for matched_record in matched_records: - cnt = matched_record.find(b'\x00') + 1 - record_tuple = unpack(f'!{cnt}c{offset_format}L', matched_record) - word = b''.join(record_tuple[:cnt-1]).decode('utf8').lower() - idxBuff.append((word, record_tuple[cnt:])) #(word, (offset, size)) - - fmt = f"!{offset_format}L" - self.trie = marisa_trie.RecordTrie(fmt, idxBuff) + #为了减小一点内存占用,将这部分写成生成器 + def idxForTrie(): + for matched_record in matched_records: + cnt = matched_record.find(b'\x00') + 1 + record_tuple = unpack(f'>{cnt}c{offset_format}L', matched_record) + word = 
b''.join(record_tuple[:cnt-1]).decode('utf8').lower() + yield (word, record_tuple[cnt:]) #(word, (offset, size)) + + self.trie = marisa_trie.RecordTrie(trie_fmt, idxForTrie()) #type:ignore self.trie.save(trie_filename) - with open(fmt_filename, 'w') as f: - f.write(fmt) - + del self.trie - self.trie = marisa_trie.RecordTrie(fmt) + self.trie = marisa_trie.RecordTrie(trie_fmt) #type:ignore self.trie.load(trie_filename) - del _file + del fileContent del matched_records - del idxBuff import gc gc.collect() diff --git a/application/templates/adv_dict.html b/application/templates/adv_dict.html index c115a5c9..68f56919 100644 --- a/application/templates/adv_dict.html +++ b/application/templates/adv_dict.html @@ -78,7 +78,7 @@

     - {{_("Query")}} + {{_("Word lookup")}}

{% else %} diff --git a/application/templates/dict.html b/application/templates/dict.html index c0162b67..94dc10b8 100644 --- a/application/templates/dict.html +++ b/application/templates/dict.html @@ -1,6 +1,6 @@ {% extends "base.html" %} {% block titleTag -%} -{{_("Dict")}} - KindleEar +{{_("Word lookup")}} - KindleEar {% endblock -%} {% block menubar -%} {% endblock -%} @@ -11,6 +11,10 @@ padding: 0px 10px 40px 10px; border-top: 1px solid #ccc; } + .tr_container span { + margin: 5px; + padding: 0px; + } .tr_engine { display: none; margin: 10px 0px 20px 10px; @@ -31,10 +35,6 @@ {% endif -%}
-
- - -
+
+ + +

- +

@@ -70,6 +74,9 @@ {% block js -%} {% endblock -%} diff --git a/application/translations/messages.pot b/application/translations/messages.pot index a5b8cbed..067a2912 100644 --- a/application/translations/messages.pot +++ b/application/translations/messages.pot @@ -8,7 +8,7 @@ msgid "" msgstr "" "Project-Id-Version: PROJECT VERSION\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2024-06-09 21:53-0300\n" +"POT-Creation-Date: 2024-06-15 21:09-0300\n" "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" "Last-Translator: FULL NAME \n" "Language-Team: LANGUAGE \n" @@ -322,12 +322,13 @@ msgstr "" #: application/templates/adv_dict.html:49 #: application/templates/adv_dict.html:66 #: application/templates/book_translator.html:26 +#: application/templates/dict.html:47 msgid "Engine" msgstr "" #: application/templates/adv_dict.html:33 #: application/templates/adv_dict.html:55 -#: application/templates/adv_dict.html:72 +#: application/templates/adv_dict.html:72 application/templates/dict.html:53 msgid "Database" msgstr "" @@ -335,7 +336,12 @@ msgstr "" msgid "Other languages" msgstr "" -#: application/templates/adv_dict.html:84 application/view/reader.py:29 +#: application/templates/adv_dict.html:81 application/templates/dict.html:3 +#: application/templates/dict.html:63 +msgid "Word lookup" +msgstr "" + +#: application/templates/adv_dict.html:86 application/view/reader.py:29 #: application/view/reader.py:95 msgid "Online reading feature has not been activated yet." msgstr "" @@ -1104,6 +1110,7 @@ msgid "One key per line" msgstr "" #: application/templates/book_translator.html:40 +#: application/templates/dict.html:39 msgid "Source language" msgstr "" @@ -1173,6 +1180,14 @@ msgstr "" msgid "Confirm Change" msgstr "" +#: application/templates/debug_cmd.html:3 +msgid "Debug cmd" +msgstr "" + +#: application/templates/dict.html:59 +msgid "Word" +msgstr "" + #: application/templates/home.html:3 msgid "Home" msgstr "" @@ -1574,7 +1589,7 @@ msgstr "" msgid "Fwd as Attach" msgstr "" -#: application/view/admin.py:48 application/view/adv.py:425 +#: application/view/admin.py:48 application/view/adv.py:434 #: application/view/settings.py:76 application/view/translator.py:83 #: application/view/translator.py:167 msgid "Settings Saved!" @@ -1694,29 +1709,29 @@ msgstr "" msgid "Unknown command: {}" msgstr "" -#: application/view/adv.py:427 +#: application/view/adv.py:436 msgid "The format is invalid." msgstr "" -#: application/view/adv.py:459 +#: application/view/adv.py:468 msgid "Authorization Error!
{}" msgstr "" -#: application/view/adv.py:480 +#: application/view/adv.py:489 msgid "Success authorized by Pocket!" msgstr "" -#: application/view/adv.py:486 +#: application/view/adv.py:495 msgid "" "Failed to request authorization of Pocket!
See details " "below:

{}" msgstr "" -#: application/view/adv.py:507 +#: application/view/adv.py:516 msgid "The Instapaper service encountered an error. Please try again later." msgstr "" -#: application/view/adv.py:520 +#: application/view/adv.py:529 msgid "Request type [{}] unsupported" msgstr "" @@ -1821,16 +1836,16 @@ msgstr "" msgid "The article is missing?" msgstr "" -#: application/view/reader.py:180 application/view/translator.py:116 +#: application/view/reader.py:195 application/view/translator.py:116 #: application/view/translator.py:200 msgid "The text is empty." msgstr "" -#: application/view/reader.py:274 +#: application/view/reader.py:291 msgid "Failed to push: {}" msgstr "" -#: application/view/reader.py:322 +#: application/view/reader.py:339 msgid "Failed to create ebook." msgstr "" @@ -1842,171 +1857,171 @@ msgstr "" msgid "Title is requied!" msgstr "" -#: application/view/settings.py:174 +#: application/view/settings.py:216 msgid "English" msgstr "" -#: application/view/settings.py:175 +#: application/view/settings.py:217 msgid "Chinese" msgstr "" -#: application/view/settings.py:176 +#: application/view/settings.py:218 msgid "French" msgstr "" -#: application/view/settings.py:177 +#: application/view/settings.py:219 msgid "Spanish" msgstr "" -#: application/view/settings.py:178 +#: application/view/settings.py:220 msgid "Portuguese" msgstr "" -#: application/view/settings.py:179 +#: application/view/settings.py:221 msgid "German" msgstr "" -#: application/view/settings.py:180 +#: application/view/settings.py:222 msgid "Italian" msgstr "" -#: application/view/settings.py:181 +#: application/view/settings.py:223 msgid "Japanese" msgstr "" -#: application/view/settings.py:182 +#: application/view/settings.py:224 msgid "Russian" msgstr "" -#: application/view/settings.py:183 +#: application/view/settings.py:225 msgid "Turkish" msgstr "" -#: application/view/settings.py:184 +#: application/view/settings.py:226 msgid "Korean" msgstr "" -#: application/view/settings.py:185 +#: application/view/settings.py:227 msgid "Arabic" msgstr "" -#: application/view/settings.py:186 +#: application/view/settings.py:228 msgid "Czech" msgstr "" -#: application/view/settings.py:187 +#: application/view/settings.py:229 msgid "Dutch" msgstr "" -#: application/view/settings.py:188 +#: application/view/settings.py:230 msgid "Greek" msgstr "" -#: application/view/settings.py:189 +#: application/view/settings.py:231 msgid "Hindi" msgstr "" -#: application/view/settings.py:190 +#: application/view/settings.py:232 msgid "Malaysian" msgstr "" -#: application/view/settings.py:191 +#: application/view/settings.py:233 msgid "Bengali" msgstr "" -#: application/view/settings.py:192 +#: application/view/settings.py:234 msgid "Persian" msgstr "" -#: application/view/settings.py:193 +#: application/view/settings.py:235 msgid "Urdu" msgstr "" -#: application/view/settings.py:194 +#: application/view/settings.py:236 msgid "Swahili" msgstr "" -#: application/view/settings.py:195 +#: application/view/settings.py:237 msgid "Vietnamese" msgstr "" -#: application/view/settings.py:196 +#: application/view/settings.py:238 msgid "Punjabi" msgstr "" -#: application/view/settings.py:197 +#: application/view/settings.py:239 msgid "Javanese" msgstr "" -#: application/view/settings.py:198 +#: application/view/settings.py:240 msgid "Tagalog" msgstr "" -#: application/view/settings.py:199 +#: application/view/settings.py:241 msgid "Hausa" msgstr "" -#: application/view/settings.py:200 +#: application/view/settings.py:242 msgid "Thai" msgstr "" -#: 
application/view/settings.py:201 +#: application/view/settings.py:243 msgid "Polish" msgstr "" -#: application/view/settings.py:202 +#: application/view/settings.py:244 msgid "Romanian" msgstr "" -#: application/view/settings.py:203 +#: application/view/settings.py:245 msgid "Hungarian" msgstr "" -#: application/view/settings.py:204 +#: application/view/settings.py:246 msgid "Swedish" msgstr "" -#: application/view/settings.py:205 +#: application/view/settings.py:247 msgid "Hebrew" msgstr "" -#: application/view/settings.py:206 +#: application/view/settings.py:248 msgid "Norwegian" msgstr "" -#: application/view/settings.py:207 +#: application/view/settings.py:249 msgid "Finnish" msgstr "" -#: application/view/settings.py:208 +#: application/view/settings.py:250 msgid "Danish" msgstr "" -#: application/view/settings.py:209 +#: application/view/settings.py:251 msgid "Ukrainian" msgstr "" -#: application/view/settings.py:210 +#: application/view/settings.py:252 msgid "Tamil" msgstr "" -#: application/view/settings.py:211 +#: application/view/settings.py:253 msgid "Marathi" msgstr "" -#: application/view/settings.py:212 +#: application/view/settings.py:254 msgid "Burmese" msgstr "" -#: application/view/settings.py:213 +#: application/view/settings.py:255 msgid "Amharic" msgstr "" -#: application/view/settings.py:214 +#: application/view/settings.py:256 msgid "Azerbaijani" msgstr "" -#: application/view/settings.py:215 +#: application/view/settings.py:257 msgid "Kazakh" msgstr "" diff --git a/application/translations/tr_TR/LC_MESSAGES/messages.mo b/application/translations/tr_TR/LC_MESSAGES/messages.mo index 9831f4787a9f223036e2a32517d1c69d0abcb482..2a9f36c04b56c2c3bc43d75e0ba9d945a059925e 100644 GIT binary patch delta 7815 zcmYM%4SbK~9>?*UJ=@p}v(4Dw9t<;#8AJ1sIn2W_@>DTl#B3N*Ik)FXDJ#dblsKId z*-|;-b%@Y|wO)mjDV^#hO7fx(R_A=ayMM3O>E-2pUDtiz*Y*4TuIs-4+WdW;XD-)q zzl`)>@9HNR8#aF1`gK}3PB7PlQTI1Q-5+E6 z%oOB4*J*D9T~TXiqaGZB&2SWI!r7>a9z{P~f}ywq72rD5eH*YL?m#_Xj+*a`T|aC6 zuP~7Ho$qP*xZxV6VtBmc@W<(4jz=y07;2)Wn2k?j8lJ{h_!}l*3!mdO#XeXc$D#r$ z#1LGD_3&x+T^bu{w8fWD4}6TuF)c9_7$j+lo@xEPiBE>yrDq2~D)t zRE9^*O7kKrv8$-(@3=G+aUgZp#0^mwV$5WWrk{?AJQv$wzKt)!0Q!GHO}rIV$=7WB z9aP1R*!450t-XkPOWfaSDAI5?G7p=h9~R**oQ*NqG{ws-9Rum-nfGB3{Rhk`sDz5l z1?Dnyt+@%Qxa+)Z1AEPbsFEJXA$Sh8m&vUiXEfe}I#laX_w7gBcN{sVP9){=#|#X> zEbI5eAo~4KXKHY5pZ$M;hDtUCmDwCr1s0+vT7<#46jiA;sQWgd7T96#w(&iv=PJy1 z?D}Erzh|Dp+V}rs8bf*D9BSfLd<+y&8Y-ji$T4y5$2GXbuGdfXDiDi0GiexwS*Wux z47K+YP#G7aDzF%p&?({}%N&{Dhj=ubnq>V^rnh zP?hmvZ9=Guq@m{PWc~i_*nd4Rh=CByLq#|i^19S%Tdp-!@9Ty z73g*w|0|B9zu&G0rg_hYq_O`hRTKltBpFqKOw@Qk?1cHKimXJHbO(mv8>od3qcS^b z*Ds&~xQhI&I5$!AHEQoAn1HeL+qyLPR8K?>^ zMU{3n*2A^d-(=Ufqbj-!^;_{e>b*Z^efKjOn&<}%#p~9uL1p0A(K}ROsEiX)6Lv&B z*9~4b2;zU%*I-@el zLIs$MIuj#Nfz3v3!6Iyf%dNi`72pBXeJ4-}RGDYZ3+N_u;Svo!5SZab7=;R;1!{pL zR7FzldZvwMqXNr8o$@@?kJT*H{ZC*jmZA3ih*^!Q%$*GOUnTj20U6lEd$0kjAA<^{ zB`UxaR3=?95qnyHBC6z5tY3=y5-!Ah@L5zMXRsfBj`~fB&7}T%4?AUg84N*XFcWpd z3e;=24F}-+c0IhSSMt`_jqwrIUxU1<&H?0)^DTcxVH`KbV`pTyoqX(xD_t78@jWCc z=PoLs^epe6R)etz{kf>|eVBtEptdf8lce8>OwzR#B7|%zYk?E-Q7SvvMd2MJYqD|&D)Zu#(70}->8Y{37euSFflKCU*`Tw8}S&fbR z^=7~6hocYgMFlz;bMX;u?9w1&(TL%tY=L`Fd;cLSuqxD^U9kQYRE2I@ zzb>zWDiLNjK~aPi=GZ27tP?;>iwRIflanwYifATVp zLQT*NHJ)JoH0x)eKjXbH2>aRiU~{xN33XqQYXfsohiefkkaZY~8&Cn3+xWYv2alox zc;Bv9S^o=6Wc)j9h9LvIK$21Oq**@`m57^7Lks0#5>CW;T!ETs7Y5;J)B{yG2+w00 zCb6N~^AX55$61e>=Mz-PZ((x`%JmLiGHSj~=+pb(od(|oXFTeGO{faIhB~GDt$zgd z`W#0Eat`%ge~I<*rukp99#Q%jZ-QDd6B}c;nU4vq?@Xhi3D=-9FGC&1a#SW&$bol$ z#a7sDu=jJWzF8S@Lf{%`a#ejRmI8V>a;8il$)&ibv<4P&4^4K3IM_5Ke> 
zy#+H+-|S*kMo(b_+-a7h0y}E`DpZ9op#lub^Iq2&ROTtD@ouO}<>pa;m2MOR+M5Sa zr*#vQ>Y)6O4KJgaF{nyJT{=;7PU|oYR_{q97mwe&Lq^9%)Eq7prY zTKA$$qdtwFY~U`orXQNmHed#-0xR);+<}q!J8I&Hkz|GOsMmNYD)ViqfOeq*-D8%c zGJnVV?r9p@lS))3U!x-b5p~$En|DwP`j7G!XoQ-$nb{I`e~MjCH?zz>c6}gfo_ufI zbtcickAa6#l{kt`uo64rcbJCJ-1G|eN1fuIQ5oO0e$;61%p{;n+Xpq@a8!a5PzlaP zRp@c_*ZaSmh7RFs)B|OxFV+rJ>5rgFc^*}nuTWd?4Qk?FQ2_;y@vb*U-5-srNW9qw zb(T7#)|r68djE@PD6;ve$d;hq+tsKNuSG5Vyp3-}ExZl2@IKUp@1pKMjoRZ%)V!Ba zXXL6~A3D~1ZX~+8aUzXqoQ*2|lc)uDU{@^1Hh2Yff8%l9g2|`_I%8wZw()#a1qv_$ zr=i}OHK+tPqUPB#j{5WONM}C-KD>r(rxQNj+k&YWPk%Woke6`?R-+bZ%PXNurK1AL zL@m(A#s{MA8;UVF3YBOvY71B0PyMy`TNqG8FQX-(Q|T*mxdzw1(mt~gI+1eqxP}{eK;Sr@CH;M+fip?uZ@?Z z&cH#`R-8ngjcQb4m+ks(Y(_t5g7>_eNTc?>MeS8OYOhA3&OiaGB6Ds0F{~{eMlrqx zRiScJ1x}$Byo3ty7rTB7l|bl3@7FXETj}?IBn>U_7&gLXsQw02CNH4I-$Wh4Q>gJ9 zs6c}Xyr1dDs0Ff60rW;yWH_pl6YTmF)RxS|6utlRXz1{4LzV0m)B|stf4A%ZKpnOZ zQ2~B}n&>>n;@7B()Sv=u#5W(p4qfe;|77o*9EW;MQc)$$!6dv7 zyW?Z1(!YhR@k8u}H&Jgv+7$2SITzd0UxGSAZ=hbgW7r!nU>?RzrT)rf=2S1E=TV30 z5GsRDu`B+8S~#=N`@i8qsFFW{I?XSkN_zy41xFoAvr>UBJey)o@!?_V~vP z18|2+LqCVRF%%DAFdjwi(Fdp!UNnD3O;BehUoi|sO*|i0;nP?bql&zMVo+P$3AIix z2H{xiy9G4-7$`AIQ2{K#5L|+-aSdt<-b77w8Wq@i)bm$R&s{^Eo!_u82G8=gb`UC| zxv0HgiM-aXbC!lCzJhx2zZi%Sq^I}3DQY|wm1$3GfcY4TlTaDWLT$->OvH7lfDV{P zupa%BsKh?S`g;GXX=s8gsD*BzO6*tSeQJYI3q|8$OhN_tFAT#UQ1}0eqwp@a#o@EP z!~8fdq+f>fF>H=^xR>D~z5n}Ys5EJFy@-3GG8~BYaSSTrsn##C{-049t}@r5Dzq7u z>3-CyKZL6AKT&~SGjCy(Ca9sI(-=_d?PX|b#jDNx1O`o-SLl1FXmZ8n#KIO4ql@QE zcJ!BTW^wV1c_kG?`}X$>PVeIDl-|8#R>jtwk-<#{6qFYD3KkyUKVxP=QGstpanZce ec@;koDNATHXv)m#MN=FPU%?!%S1c}x_4^+}%W^gV delta 7708 zcmYM(3wW1R9l-H-u(6GeF$NRH^$%kV7$`7cx`1(!i{O}p8F*P-q7BqgL?9yln;MFW zV2B!usXU+}Mmh~sLqxK;l%kSIDkPdtl29m>4Ela~&-3KNv(Gu_J?}Z^_dDnO{{vTi znzH7Tl;j6_sjDOYdp9MDI^st;*8czBrcP0mM)fI7#|CVMF9x@V_MM@=7c+RiKeYcX zv^NHi1;0Z2C8JZJA-!`HId={^Q77zxMd*Md(22)mQ>?};yck{J476W8w!&-B_isYS zdni2L5bB$;8S_V5C?x2x6-)3ScEK!uXbC@@cmg`mBpim9Vqg3P_P{@2H#~}Mu~j09 zT3|19A!V3}ld(D0T2E4#O`$hli@vZHjpRu*!e=oRx1kyOJ-U#$u?@b5zV`(>!D;lp z%&t+ypD3Rn_AkShSP`tkq${3A!3b|c>&uWJqD@$Yd$13FiT$t(ox0;iXygmf1wV+6 zvmTvj6B_wT=mPd*E*`=&3+_h#-MiB?WMf)kJa8TwX&L&$nBWw2;Mt-6BQ(-w!PRJL zA3?WZ6FUA5bYZ*Dh3-W&dZ3W}o3ev6iT+tL0Ny5d~+p%)gQ?W54hE=LERiDqzK zXuko?)GgupJ?NIMLvP2c=t7hGC{$56j7_nW4PB3ASb*=Nk$r{DFuypqBf9XyUeHZf?!Q>8k+Gd;(9W=E<8x0DZL#laV@%sM{o)@?HM1Y%g}y{(0;cg=Q7%lsdy68 z@N}rBk+yW|&C#=z7wUzW?foyIU}Wc_8K^)98ig4+4oy`x+HV>zAShbX3}pJPGtUQEd>Yu2fByH(TMy|VEdzPcn6L}pTC7>;1GJ4j^kN)8a)eV zmBf2rgho6F&A=EmpozgLm~`Ul6pXMQ-SY+58dspt*P$C1w=mMTbzDA-K(23qf1N;=*<1suNvy%LDf^zgrr3U@rS%nGwHTJ~4Scxam42?S{ z&QKLLr(P54)6nO$&lY8)0X>v`Lwz{9Ws{Ju(Ny$v!@gXQDm2urf4)QP>N|hx&EsI7`recc1~>7kn`I2o}-40h7M4hk`474_&}V=mejk z8TleS|2DLz4vZI;g`VfAvPHl*L$ETFA4Q>^w3t|Q2Z_$$US&IK7f9w973<**MrEv5wsp0M=${G zFaf=G-^X!yXL!C3P5Dtg7u%J^^=jm1MoW-C(MEopg&$%U{07<0sDPJ-;DuGv=u#M+e7;0MfRG&R3NBj1jG6u*u}xQjynkra2h6!cn$^kcpe(j9ccRsbfEii6FwTASB;D!{zQ}c z;VrorJ+v?5Rrq(jz~`glzb~xCpHu%ccE%apMYsCK^U1#}SxSR@v=XiV63xgHp}qyp z#4EvFXvA-!3;YOu|DWhtIfZ5@mn)-f(f2!}6Bna__8m?B?@HlH(BMGZ#>88&8-4K& zwEdk>|1i`WF_rccXr{gi?I~m9{@KB{XuocuUW}fl0q8=;Bq?~mFGN>ZA0AwTzPJ!w zz>@HMX{fKjLfY402i%J3cmN&e!%#nh&8Qzo=Q)YpF@Icqos;Dh9OzO^#~aWWmf{3l zj(zc6WLu+jHkQwrXgoU5&(M@_#7_7UddLo-<9&h&{3mk5qulXv|A|NjlF@VuzEBr8 zMDx+pz8GD|o#;JYh0XDa;IqM(F+uw-biyO(^*SERAc}6(JE7wZ$1XS)JA41@DHzF8 zro=pL`3k7?-0HJcl06moR~ULihS;Xg`5{sHasD z81qL1D0q6uq2G2>Fca(1)Xu{!yg9fcJbwTaw690c%4_IEyV3r8L;ZcsrhXW`=Eu;} z-+UtZ_p>>Nf{~P<3mF-#Mi(?a)UQD^lSCJ`0bAhnXvD9D_IJ<>HKG|ifo{ov(ZiZo z6@Lz#T}A#~Q6&wgt{(mJn2Ub&u15#jiaEFwooGM0*NxZ`zd+AS%0=;3v_bm~L7!J* z9oC|e@4$oj$BW2+35Cb2se~D%wx-|ZT 
zOh<1`Z#2R(besz0*Tm=|OyF{4m!e;xTab1c`R_uZ@Ur;&jlxRmOV9~+qN#cVUC2Ik zfqjE4$q=q zj%KJD%|JamVG^BSWq5uM8o)*zflp%(JdUQk{q*>gvm06;j0BR5hJ^={(L-1tKZx!| z7qkJpV*@(Deslo`(Tp5JQ+Wz~o;oAmk}NEy-UdB8L(q(kMf*=aW0HSJ2py)OhwTb< zg;%3{y9nFkjc7(5Ko|B57U9d-2S3Ixn8#f-GXwBUr04>M1uqEgHOl{?e~$h| zp&Z-KiVxSt*pvDdcrLEOj<^R0V<@2TZ=pJdNd;=XU7Yjh%RsucE@{gApREn<}PDi(BPLhK6@F&5$&;cICt8qPg=<=`PFO1j+o8WWkf}ThBcrQ9pBc|im zp&tD(K8!7ctBg&f_ z?|l)rroI3jcNzNLL)Z*A$Ip||Z^DCJXrzC|9Q+cqFoiT2Q44fS^05&6p$n=B)?#z& zb!cSsumvtb-(Q9<;BGX-k77&CU-TFSC)$F)!&gIxpI*cNT%^7fd*gfP=}n)7JE9pWLL(c3p7OD1 zN@t;|TpYX+o$yxl@U29*?%w$g2Re;t)=<=aR_BH_BS$yQ=yzUXK>zYVr465quE}Vq Zo_Ke+)X@unaNVlcYr8gdn0KPd{{WGoTL%CD diff --git a/application/translations/tr_TR/LC_MESSAGES/messages.po b/application/translations/tr_TR/LC_MESSAGES/messages.po index 313952a0..1de9b599 100644 --- a/application/translations/tr_TR/LC_MESSAGES/messages.po +++ b/application/translations/tr_TR/LC_MESSAGES/messages.po @@ -7,7 +7,7 @@ msgid "" msgstr "" "Project-Id-Version: PROJECT VERSION\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2024-06-09 21:53-0300\n" +"POT-Creation-Date: 2024-06-15 21:09-0300\n" "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" "Last-Translator: FULL NAME \n" "Language: tr_TR\n" @@ -323,12 +323,13 @@ msgstr "Kitap dili" #: application/templates/adv_dict.html:49 #: application/templates/adv_dict.html:66 #: application/templates/book_translator.html:26 +#: application/templates/dict.html:47 msgid "Engine" msgstr "Motor" #: application/templates/adv_dict.html:33 #: application/templates/adv_dict.html:55 -#: application/templates/adv_dict.html:72 +#: application/templates/adv_dict.html:72 application/templates/dict.html:53 msgid "Database" msgstr "Veritabanı" @@ -336,7 +337,12 @@ msgstr "Veritabanı" msgid "Other languages" msgstr "Diğer diller" -#: application/templates/adv_dict.html:84 application/view/reader.py:29 +#: application/templates/adv_dict.html:81 application/templates/dict.html:3 +#: application/templates/dict.html:63 +msgid "Word lookup" +msgstr "Kelime arama" + +#: application/templates/adv_dict.html:86 application/view/reader.py:29 #: application/view/reader.py:95 msgid "Online reading feature has not been activated yet." msgstr "Çevrimiçi okuma özelliği henüz etkinleştirilmedi." @@ -1122,6 +1128,7 @@ msgid "One key per line" msgstr "Satır başına bir tuş" #: application/templates/book_translator.html:40 +#: application/templates/dict.html:39 msgid "Source language" msgstr "Kaynak dil" @@ -1191,6 +1198,14 @@ msgstr "Paylaşma anahtarı" msgid "Confirm Change" msgstr "Değişikliği Onayla" +#: application/templates/debug_cmd.html:3 +msgid "Debug cmd" +msgstr "Hata ayıklama komutu" + +#: application/templates/dict.html:59 +msgid "Word" +msgstr "Kelime" + #: application/templates/home.html:3 msgid "Home" msgstr "Ana Sayfa" @@ -1608,7 +1623,7 @@ msgstr "İlet" msgid "Fwd as Attach" msgstr "Ek olarak ilet" -#: application/view/admin.py:48 application/view/adv.py:425 +#: application/view/admin.py:48 application/view/adv.py:434 #: application/view/settings.py:76 application/view/translator.py:83 #: application/view/translator.py:167 msgid "Settings Saved!" @@ -1728,19 +1743,19 @@ msgstr "Makaleye URL'nin QR kodunu ekle" msgid "Unknown command: {}" msgstr "Bilinmeyen komut: {}" -#: application/view/adv.py:427 +#: application/view/adv.py:436 msgid "The format is invalid." msgstr "Format geçersiz." -#: application/view/adv.py:459 +#: application/view/adv.py:468 msgid "Authorization Error!
{}" msgstr "Yetkilendirme Hatası!
{}" -#: application/view/adv.py:480 +#: application/view/adv.py:489 msgid "Success authorized by Pocket!" msgstr "Pocket tarafından yetkilendirilen başarı!" -#: application/view/adv.py:486 +#: application/view/adv.py:495 msgid "" "Failed to request authorization of Pocket!
See details " "below:

{}" @@ -1748,13 +1763,13 @@ msgstr "" "Pocket yetkilendirme isteği başarısız oldu!
Aşağıdaki ayrıntılara " "bakın:

{}" -#: application/view/adv.py:507 +#: application/view/adv.py:516 msgid "The Instapaper service encountered an error. Please try again later." msgstr "" "Instapaper servisi bir hata ile karşılaştı. Lütfen daha sonra tekrar " "deneyin." -#: application/view/adv.py:520 +#: application/view/adv.py:529 msgid "Request type [{}] unsupported" msgstr "İstek türü [{}] desteklenmiyor" @@ -1865,16 +1880,16 @@ msgstr "" msgid "The article is missing?" msgstr "Makale eksik mi?" -#: application/view/reader.py:180 application/view/translator.py:116 +#: application/view/reader.py:195 application/view/translator.py:116 #: application/view/translator.py:200 msgid "The text is empty." msgstr "Metin boş." -#: application/view/reader.py:274 +#: application/view/reader.py:291 msgid "Failed to push: {}" msgstr "İtilemedi: {}" -#: application/view/reader.py:322 +#: application/view/reader.py:339 msgid "Failed to create ebook." msgstr "E-kitap oluşturulamadı." @@ -1886,171 +1901,171 @@ msgstr "Kindle E-mail adresi gerekli!" msgid "Title is requied!" msgstr "Başlık zorunlu!" -#: application/view/settings.py:174 +#: application/view/settings.py:216 msgid "English" msgstr "İngilizce" -#: application/view/settings.py:175 +#: application/view/settings.py:217 msgid "Chinese" msgstr "Çince" -#: application/view/settings.py:176 +#: application/view/settings.py:218 msgid "French" msgstr "Fransızca" -#: application/view/settings.py:177 +#: application/view/settings.py:219 msgid "Spanish" msgstr "İspanyolca" -#: application/view/settings.py:178 +#: application/view/settings.py:220 msgid "Portuguese" msgstr "Portekizce" -#: application/view/settings.py:179 +#: application/view/settings.py:221 msgid "German" msgstr "Almanca" -#: application/view/settings.py:180 +#: application/view/settings.py:222 msgid "Italian" msgstr "İtalyanca" -#: application/view/settings.py:181 +#: application/view/settings.py:223 msgid "Japanese" msgstr "Japonca" -#: application/view/settings.py:182 +#: application/view/settings.py:224 msgid "Russian" msgstr "Rusça" -#: application/view/settings.py:183 +#: application/view/settings.py:225 msgid "Turkish" msgstr "Türkçe" -#: application/view/settings.py:184 +#: application/view/settings.py:226 msgid "Korean" msgstr "Koreli" -#: application/view/settings.py:185 +#: application/view/settings.py:227 msgid "Arabic" msgstr "Arapça" -#: application/view/settings.py:186 +#: application/view/settings.py:228 msgid "Czech" msgstr "Çek" -#: application/view/settings.py:187 +#: application/view/settings.py:229 msgid "Dutch" msgstr "Flemenkçe" -#: application/view/settings.py:188 +#: application/view/settings.py:230 msgid "Greek" msgstr "Yunan" -#: application/view/settings.py:189 +#: application/view/settings.py:231 msgid "Hindi" msgstr "Hintçe" -#: application/view/settings.py:190 +#: application/view/settings.py:232 msgid "Malaysian" msgstr "Malezyalı" -#: application/view/settings.py:191 +#: application/view/settings.py:233 msgid "Bengali" msgstr "Bengal" -#: application/view/settings.py:192 +#: application/view/settings.py:234 msgid "Persian" msgstr "Farsça" -#: application/view/settings.py:193 +#: application/view/settings.py:235 msgid "Urdu" msgstr "Urduca" -#: application/view/settings.py:194 +#: application/view/settings.py:236 msgid "Swahili" msgstr "Svahili" -#: application/view/settings.py:195 +#: application/view/settings.py:237 msgid "Vietnamese" msgstr "Vietnam" -#: application/view/settings.py:196 +#: application/view/settings.py:238 msgid "Punjabi" msgstr "Pencap" -#: 
application/view/settings.py:197 +#: application/view/settings.py:239 msgid "Javanese" msgstr "Cava" -#: application/view/settings.py:198 +#: application/view/settings.py:240 msgid "Tagalog" msgstr "Tagalog" -#: application/view/settings.py:199 +#: application/view/settings.py:241 msgid "Hausa" msgstr "Hausa" -#: application/view/settings.py:200 +#: application/view/settings.py:242 msgid "Thai" msgstr "Taylandca" -#: application/view/settings.py:201 +#: application/view/settings.py:243 msgid "Polish" msgstr "Lehçe" -#: application/view/settings.py:202 +#: application/view/settings.py:244 msgid "Romanian" msgstr "Rumen" -#: application/view/settings.py:203 +#: application/view/settings.py:245 msgid "Hungarian" msgstr "Macarca" -#: application/view/settings.py:204 +#: application/view/settings.py:246 msgid "Swedish" msgstr "İsveççe" -#: application/view/settings.py:205 +#: application/view/settings.py:247 msgid "Hebrew" msgstr "İbranice" -#: application/view/settings.py:206 +#: application/view/settings.py:248 msgid "Norwegian" msgstr "Norveççe" -#: application/view/settings.py:207 +#: application/view/settings.py:249 msgid "Finnish" msgstr "Fince" -#: application/view/settings.py:208 +#: application/view/settings.py:250 msgid "Danish" msgstr "Danca" -#: application/view/settings.py:209 +#: application/view/settings.py:251 msgid "Ukrainian" msgstr "Ukraynaca" -#: application/view/settings.py:210 +#: application/view/settings.py:252 msgid "Tamil" msgstr "Tamilce" -#: application/view/settings.py:211 +#: application/view/settings.py:253 msgid "Marathi" msgstr "Marathi" -#: application/view/settings.py:212 +#: application/view/settings.py:254 msgid "Burmese" msgstr "Burmaca" -#: application/view/settings.py:213 +#: application/view/settings.py:255 msgid "Amharic" msgstr "Amharca" -#: application/view/settings.py:214 +#: application/view/settings.py:256 msgid "Azerbaijani" msgstr "Azerbaycanca" -#: application/view/settings.py:215 +#: application/view/settings.py:257 msgid "Kazakh" msgstr "Kazakça" diff --git a/application/translations/zh/LC_MESSAGES/messages.mo b/application/translations/zh/LC_MESSAGES/messages.mo index 92db23f156d825fb58a676e1c9a587fba0380eec..c281f6c6b7f75f8165a463644c32eb68a409d160 100644 GIT binary patch delta 7806 zcmYM&2~<~Q8prXgB8x%@uAm}_h>57EU}%D&lpA6$DK5E(8$&7=!( zGEq6EmRPA-TIyjkEk)DRhFqF*?37D7-yiomXF41{@AKYwdEVvz|Jc#*t9*E-ihDlP zXNALm2pxTF-k!CE? 
z&UIQ_MMu=!DX5Ma*boPy2Aql-=mo5Xv#}=5M7uKQU!$NE?nK3Vk;ORW7=!n)6*lEzTg=2}I19D%EvSUQMve0=YNB$~%CDmm z@Q-$!T38D!6OLy8b#`s22*gCxz-g$JW}`YhXD&buyvpK#qE@=s{2aBlWvC-4M~#0A zmDn9rqE(ngJ6fX|`>!nvrb32cC`O|?B%$7lE?5_bqE_-OYT|rU`*{|xL|z1E6KaK@ znJ3J1sD)ia^}pj%P{zLOvjz@9JqR;numN!bD)V0WFb=T#0<2E_57fYGP&@g))$c*= z*nWF{9CftkP;ZI*7X@V+%t2;iW2}ZbxCN(T7}k&VR+fOi#F^$(=tn%l9FJO1j+t-H zHJ6!1$d0?tW~K(*U}YIg{^r%ou_sI0v;;i&5>0P!p^--?sX#sD35p9(!JD z@j>%TtbG5EQRqvD)2M-)^I@Qb;!!K=gj^G6I4;K7_B<%g+ktS@or%YKn2fp${ZMB= z61C!7)DFx-E$CHrRVbvOiQh!6a0BYhi?KEyvFGPd?JuL=h8w7XtF`h5u7ldSMyQ>M z#L5Msb|fA(UOS5)Z^ikmLvJbqFcXzw7OLZDYd8TbFC}W=sThdQoAXfp3$ZG$MkTt= z>fgbE#5?S{Z@kw(AfEHrR@I|ID~UnvKoY9HJGR3Cs2zC)wWaGZ5VxZyE=8^E3wwSR zmB2ORYsI;R8n1S1Z-GrQoVcY+fgdNGUz#8f_2se*^}%xtBk?M>z?yBm+ui}SLvv7D zyAW&OGK-7s`8w2&Zb5xle1LlI4_NG;q@aO*#+vx6#rIGvsMglIRDq}!N23O8i|Y3X z>Mo?B1|EcZEwe3t0d-_ckRhGrs87fP$OoJ2T%hm}6}K@J{S&-_dY}>*irSg6sEG?O z2xlGUf65=%$j@=Vm1)!$I<7hoJNN1ge8vmCWEcRFzX+LHTJNZ&-S<3p%843$U} zD#2LPN;+aRK5FqO)RvF8I1lwnI1{_z>!^hs$L{zo>N_PoiT&4m*e=OiK?Z6CQ&0`& zqh7nU*aHvS^Wcu&mbbu1sPAv_V&qMAb|F8`Wq#GeMl@=I?UB=V2H>Okic3Ka4(^2)sn2twLM^}rRr0<9%)C5ED0On&TcBZqArY~y8a!~CHk^Anr zB@~p=6>HFdwD@z)c@+ENNaS>!Z5W1^P!s)yk=U61RecI-r9)6>o{Majv)bZ+VI$)E zsIza#ryNJ=I!!3(mNr9Ornaav?}WNUT~QxAy-}~(5Y%V5*>%V@OiA`QaC_CD?5$)fcY79%l|;F{2uD_A>c7@fKXH%gPO24YNrxW zpBG)RK0ayn6Hy80nX^#iE#-mm7&h;ti@MRJ9Nw9 zs=Nx?i9oY1Y6l`v3AaJ@?}Xano~RugW{yc`|24owDyri&)JpPkVHL-D3Dv>(32%pj z(1*A#sy^J}D2tn;ANBF51teO1s`)UAC1b!lF=`c3A0 zsP;R|{pMGw_TQl{?KRXVr%w-W+&bt-+|Xh-nnElUEioSZqt0d)Hp3mLiB6#syM%hL zuVNfVaiD6Sfid_pYU0hPex=wHk6L^K1BmZjR;sJru{`76e9|D5|2w6f;Cy_LkHcI0Vnfv=!;rWjk|In%$7 z*S!0H&7>GcoA zn#4^p99yG~qGu-iuflLDxCTxx>iz!+H{gEMgroX-zJyA26>6flEZ%CCSp7$+#J(`g z%(Lbd^FOW?ZkyGf^uA<*P_J1O>JkmX&Nvly6dzzMJcxWoIVVw{pyBAAF&Kwgy*t?+OtXrYP&+W!>KB-W7)t$W)PUQpewXj(KeqaVX<+ZDgs;_TGq3%c=YTT(-pO2OA z{~QXM_%+nlud%oo^&PMewIdhp`F&KvA%ndEqfq@@n2A=OhIOg$iE2L*D|ZSt&&VemB3M}KW+6Ft-b;^;qRt@mX}b& zEcRam#8IKk(+)LISJVUh&%l?iUa5HM4Qq+WpPfQ%6sn_ZET1@Q&d9Vo7e6615|>+!@YL3QRB2i{(5uTBRk?c z`SxHv>J#l9i}#=s`NaGh8xntS@$Y8U5ng>TYJxaazjmm^Qmwu}DxoZM1V%8wGn#^S zU>2(5BGf=D&2{#Ci^av}hh{12jvPWwcm_4z4b)#gf1uu?@Ta|Ysi;H-VX)r+EDGu{ z(VUJ-q`+Kcu0$oa8MPDdTl@v;XpUQa9+l7)FdK!2iYe7&697Bpp+UhhY@1HcRdK4_HY3@2H6vjP?HMwGwq%_n;CjLmkPD zv97n0&~e^?NvH-xP#>9jsKh?9`U=!e`Hc6B#xUYE)C9v(iOoR0&MQ&l9k=?cSdBRF z8Sf}-xfFEP_048x0_skrSlk0O&>*XyifP0zqK;@kX5vY#ip{dU9f(DZ-_hdk=u14% z>fK=!l+grpiamJNEI_Sz5r*KqSOY&s4RjdQ|Cq&PsKmd+&iErL@rViD9q5Ezh%>OB z-v7lEbXFTN2zR0~{uDLAG1P!3QMdNI#lP9}yB1fS=#3MII*LZ9aT3imGt(SiDd#_t zLI4e)LrpLXwW2qyekCfw4Y&<=pb{H3$(tY>)jkh(IrFh4?m?~m67mK*53oBvJ=yyX z*o3b#zjKs=ws^o4?_aCauqN?rb14Q87om1&6Dpw{=#QV7M^Ov-25aC?7>K`__fd)0 z%<&AxdKw_iDw?7?Je*VVUgO7n{l?75jvSjauB0M5JEGR$+-c(+{feBDn>%^Nvn74g zo~Y)ZkQmu6p;O!BlC|ljet{L+=T;Q2xKdbp`QyzcSsBHRYF}BpqM~@I?+T{+Nx?Hip^Em>FhGo3#)l|NqSLcSFo(HsR`sx}EA*6hSWH57*bOysKMcY_SR03-CRm87SBjDNdsO?GsQ&(JLEf-?2IVfDJJ+!MQrv8a0tD48tK<3r8#O zk(fZD4NgThs6egcEz}A(V=(SQ?a;@liF}Fm@hGa@CDZ`7Q0>B+ImaIt&0ngYg?C`K znTwugTtGrAoQcYpBa3kBFbVf!Tl@*zVN)u#!23}vpN^XFtEhfnM-8+Nwer8ACU6KN z@C4qTaC7!wXLpN0IMzt?JB~uFGz-=4#Y} zHlf<@LQQn9M?xL{+X{}G7qAiWtEd@Aa1O081{Lp)TG<1rj*Cz`IK|>mqIT*TD_?>- z($%Qf@eyjG-T@N1B)-8Q%-}#@$1IG&uTd+ziXj-C>>G!gc%qqtT2Y4C#q4ViH49KX zKGDy6ZkiQ%s4bm`Iaq-@!wZ;)L9P7DGzwMkF;u;I$h~xjFc`064ZLOf8f;rl^0iQR zDa!JR7_Rr9LP9IM2ekv)sE)d0DE2{Z)gV;80@MH{=46XcMYSt4pS1E>mM=H|gkjWM zi~||pRgh4}-=og(GHONoC~*9)InKx4sPg@&9XNryOqa0%-a_4l1}Xm8C!toHiQ0i) zs09r)^U%}4V@YU*rKmHXj`i?)RQYOD!?mc_P>Jfe8rAU;)Xtqk?aXPsy&%+%Tt@YG z&GO-G{rZt@+5a#Kq6w&@rl^L=sD^i1#SB!(Ss0F;&3>r%c^HU=sEHO^{C7B%{9{(W 
z7sJW#L+#Yjw(P%Fa)E$$;D!~{N_8%sd<<$w2B4l(395rfQ3KCHt!#mnuS88?6Y^uk zZ9@%o7`4E&*cgArJF&LM-x{DJ>W9iu)Q4v=Cg9uH3ctV{ypGzTK570AR+f8pF?%{3##Ew)Lp32&hI!D zbtzLV-xYOagORE(AN3uXi~4}A!Mcp^c9O^@@EK}`b?@>gkbv5m)~JEIV;$^|x@;p* zcVsl`D2gxwCtLmrRQ)-q9a@U&?={o{wqcmw|A!{R z(3UpBNK8e=vym0KT+{@|U=$Xi7BC$<;uDyR?_;Rm{|OT6;2dhpenqY17HVY?8UCGU zjG9;$Y6ab~HTJXoG*mw`Q1#}c7O>2G)m({5#Mhvw275_phDT8o_!c$5dDM}2$YP{X;*?%SW5>Uf0t>8FnB4Rs&{zwrkJ_;eRQ){U9=kHsguJ(` z!ZGBxFc--524WJj+in`h;G3v{KEee2#^S;3vsN09I`d3qb6lb2U&1)@yHRI<5_Oc{ zA(zy1=Sk?6|AIPmeFk+GLQo%`NYpcnM}6blp(dDtTIo!jgiEbFAlrYQ!5B?E26ac$ zkO5s6EANLvdjAEt6Pz{bGM1ty^e{HUGOUM-P#vr>-$AYD1Jos}vUoM}P}~7bz_70V zMB89L@}03h&c#TNLS!6R!&O$Ed!KXsaYOh^ujED4rQL~> z@ig{Td3XQc3l+GC{AU=Cg}g-_^;13Af6Zhz0iDr8RDK0&N8Ysj7Sv9BXzoF+ct2`_ z-=f<82X$9&qIM{PCnFo8+Q*{?PDU*>wI};u9>9|zpo-gj`75u&VDfuV@qLy*X!);D zJ9G*);qw;%+5E2=*xRochN>5dI)YdX!B!p#-O@DF8TYh;e6s-6pu~K{d60qT;z zhWb)&MRi<_HSu%HAHrnv$1xRa_VJI#%Ouf)KnZG~`KS(FLcQY^n1Tnf9R~LGKR_9% zfk&X)O~vNuS$-9Uk>6$%G$G;_TTbrfMXG25WZvn=1+9BT1GjHP@E zhT#I#-C1V7j#_yoYGGfY7INx#ob#{O-@nD3P+L=sZE%sf1J&Rxs)LvTz6q#_C8KsC z%i{N=jwa8XXz?d7ocJ@?3|~;5@m-}A>@xp}8t4dWMJFwO!MtYH$noz^6sle;Gu^xw z)lV-B#~gE{#Y@rCxA`Fwx`gwsU?r--U(I*TD%4i*Lk)Pr;#bT-zRPNNhZ%>OKx?xd zs$G`lyA9<1YYY2WARjeA0qT9vKs9*I@-LzWcmws0H>28b!`iq9_0#eo>Ig2G!MXmY zw;}4?7vV}Q$z}gFVCen6El@M=iW;c5<%gQ1EMA0~*u!R-Im=vNE;Cn}Z=rscyo-8W z)gB359yiGUc^`v1ieVUmlaOzddmQ!YtU|5eBx-`+p(gwjYC^xFb|h%9AFqdsH@18W zRQnW*duf*FfKe25M|C*d;-k${)PPg0{7FD=)YBV)HfBrC*P+ z`uV?ygc_VSFQSg%XHTh-f(jyYJy`>^&UdCpCR@B zXOd8Za;vbwT#j1d8|Efdz3rCYX;!1|$bMAEF~j|K38(?vm|3W;?_v2o)c=2S#U!*P z`nek`&8&z1TKHw|N{ z@U-PC%(WKZf*N4I#gCvScE;k@Q4v_kDzI;x-S+j9QBtU`_z7wI-*{G_UQrdQ-Wk+HenahOz!<-Nq#28vNRrh1Pq#oeYGwmaJ2Axa z52HGI)bg`Y11&&)({`&Zej4@ME}|wHSK!x6!+7#Nu|5`~%FEDGN6(SyfU8ggAH_tx zhD|VTtRL@)dZxoM73ZKP_AaW!kFhZx!c4r3s^7ZMuRju1Z!W6eR|+}*3=&%jXd)Le z8-t4cUy-_-<5A^v@CB?u4V*U4|0h;9>JpAcEuai_Bug<7w_!6phAMZ({ueW@*z;#r zL_h@#QCqdn{1juzpGOT4Jl^l93Fj>mP-lAzJL45>fEg3~iS|Q%f=8kzFahb`bJIzv z!^cq_%(nc?eg(JM@@r8m-i$hm-KdVfHqV<^&EN<9@<>$w@u>b=p%&EXcAWFiCZUe| z;szXH6+$QamoXgm?|}iRl|F{NFt-?c;7`~RvnFw~aSUol&tP2)ne6{^8f`YiFyg6L zhvRlxBs77Z7>dKpF{l}rVlDh5hU2s5BGeJBFxQ~kuebbr7*2l2\n" "Language: zh\n" @@ -323,12 +323,13 @@ msgstr "书本语言" #: application/templates/adv_dict.html:49 #: application/templates/adv_dict.html:66 #: application/templates/book_translator.html:26 +#: application/templates/dict.html:47 msgid "Engine" msgstr "翻译引擎" #: application/templates/adv_dict.html:33 #: application/templates/adv_dict.html:55 -#: application/templates/adv_dict.html:72 +#: application/templates/adv_dict.html:72 application/templates/dict.html:53 msgid "Database" msgstr "数据库" @@ -336,7 +337,12 @@ msgstr "数据库" msgid "Other languages" msgstr "其他语言" -#: application/templates/adv_dict.html:84 application/view/reader.py:29 +#: application/templates/adv_dict.html:81 application/templates/dict.html:3 +#: application/templates/dict.html:63 +msgid "Word lookup" +msgstr "单词查询" + +#: application/templates/adv_dict.html:86 application/view/reader.py:29 #: application/view/reader.py:95 msgid "Online reading feature has not been activated yet." 
msgstr "在线阅读功能尚未被激活。" @@ -1105,6 +1111,7 @@ msgid "One key per line" msgstr "一行一码" #: application/templates/book_translator.html:40 +#: application/templates/dict.html:39 msgid "Source language" msgstr "源语言" @@ -1174,6 +1181,14 @@ msgstr "分享密钥" msgid "Confirm Change" msgstr "确认修改" +#: application/templates/debug_cmd.html:3 +msgid "Debug cmd" +msgstr "调试命令" + +#: application/templates/dict.html:59 +msgid "Word" +msgstr "单词" + #: application/templates/home.html:3 msgid "Home" msgstr "首页" @@ -1577,7 +1592,7 @@ msgstr "转发" msgid "Fwd as Attach" msgstr "作为附件转发" -#: application/view/admin.py:48 application/view/adv.py:425 +#: application/view/admin.py:48 application/view/adv.py:434 #: application/view/settings.py:76 application/view/translator.py:83 #: application/view/translator.py:167 msgid "Settings Saved!" @@ -1697,29 +1712,29 @@ msgstr "在每篇文章后附加文章链接的二维码" msgid "Unknown command: {}" msgstr "未知命令:{}" -#: application/view/adv.py:427 +#: application/view/adv.py:436 msgid "The format is invalid." msgstr "格式非法。" -#: application/view/adv.py:459 +#: application/view/adv.py:468 msgid "Authorization Error!
{}" msgstr "申请授权过程失败!
{}" -#: application/view/adv.py:480 +#: application/view/adv.py:489 msgid "Success authorized by Pocket!" msgstr "已经成功获得Pocket的授权!" -#: application/view/adv.py:486 +#: application/view/adv.py:495 msgid "" "Failed to request authorization of Pocket!
<br/>See details " "below:<br/><br/>
{}" msgstr "申请Pocket授权失败!
错误信息参考如下:

{}" -#: application/view/adv.py:507 +#: application/view/adv.py:516 msgid "The Instapaper service encountered an error. Please try again later." msgstr "Instapaper服务器异常,请稍候再试。" -#: application/view/adv.py:520 +#: application/view/adv.py:529 msgid "Request type [{}] unsupported" msgstr "不支持你请求的命令类型 [{}]" @@ -1824,16 +1839,16 @@ msgstr "你可以点击下面的链接来重置你的KindleEar密码。" msgid "The article is missing?" msgstr "文章丢失了?" -#: application/view/reader.py:180 application/view/translator.py:116 +#: application/view/reader.py:195 application/view/translator.py:116 #: application/view/translator.py:200 msgid "The text is empty." msgstr "文本为空。" -#: application/view/reader.py:274 +#: application/view/reader.py:291 msgid "Failed to push: {}" msgstr "推送失败: {}" -#: application/view/reader.py:322 +#: application/view/reader.py:339 msgid "Failed to create ebook." msgstr "创建电子书失败。" @@ -1845,171 +1860,171 @@ msgstr "Kindle E-mail必须填写!" msgid "Title is requied!" msgstr "书籍标题不能为空!" -#: application/view/settings.py:174 +#: application/view/settings.py:216 msgid "English" msgstr "英语" -#: application/view/settings.py:175 +#: application/view/settings.py:217 msgid "Chinese" msgstr "中文" -#: application/view/settings.py:176 +#: application/view/settings.py:218 msgid "French" msgstr "法语" -#: application/view/settings.py:177 +#: application/view/settings.py:219 msgid "Spanish" msgstr "西班牙语" -#: application/view/settings.py:178 +#: application/view/settings.py:220 msgid "Portuguese" msgstr "葡萄牙语" -#: application/view/settings.py:179 +#: application/view/settings.py:221 msgid "German" msgstr "德语" -#: application/view/settings.py:180 +#: application/view/settings.py:222 msgid "Italian" msgstr "意大利语" -#: application/view/settings.py:181 +#: application/view/settings.py:223 msgid "Japanese" msgstr "日语" -#: application/view/settings.py:182 +#: application/view/settings.py:224 msgid "Russian" msgstr "俄语" -#: application/view/settings.py:183 +#: application/view/settings.py:225 msgid "Turkish" msgstr "土耳其语" -#: application/view/settings.py:184 +#: application/view/settings.py:226 msgid "Korean" msgstr "韩语" -#: application/view/settings.py:185 +#: application/view/settings.py:227 msgid "Arabic" msgstr "阿拉伯语" -#: application/view/settings.py:186 +#: application/view/settings.py:228 msgid "Czech" msgstr "捷克语" -#: application/view/settings.py:187 +#: application/view/settings.py:229 msgid "Dutch" msgstr "荷兰语" -#: application/view/settings.py:188 +#: application/view/settings.py:230 msgid "Greek" msgstr "希腊语" -#: application/view/settings.py:189 +#: application/view/settings.py:231 msgid "Hindi" msgstr "印地语" -#: application/view/settings.py:190 +#: application/view/settings.py:232 msgid "Malaysian" msgstr "马来西亚语" -#: application/view/settings.py:191 +#: application/view/settings.py:233 msgid "Bengali" msgstr "孟加拉语" -#: application/view/settings.py:192 +#: application/view/settings.py:234 msgid "Persian" msgstr "波斯语" -#: application/view/settings.py:193 +#: application/view/settings.py:235 msgid "Urdu" msgstr "乌尔都语" -#: application/view/settings.py:194 +#: application/view/settings.py:236 msgid "Swahili" msgstr "斯瓦希里语" -#: application/view/settings.py:195 +#: application/view/settings.py:237 msgid "Vietnamese" msgstr "越南语" -#: application/view/settings.py:196 +#: application/view/settings.py:238 msgid "Punjabi" msgstr "旁遮普语" -#: application/view/settings.py:197 +#: application/view/settings.py:239 msgid "Javanese" msgstr "爪哇语" -#: application/view/settings.py:198 +#: application/view/settings.py:240 msgid "Tagalog" msgstr "他加禄语" -#: 
application/view/settings.py:199 +#: application/view/settings.py:241 msgid "Hausa" msgstr "豪萨语" -#: application/view/settings.py:200 +#: application/view/settings.py:242 msgid "Thai" msgstr "泰语" -#: application/view/settings.py:201 +#: application/view/settings.py:243 msgid "Polish" msgstr "波兰语" -#: application/view/settings.py:202 +#: application/view/settings.py:244 msgid "Romanian" msgstr "罗马尼亚语" -#: application/view/settings.py:203 +#: application/view/settings.py:245 msgid "Hungarian" msgstr "匈牙利语" -#: application/view/settings.py:204 +#: application/view/settings.py:246 msgid "Swedish" msgstr "瑞典语" -#: application/view/settings.py:205 +#: application/view/settings.py:247 msgid "Hebrew" msgstr "希伯来语" -#: application/view/settings.py:206 +#: application/view/settings.py:248 msgid "Norwegian" msgstr "挪威语" -#: application/view/settings.py:207 +#: application/view/settings.py:249 msgid "Finnish" msgstr "芬兰语" -#: application/view/settings.py:208 +#: application/view/settings.py:250 msgid "Danish" msgstr "丹麦语" -#: application/view/settings.py:209 +#: application/view/settings.py:251 msgid "Ukrainian" msgstr "乌克兰语" -#: application/view/settings.py:210 +#: application/view/settings.py:252 msgid "Tamil" msgstr "泰米尔语" -#: application/view/settings.py:211 +#: application/view/settings.py:253 msgid "Marathi" msgstr "马拉地语" -#: application/view/settings.py:212 +#: application/view/settings.py:254 msgid "Burmese" msgstr "缅甸语" -#: application/view/settings.py:213 +#: application/view/settings.py:255 msgid "Amharic" msgstr "阿姆哈拉语" -#: application/view/settings.py:214 +#: application/view/settings.py:256 msgid "Azerbaijani" msgstr "阿塞拜疆语" -#: application/view/settings.py:215 +#: application/view/settings.py:257 msgid "Kazakh" msgstr "哈萨克语" @@ -2109,3 +2124,4 @@ msgstr "此Recipe的网站登录信息已经保存。" msgid "The api key is required." msgstr "需要填写api key." + diff --git a/application/view/reader.py b/application/view/reader.py index 18f2756b..8b6c6e66 100644 --- a/application/view/reader.py +++ b/application/view/reader.py @@ -236,10 +236,10 @@ def ReaderDictPost(user: KeUser, userDir: str): word = stem definition = inst.definition(word, language) #再次查询 except Exception as e: - import traceback - traceback.print_exc() - definition = f'Error: {e}' - print(json.dumps(definition)) #TODO + #import traceback + #traceback.print_exc() + definition = f'Error:
{e}' + #print(json.dumps(definition)) #TODO return {'status': 'ok', 'word': word, 'definition': definition, 'dictname': str(inst), 'others': others} @@ -250,8 +250,8 @@ def GetWordStem(word, language): import dictionary import hunspell #type:ignore except Exception as e: - import traceback #TODO - default_log.warning(traceback.format_exc()) + #import traceback #TODO + #default_log.warning(traceback.format_exc()) return '' dictDir = app.config['DICTIONARY_DIR'] or '' diff --git a/docker/Dockerfile b/docker/Dockerfile index 0e7f298a..923c8826 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -28,8 +28,8 @@ COPY --from=builder /usr/kindleear/config.py /usr/kindleear/requirements.txt ./ COPY --from=builder /usr/kindleear/tools/mp3cat /usr/local/bin/mp3cat COPY ./docker/gunicorn.conf.py ./main.py ./ -RUN apk add libstdc++ && \ - pip install --upgrade pip && \ +#apk add libstdc++ && \ +RUN pip install --upgrade pip && \ pip install --no-cache-dir -r requirements.txt && \ chmod +x /usr/local/bin/mp3cat diff --git a/docs/Chinese/reader.md b/docs/Chinese/reader.md index dec5c763..51075376 100644 --- a/docs/Chinese/reader.md +++ b/docs/Chinese/reader.md @@ -47,9 +47,14 @@ KindleEar支持邮件推送和在线阅读,内置一个为电子墨水屏进 ### 安装词典 1. KindleEar支持在线词典 [dict.org](https://dict.org/), [dict.cc](https://www.dict.cc/), [dict.cn](http://dict.cn/), [韦氏词典](https://www.merriam-webster.com/),[牛津词典](https://www.oxfordlearnersdictionaries.com/), 这几个词典不需要安装,开箱即用。 2. 在线词典很方便,但是避免有时候因为网络原因不是太稳定,所以如果要稳定使用,最好还是使用离线词典,为此,KindleEar同时支持 mdict/stardict 格式词典,下载对应的词典后,解压到 `data/dict` 目录(可以使用子目录整理不同的词典)。 -3. 离线词典第一次查词会比较慢,因为要创建索引文件(后缀为trie),特别是针对大词典,不建议在KindleEar使用大词典。 -4. 已经默认支持美式英语的构词法规则,可以查询单词时态语态复数等变形,如果需要支持其他语种的构词法,请下载对应的hunspell格式的文件(.dic/.aff),然后拷贝到 `data/dict/morphology` ,注意不要存放到子目录下,KindleEar会自动使用和书本语言相匹配的构词法规则。 -至于到哪里下载hunspell/myspell构词法文件,可以到github/sourceforge等网站上搜索。额外的,这里有一个 [链接](https://sourceforge.net/projects/goldendict/files/better%20morphologies/1.0/) +3. 离线词典第一次查词会比较慢,因为要创建索引文件(后缀为trie),之后就很快了。 +如果要使用大型词典,在生成索引的过程中会消耗比较多的内存,如你的服务器内存比较小,可能会创建索引失败,你可以在你的本地机器先使用对应词典查一次单词,待本地生成trie文件后,拷贝到服务器对应目录即可。 +4. 已经默认支持美式英语的构词法规则,可以查询单词时态语态复数等变形,如果需要支持其他语种的构词法,请下载对应的hunspell格式的文件(.dic/.aff),然后拷贝到 `data/dict/morphology` (请直接创建此目录) ,注意不要存放到子目录下,KindleEar会自动使用和书本语言相匹配的构词法规则。 +至于到哪里下载Hunspell/MySpell构词法文件,可以到github/sourceforge等网站上搜索,下面是几个直链。 +[LibreOffice](https://github.com/LibreOffice/dictionaries) +[Firefox](https://addons.mozilla.org/en-US/firefox/language-tools/) +[sztaki](http://hlt.sztaki.hu/resources/hunspell/) +[wooorm](https://github.com/wooorm/dictionaries) ### 使用词典 diff --git a/docs/English/reader.md b/docs/English/reader.md index bdf16e86..56b7c9ee 100644 --- a/docs/English/reader.md +++ b/docs/English/reader.md @@ -53,12 +53,17 @@ The extracted word is sent to your deployed KindleEar site for translation, and 2. KindleEar also supports offline dictionaries in the stardict format. After downloading the corresponding dictionary, unzip it into the `data/dict` directory. You can organize different dictionaries into subdirectories. Then, restart the KindleEar service to refresh the dictionary list. -3. The first time you look up a word in the offline dictionary may be slow because it needs to create an index file (suffix: trie), especially for large dictionaries. It is not recommended to use large dictionaries in KindleEar. +3. The first time you look up a word in the offline dictionary, it may be slow because it needs to create an index file (suffix: trie), After that, it will be much faster. 
+If you are using a large dictionary, the indexing process will consume a significant amount of memory. If the server has limited memory, the indexing might fail. You can first use the dictionary on your local machine to look up a word and generate the "trie" file, then copy it to the corresponding directory on the server. -4. By default, American English morphology queries are supported. -If you need to support morphology rules for other languages, please download the corresponding Hunspell format files (.dic/.aff), and then copy them to `data/dict/morphology`. Be careful not to store them in a subdirectory. +4. By default, American English morphology queries are supported (tense, voice, plural etc.). +If you need to support morphology rules for other languages, please download the corresponding Hunspell format files (.dic/.aff), and then copy them to `data/dict/morphology` (create it if not exists). Be careful not to store them in a subdirectory. KindleEar will automatically use the morphology rules that match the book's language. -As for where to download Hunspell/MySpell morphology files, you can search on websites such as GitHub or SourceForge. Additionally, here is a [link](https://sourceforge.net/projects/goldendict/files/better%20morphologies/1.0/) you can check. +As for where to download Hunspell/MySpell morphology files, you can search on websites such as GitHub or SourceForge. +[LibreOffice](https://github.com/LibreOffice/dictionaries) +[Firefox](https://addons.mozilla.org/en-US/firefox/language-tools/) +[sztaki](http://hlt.sztaki.hu/resources/hunspell/) +[wooorm](https://github.com/wooorm/dictionaries) ### Using Dictionaries diff --git a/main.py b/main.py index fcc8f89b..bc08a6c5 100644 --- a/main.py +++ b/main.py @@ -4,7 +4,7 @@ # Visit for the latest version # Author: cdhigh -__Version__ = '3.1.0' +__Version__ = '3.1.1' import os, sys, builtins, logging from application.lib import clogging diff --git a/tools/update_req.py b/tools/update_req.py index 0daa73b2..78d8d70c 100644 --- a/tools/update_req.py +++ b/tools/update_req.py @@ -56,8 +56,8 @@ def new_secret_key(length=12): REQ_PLAT = {'gae': [('appengine-python-standard', '~=1.1.6'), ('google-cloud-texttospeech', '~=2.16.3')], - 'docker': [('chunspell', '~=2.0.3'), ('marisa_trie', '~=1.2.0'), ('indexed-gzip', '~=1.8.7')], #docker/amd64 basic libs - 'dockerArm': [('chunspell', '~=2.0.3'), ('marisa_trie', '~=1.2.0'), ('indexed-gzip', '~=1.8.7')], #docker/arm64 basic libs + 'docker': [('chunspell', '~=2.0.4'), ('marisa_trie', '~=1.2.0'), ('indexed-gzip', '~=1.8.7')], #docker/amd64 basic libs + 'dockerArm': [('chunspell', '~=2.0.4'), ('marisa_trie', '~=1.2.0'), ('indexed-gzip', '~=1.8.7')], #docker/arm64 basic libs 'dockerAll': [('weedata', '>=0.2.7,<1.0.0'),('pymysql', '~=1.1.0'), #docker[all] install all libs ('psycopg2-binary', '~=2.9.9'),('pymongo', '~=4.6.3'),('redis', '~=5.0.3'), ('celery', '~=5.3.6'),('flask-rq2', '~=18.3'),('sqlalchemy', '~=2.0.29')],
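
The reader.md changes above describe resolving inflected words to their stems through Hunspell morphology files (.dic/.aff) placed in `data/dict/morphology`, and tools/update_req.py pins the `chunspell` package for that purpose. The following is only a minimal sketch of how such a stem lookup can work, assuming a cyhunspell-style API (a `Hunspell` class with a `stem()` method and a `hunspell_data_dir` argument); the names may differ from KindleEar's actual `GetWordStem` implementation.

```python
# Hedged sketch only -- not KindleEar's actual GetWordStem().
# Assumes the chunspell / cyhunspell package: Hunspell(lang, hunspell_data_dir=...)
# plus a stem() method; adjust names to the library version you install.
import os
from hunspell import Hunspell

MORPH_DIR = 'data/dict/morphology'  # .dic/.aff pairs go directly in this directory

def word_stem(word, lang='en_US', morph_dir=MORPH_DIR):
    """Return a Hunspell stem for word, or '' when no matching rules or stem exist."""
    if not os.path.exists(os.path.join(morph_dir, f'{lang}.dic')):
        return ''  # no morphology files for this language: caller keeps the original word
    h = Hunspell(lang, hunspell_data_dir=morph_dir)
    stems = h.stem(word)  # e.g. ('dictionary',) for 'dictionaries'
    return stems[0] if stems else ''

if __name__ == '__main__':
    print(word_stem('dictionaries') or 'dictionaries')
```

Returning an empty string when no stem is found mirrors the behaviour visible in the reader.py hunk above, where the definition is re-queried with the stem only when one is actually available.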