forked from sch/Blasta
Add functionality to search own directory;
Fix file check (thank you, roughnecks).
commit c31278b576
parent 21e3aa34aa
1 changed file with 88 additions and 26 deletions
blasta.py | 114
@@ -52,7 +52,7 @@ DBLOCK = Lock()
 class Data:
 
-    def cache_items_and_tags(entries, jid, tag=None):
+    def cache_items_and_tags_search(entries, jid, query):
         """Create a cache file of node items and tags."""
         item_ids = []
         tags = {}
 
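Note: this commit splits the old cache_items_and_tags(entries, jid, tag=None) into dedicated variants; this hunk turns it into a search function that takes a free-text query instead of an optional tag. A minimal standalone sketch of the new matching rule, assuming each entry is a dict with 'tags', 'title', 'link' and 'summary' keys as the diff suggests (sample values are hypothetical):

    # A hit is either an exact tag match ("in" on a list) or a
    # case-sensitive substring of title, link and summary joined
    # with spaces.
    def matches(entry, query):
        return (query in entry['tags']
                or query in ' '.join([entry['title'], entry['link'], entry['summary']]))

    entry = {'url_hash': 'abc123', 'tags': ['xmpp', 'python'],
             'title': 'Some bookmark', 'link': 'https://example.org/',
             'summary': 'A page'}
    print(matches(entry, 'xmpp'))  # True: exact tag hit
    print(matches(entry, 'book'))  # True: substring of the title
    print(matches(entry, 'XMPP'))  # False: matching is case-sensitive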
@@ -60,13 +60,7 @@ class Data:
             entry_tags = entry['tags']
             entry_url_hash = entry['url_hash']
             tags_to_include = []
-            if tag:
-                if tag in entry_tags:
-                    item_ids.append(entry_url_hash)
-                    tags_to_include += entry_tags
-                    for tag_to_include in tags_to_include:
-                        tags[tag_to_include] = tags[tag_to_include]+1 if tag_to_include in tags else 1
-            else:
+            if query in entry_tags or query in ' '.join([entry['title'], entry['link'], entry['summary']]):
                 item_ids.append(entry_url_hash)
                 tags_to_include += entry_tags
                 for tag_to_include in tags_to_include:
@@ -74,20 +68,60 @@ class Data:
         if tags:
             tags = dict(sorted(tags.items(), key=lambda item: (-item[1], item[0])))
             tags = dict(list(tags.items())[:30])
-            if tag: del tags[tag]
+        if item_ids:
+            filename = 'data/{}_query.toml'.format(jid)
+            data = {
+                'item_ids' : item_ids,
+                'tags' : tags}
+            Data.save_to_toml(filename, data)
+
+    def cache_items_and_tags_filter(entries, jid, tag):
+        """Create a cache file of node items and tags."""
+        item_ids = []
+        tags = {}
+        for entry in entries:
+            entry_tags = entry['tags']
+            entry_url_hash = entry['url_hash']
+            tags_to_include = []
+            if tag in entry_tags:
+                item_ids.append(entry_url_hash)
+                tags_to_include += entry_tags
+                for tag_to_include in tags_to_include:
+                    tags[tag_to_include] = tags[tag_to_include]+1 if tag_to_include in tags else 1
+        if tags:
+            tags = dict(sorted(tags.items(), key=lambda item: (-item[1], item[0])))
+            tags = dict(list(tags.items())[:30])
+            del tags[tag]
         if item_ids:
             directory = 'data/{}/'.format(jid)
             if not exists(directory):
                 mkdir(directory)
-            if tag:
-                filename = 'data/{}/{}.toml'.format(jid, tag)
+            filename = 'data/{}/{}.toml'.format(jid, tag)
             # Add support for search query
-            #if tag:
-            # filename = 'data/{}/query:{}.toml'.format(jid, query)
-            #if tag:
-            # filename = 'data/{}/tag:{}.toml'.format(jid, tag)
-            else:
-                filename = 'data/{}.toml'.format(jid)
+            #filename = 'data/{}/query:{}.toml'.format(jid, query)
+            #filename = 'data/{}/tag:{}.toml'.format(jid, tag)
+            data = {
+                'item_ids' : item_ids,
+                'tags' : tags}
+            Data.save_to_toml(filename, data)
+
+    def cache_items_and_tags(entries, jid):
+        """Create a cache file of node items and tags."""
+        item_ids = []
+        tags = {}
+        for entry in entries:
+            entry_tags = entry['tags']
+            entry_url_hash = entry['url_hash']
+            tags_to_include = []
+            item_ids.append(entry_url_hash)
+            tags_to_include += entry_tags
+            for tag_to_include in tags_to_include:
+                tags[tag_to_include] = tags[tag_to_include]+1 if tag_to_include in tags else 1
+        if tags:
+            tags = dict(sorted(tags.items(), key=lambda item: (-item[1], item[0])))
+            tags = dict(list(tags.items())[:30])
+        if item_ids:
+            filename = 'data/{}.toml'.format(jid)
         data = {
             'item_ids' : item_ids,
             'tags' : tags}
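Note: the single cache builder is now three functions, each writing a different cache file: cache_items_and_tags(entries, jid) writes data/{jid}.toml, cache_items_and_tags_filter(entries, jid, tag) writes data/{jid}/{tag}.toml (and drops the filtered tag from the related-tags dict), and cache_items_and_tags_search(entries, jid, query) writes data/{jid}_query.toml. All three share the same tag-ranking step; a runnable sketch of it, with hypothetical sample tags:

    # Count tag occurrences, sort by frequency (descending) then by
    # name, and keep only the 30 most frequent tags.
    tags = {}
    for entry_tags in [['xmpp', 'python'], ['xmpp']]:
        for tag_to_include in entry_tags:
            tags[tag_to_include] = tags[tag_to_include]+1 if tag_to_include in tags else 1
    tags = dict(sorted(tags.items(), key=lambda item: (-item[1], item[0])))
    tags = dict(list(tags.items())[:30])
    print(tags)  # {'xmpp': 2, 'python': 1}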
@@ -787,20 +821,48 @@ class HttpInstance:
                     # NOTE Does it work?
                     # It does not seem to actually filter tags.
                     # NOTE Yes. It does work.
-                    # See function "cache_items_and_tags".
-                    # TODO Search by query
-                    #if param_query:
-                    if param_tags or param_tld or param_filetype or param_protocol:
+                    # See function "cache_items_and_tags_filter".
+                    if param_query:
+                        query = param_query
+                        entries_cache = Data.open_file_toml(filename_items)
+                        entries_cache_node = entries_cache[node_type]
+                        filename_cache = 'data/{}_query.toml'.format(jid)
+                        Data.cache_items_and_tags_search(entries_cache_node, jid, query)
+                        if exists(filename_cache) and getsize(filename_cache):
+                            data = Data.open_file_toml(filename_cache)
+                            item_ids_all = data['item_ids']
+                            related_tags = data['tags']
+                            if len(item_ids_all) <= index_last:
+                                index_last = len(item_ids_all)
+                                page_next = None
+                            item_ids_selection = []
+                            for item_id in item_ids_all[index_first:index_last]:
+                                item_ids_selection.append(item_id)
+                            entries = []
+                            for entry in entries_cache_node:
+                                for item_id in item_ids_selection:
+                                    if entry['url_hash'] == item_id:
+                                        entries.append(entry)
+                            for entry in entries:
+                                entry['published_mod'] = Utilities.convert_iso8601_to_readable(entry['published'])
+                                entry['tags'] = entry['tags'][:5]
+                            description = 'Your {} bookmarks with "{}"'.format(node_type, query)
+                            message = 'Listing {} bookmarks {} - {} out of {}.'.format(node_type, index_first+1, index_last, len(item_ids_all))
+                            #item_id_next = entries[len(entries)-1]
+                        else:
+                            description = 'No {} bookmarks with "{}" were found for {}'.format(node_type, query, jid)
+                            message = 'Blasta system message » No entries.'
+                            page_next = None
+                            page_prev = None
+                    elif param_tags or param_tld or param_filetype or param_protocol:
                         tags_list = param_tags.split('+')
                         if len(tags_list) == 1:
                             tag = param_tags
                             entries_cache = Data.open_file_toml(filename_items)
                             entries_cache_node = entries_cache[node_type]
                             filename_cache = 'data/{}/{}.toml'.format(jid, tag)
-                            Data.cache_items_and_tags(entries_cache_node, jid, tag)
-                            if exists(filename_cache) or getsize(filename_cache):
+                            Data.cache_items_and_tags_filter(entries_cache_node, jid, tag)
+                            if exists(filename_cache) and getsize(filename_cache):
                                 data = Data.open_file_toml(filename_cache)
                                 item_ids_all = data['item_ids']
                                 related_tags = data['tags']
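Note: the new search branch pages through the cached item ids the same way the tag branch does; index_first, index_last and page_next come from paging code outside this hunk, and the window is clamped to the end of the result list. A small sketch of the clamp, with assumed values:

    # If the requested window runs past the end of the results,
    # pull index_last back and clear the "next page" pointer.
    item_ids_all = ['h1', 'h2', 'h3']
    index_first, index_last, page_next = 0, 10, 2
    if len(item_ids_all) <= index_last:
        index_last = len(item_ids_all)
        page_next = None
    item_ids_selection = item_ids_all[index_first:index_last]
    print(item_ids_selection, page_next)  # ['h1', 'h2', 'h3'] None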
@@ -836,7 +898,7 @@ class HttpInstance:
                             filename_cache = 'data/{}.toml'.format(jid)
                             #if len(entries_cache_node) and not exists(filename_cache):
                             Data.cache_items_and_tags(entries_cache_node, jid)
-                            if exists(filename_cache) or getsize(filename_cache):
+                            if exists(filename_cache) and getsize(filename_cache):
                                 data = Data.open_file_toml(filename_cache)
                                 item_ids_all = data['item_ids']
                                 related_tags = data['tags']
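Note on the file-check fix in the two hunks above: with "or", a missing cache file makes exists() return False, so getsize() is evaluated on a nonexistent path and raises FileNotFoundError, while an empty file passes the check and gets parsed anyway. With "and", a missing file fails fast and an empty file (size 0) falls through to the no-entries branch. A sketch, with a hypothetical path:

    from os.path import exists, getsize

    filename_cache = 'data/missing.toml'  # hypothetical path

    # Old check, exists(...) or getsize(...), raised FileNotFoundError
    # when the file was absent; the new check short-circuits safely
    # and treats empty files as no cache.
    if exists(filename_cache) and getsize(filename_cache):
        print('cache is present and non-empty')
    else:
        print('fall through to the no-entries branch')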