Add functionality to search own directory;

Fix file check (Thank you roughnecks).
Schimon Jehudah, Adv. 2024-09-05 14:58:14 +03:00
parent 21e3aa34aa
commit c31278b576
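
The "Fix file check" part of this commit changes the cache-file test in the HTTP handler from "or" to "and" (see the last two hunks below). A minimal sketch of the corrected check, assuming the exists/getsize helpers from os.path that blasta.py already uses; the helper function itself is hypothetical:

    from os.path import exists, getsize

    def cache_is_readable(filename_cache):
        # Hypothetical helper, for illustration only.
        # True only when the cache file exists AND is non-empty.
        # The old "or" test raised inside getsize() when the file was
        # missing, and still passed for an existing but empty file.
        return exists(filename_cache) and bool(getsize(filename_cache))

With "and", a missing or empty cache now falls through to the handler's no-entries branch instead of raising.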

blasta.py (114 changed lines)

@@ -52,7 +52,7 @@ DBLOCK = Lock()
class Data:
def cache_items_and_tags(entries, jid, tag=None):
def cache_items_and_tags_search(entries, jid, query):
"""Create a cache file of node items and tags."""
item_ids = []
tags = {}
@@ -60,13 +60,7 @@ class Data:
entry_tags = entry['tags']
entry_url_hash = entry['url_hash']
tags_to_include = []
if tag:
if tag in entry_tags:
item_ids.append(entry_url_hash)
tags_to_include += entry_tags
for tag_to_include in tags_to_include:
tags[tag_to_include] = tags[tag_to_include]+1 if tag_to_include in tags else 1
else:
if query in entry_tags or query in ' '.join([entry['title'], entry['link'], entry['summary']]):
item_ids.append(entry_url_hash)
tags_to_include += entry_tags
for tag_to_include in tags_to_include:
@@ -74,20 +68,60 @@ class Data:
if tags:
tags = dict(sorted(tags.items(), key=lambda item: (-item[1], item[0])))
tags = dict(list(tags.items())[:30])
if tag: del tags[tag]
if item_ids:
filename = 'data/{}_query.toml'.format(jid)
data = {
'item_ids' : item_ids,
'tags' : tags}
Data.save_to_toml(filename, data)
def cache_items_and_tags_filter(entries, jid, tag):
"""Create a cache file of node items and tags."""
item_ids = []
tags = {}
for entry in entries:
entry_tags = entry['tags']
entry_url_hash = entry['url_hash']
tags_to_include = []
if tag in entry_tags:
item_ids.append(entry_url_hash)
tags_to_include += entry_tags
for tag_to_include in tags_to_include:
tags[tag_to_include] = tags[tag_to_include]+1 if tag_to_include in tags else 1
if tags:
tags = dict(sorted(tags.items(), key=lambda item: (-item[1], item[0])))
tags = dict(list(tags.items())[:30])
del tags[tag]
if item_ids:
directory = 'data/{}/'.format(jid)
if not exists(directory):
mkdir(directory)
if tag:
filename = 'data/{}/{}.toml'.format(jid, tag)
filename = 'data/{}/{}.toml'.format(jid, tag)
# Add support for search query
#if tag:
# filename = 'data/{}/query:{}.toml'.format(jid, query)
#if tag:
# filename = 'data/{}/tag:{}.toml'.format(jid, tag)
else:
filename = 'data/{}.toml'.format(jid)
#filename = 'data/{}/query:{}.toml'.format(jid, query)
#filename = 'data/{}/tag:{}.toml'.format(jid, tag)
data = {
'item_ids' : item_ids,
'tags' : tags}
Data.save_to_toml(filename, data)
def cache_items_and_tags(entries, jid):
"""Create a cache file of node items and tags."""
item_ids = []
tags = {}
for entry in entries:
entry_tags = entry['tags']
entry_url_hash = entry['url_hash']
tags_to_include = []
item_ids.append(entry_url_hash)
tags_to_include += entry_tags
for tag_to_include in tags_to_include:
tags[tag_to_include] = tags[tag_to_include]+1 if tag_to_include in tags else 1
if tags:
tags = dict(sorted(tags.items(), key=lambda item: (-item[1], item[0])))
tags = dict(list(tags.items())[:30])
if item_ids:
filename = 'data/{}.toml'.format(jid)
data = {
'item_ids' : item_ids,
'tags' : tags}
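
The selection rule that cache_items_and_tags_search applies to each entry, pulled out as a standalone sketch (the helper name is hypothetical; entry is assumed to be one cached node item with the same tags, title, link and summary keys used above):

    def entry_matches_query(entry, query):
        # Illustrative sketch only: keep an entry when the query is one of
        # its tags, or appears as a substring of the joined title, link and
        # summary text (case-sensitive, exactly as in the function above).
        return (query in entry['tags']
                or query in ' '.join([entry['title'], entry['link'], entry['summary']]))

Matching entries contribute their url_hash to item_ids and their tags to the counted tag totals, as in the cache_* variants above.
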
@@ -787,20 +821,48 @@ class HttpInstance:
# NOTE Does it work?
# It does not seem to actually filter tags.
# NOTE Yes. It does work.
# See function "cache_items_and_tags".
# TODO Search by query
#if param_query:
if param_tags or param_tld or param_filetype or param_protocol:
# See function "cache_items_and_tags_filter".
if param_query:
query = param_query
entries_cache = Data.open_file_toml(filename_items)
entries_cache_node = entries_cache[node_type]
filename_cache = 'data/{}_query.toml'.format(jid)
Data.cache_items_and_tags_search(entries_cache_node, jid, query)
if exists(filename_cache) and getsize(filename_cache):
data = Data.open_file_toml(filename_cache)
item_ids_all = data['item_ids']
related_tags = data['tags']
if len(item_ids_all) <= index_last:
index_last = len(item_ids_all)
page_next = None
item_ids_selection = []
for item_id in item_ids_all[index_first:index_last]:
item_ids_selection.append(item_id)
entries = []
for entry in entries_cache_node:
for item_id in item_ids_selection:
if entry['url_hash'] == item_id:
entries.append(entry)
for entry in entries:
entry['published_mod'] = Utilities.convert_iso8601_to_readable(entry['published'])
entry['tags'] = entry['tags'][:5]
description = 'Your {} bookmarks with "{}"'.format(node_type, query)
message = 'Listing {} bookmarks {} - {} out of {}.'.format(node_type, index_first+1, index_last, len(item_ids_all))
#item_id_next = entries[len(entries)-1]
else:
description = 'No {} bookmarks with "{}" were found for {}'.format(node_type, query, jid)
message = 'Blasta system message » No entries.'
page_next = None
page_prev = None
elif param_tags or param_tld or param_filetype or param_protocol:
tags_list = param_tags.split('+')
if len(tags_list) == 1:
tag = param_tags
entries_cache = Data.open_file_toml(filename_items)
entries_cache_node = entries_cache[node_type]
filename_cache = 'data/{}/{}.toml'.format(jid, tag)
Data.cache_items_and_tags(entries_cache_node, jid, tag)
if exists(filename_cache) or getsize(filename_cache):
Data.cache_items_and_tags_filter(entries_cache_node, jid, tag)
if exists(filename_cache) and getsize(filename_cache):
data = Data.open_file_toml(filename_cache)
item_ids_all = data['item_ids']
related_tags = data['tags']
@@ -836,7 +898,7 @@ class HttpInstance:
filename_cache = 'data/{}.toml'.format(jid)
#if len(entries_cache_node) and not exists(filename_cache):
Data.cache_items_and_tags(entries_cache_node, jid)
if exists(filename_cache) or getsize(filename_cache):
if exists(filename_cache) and getsize(filename_cache):
data = Data.open_file_toml(filename_cache)
item_ids_all = data['item_ids']
related_tags = data['tags']
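
Taken together, the handler now chooses one of three cache builders and one of three cache file names per request. A condensed sketch of that dispatch, assuming the Data helpers and parameter names shown above; the wrapper function is hypothetical and the tag branch is reduced to the single-tag case:

    def build_cache(entries_cache_node, jid, param_query=None, param_tags=None):
        # Hypothetical wrapper, for illustration only.
        if param_query:
            # Free-text search over the user's own directory of bookmarks.
            filename_cache = 'data/{}_query.toml'.format(jid)
            Data.cache_items_and_tags_search(entries_cache_node, jid, param_query)
        elif param_tags and len(param_tags.split('+')) == 1:
            # Single-tag filter; cached per user under data/<jid>/.
            filename_cache = 'data/{}/{}.toml'.format(jid, param_tags)
            Data.cache_items_and_tags_filter(entries_cache_node, jid, param_tags)
        else:
            # Unfiltered listing of the whole node.
            filename_cache = 'data/{}.toml'.format(jid)
            Data.cache_items_and_tags(entries_cache_node, jid)
        return filename_cache

The caller then reads filename_cache with the exists-and-getsize check shown near the top of this page.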