#!/usr/bin/env python3
# -*- coding: utf-8 -*-

"""

TODO

1) Function scan at "for entry in entries"

   Suppress directly calling function "add_entry" (accept db_file).
   Pass a list of valid entries to a new function "add_entries"
   (accept db_file) which would call function "add_entry" (accept cur).

   * Accelerate adding of a large set of entries at once.
   * Prevent (or mitigate) a halt of consequent actions.
   * Reduce I/O.

   A sketch of this batching approach follows right after this docstring.

2) Call sqlite function from function statistics.

   Returning a list of values doesn't seem to be a good practice.

3) Special statistics for operator:

   * Size of database(s);
   * Amount of JIDs subscribed;
   * Amount of feeds of all JIDs;
   * Amount of entries of all JIDs.

"""

# import asyncio
from asyncio.exceptions import IncompleteReadError
from bs4 import BeautifulSoup
from feedparser import parse
import hashlib
from http.client import IncompleteRead
import json
from slixfeed.log import Logger
from lxml import html
import os
import slixfeed.config as config
from slixfeed.config import Config
import slixfeed.crawl as crawl
import slixfeed.dt as dt
import slixfeed.fetch as fetch
import slixfeed.sqlite as sqlite
from slixfeed.url import (
    complete_url,
    join_url,
    remove_tracking_parameters,
    replace_hostname,
    trim_url
    )
import slixfeed.task as task
from slixfeed.xmpp.bookmark import XmppBookmark
from slixfeed.xmpp.muc import XmppGroupchat
from slixfeed.xmpp.iq import XmppIQ
from slixfeed.xmpp.message import XmppMessage
from slixfeed.xmpp.presence import XmppPresence
from slixfeed.xmpp.publish import XmppPubsub
from slixfeed.xmpp.upload import XmppUpload
from slixfeed.xmpp.utility import get_chat_type
from slixmpp.xmlstream import ET
import sys
from urllib import error
from urllib.parse import parse_qs, urlsplit
import xml.etree.ElementTree as ETR

try:
    import tomllib
except ImportError:
    import tomli as tomllib


logger = Logger(__name__)


try:
    import xml2epub
except ImportError:
    logger.error('Package xml2epub was not found.\n'
                 'ePUB support is disabled.')

try:
    import html2text
except ImportError:
    logger.error('Package html2text was not found.\n'
                 'Markdown support is disabled.')

try:
    import pdfkit
except ImportError:
    logger.error('Package pdfkit was not found.\n'
                 'PDF support is disabled.')

try:
    from readability import Document
except ImportError:
    logger.error('Package readability was not found.\n'
                 'Arc90 Lab algorithm is disabled.')


def export_feeds(self, jid, jid_file, ext):
    function_name = sys._getframe().f_code.co_name
    logger.debug('{}: jid: {}: jid_file: {}: ext: {}'.format(function_name, jid, jid_file, ext))
    cache_dir = config.get_default_cache_directory()
    if not os.path.isdir(cache_dir):
        os.mkdir(cache_dir)
    if not os.path.isdir(cache_dir + '/' + ext):
        os.mkdir(cache_dir + '/' + ext)
    filename = os.path.join(
        cache_dir, ext, 'slixfeed_' + dt.timestamp() + '.' + ext)
    db_file = config.get_pathname_to_database(jid_file)
    results = sqlite.get_feeds(db_file)
    match ext:
        # case 'html':
        #     response = 'Not yet implemented.'
        case 'md':
            export_to_markdown(jid, filename, results)
        case 'opml':
            export_to_opml(jid, filename, results)
        # case 'xbel':
        #     response = 'Not yet implemented.'
    return filename


async def xmpp_muc_autojoin(self, bookmarks):
    for bookmark in bookmarks:
        if bookmark["jid"] and bookmark["autojoin"]:
            if not bookmark["nick"]:
                bookmark["nick"] = self.alias
                logger.error('Alias (i.e. Nickname) is missing for '
                             'bookmark {}'.format(bookmark['name']))
            alias = bookmark["nick"]
            muc_jid = bookmark["jid"]
            result = await XmppGroupchat.join(self, muc_jid, alias)
            if result == 'ban':
                await XmppBookmark.remove(self, muc_jid)
                logger.warning('{} is banned from {}'.format(self.alias, muc_jid))
                logger.warning('Groupchat {} has been removed from bookmarks'
                               .format(muc_jid))
            else:
                logger.info('Autojoin groupchat\n'
                            'Name : {}\n'
                            'JID : {}\n'
                            'Alias : {}\n'
                            .format(bookmark["name"],
                                    bookmark["jid"],
                                    bookmark["nick"]))
        elif not bookmark["jid"]:
            logger.error('JID is missing for bookmark {}'
                         .format(bookmark['name']))


"""
TODO

Consider appending a text reminder to share presence:
'✒️ Share online status to receive updates'

# TODO Request for subscription
if (await get_chat_type(self, jid_bare) == 'chat' and
        not self.client_roster[jid_bare]['to']):
    XmppPresence.subscription(self, jid_bare, 'subscribe')
    await XmppRoster.add(self, jid_bare)
    status_message = '✒️ Share online status to receive updates'
    XmppPresence.send(self, jid_bare, status_message)
    message_subject = 'RSS News Bot'
    message_body = 'Share online status to receive updates.'
    XmppMessage.send_headline(self, jid_bare, message_subject,
                              message_body, 'chat')
"""


async def xmpp_send_status_message(self, jid):
    """
    Send status message.

    Parameters
    ----------
    jid : str
        Jabber ID.
    """
    function_name = sys._getframe().f_code.co_name
    logger.debug('{}: jid: {}'.format(function_name, jid))
    status_text = '📜️ Slixfeed RSS News Bot'
    jid_file = jid.replace('/', '_')
    db_file = config.get_pathname_to_database(jid_file)
    enabled = Config.get_setting_value(self.settings, jid, 'enabled')
    if enabled:
        jid_task = self.pending_tasks[jid]
        if len(jid_task):
            status_mode = 'dnd'
            status_text = jid_task[list(jid_task.keys())[0]]
        else:
            feeds = sqlite.get_number_of_items(db_file, 'feeds_properties')
            # print(await current_time(), jid, "has", feeds, "feeds")
            if not feeds:
                status_mode = 'available'
                status_text = '📪️ Send a URL from a blog or a news website'
            else:
                unread = sqlite.get_number_of_entries_unread(db_file)
                if unread:
                    status_mode = 'chat'
                    status_text = '📬️ There are {} news items'.format(str(unread))
                    # status_text = (
                    #     "📰 News items: {}"
                    #     ).format(str(unread))
                    # status_text = (
                    #     "📰 You have {} news items"
                    #     ).format(str(unread))
                else:
                    status_mode = 'available'
                    status_text = '📭️ No news'
    else:
        status_mode = 'xa'
        status_text = '📪️ Send "Start" to receive updates'
    # breakpoint()
    # print(await current_time(), status_text, "for", jid)
    XmppPresence.send(self, jid, status_text, status_type=status_mode)
    # await asyncio.sleep(60 * 20)
    # await refresh_task(self, jid, send_status, 'status', '90')
    # loop.call_at(
    #     loop.time() + 60 * 20,
    #     loop.create_task,
    #     send_status(jid)
    # )


async def xmpp_pubsub_send_selected_entry(self, jid_bare, jid_file, node_id, entry_id):
    function_name = sys._getframe().f_code.co_name
    logger.debug('{}: jid_bare: {} jid_file: {}'.format(function_name, jid_bare, jid_file))
    # jid_file = jid_bare.replace('/', '_')
    db_file = config.get_pathname_to_database(jid_file)
    report = {}
    if jid_bare == self.boundjid.bare:
        node_id = 'urn:xmpp:microblog:0'
        node_subtitle = None
        node_title = None
    else:
        feed_id = sqlite.get_feed_id_by_entry_index(db_file, entry_id)
        feed_id = feed_id[0]
        feed_properties = sqlite.get_feed_properties(db_file, feed_id)
        node_id = feed_properties[2]
        node_title = feed_properties[3]
        node_subtitle = feed_properties[5]
    xep = None
    iq_create_node = XmppPubsub.create_node(
        self, jid_bare, node_id, xep, node_title, node_subtitle)
    await XmppIQ.send(self, iq_create_node)
    entry = sqlite.get_entry_properties(db_file, entry_id)
    print('xmpp_pubsub_send_selected_entry', jid_bare)
    print(node_id)
    entry_dict = pack_entry_into_dict(db_file, entry)
    node_item = create_rfc4287_entry(entry_dict)
    entry_url = entry_dict['link']
    item_id = hash_url_to_md5(entry_url)
    iq_create_entry = XmppPubsub.create_entry(
        self, jid_bare, node_id, item_id, node_item)
    await XmppIQ.send(self, iq_create_entry)
    await sqlite.mark_as_read(db_file, entry_id)
    report = entry_url
    return report

async def xmpp_pubsub_send_unread_items(self, jid_bare):
|
2024-05-13 09:28:12 +02:00
|
|
|
"""
|
|
|
|
|
|
|
|
Parameters
|
|
|
|
----------
|
|
|
|
jid_bare : TYPE
|
|
|
|
Bare Jabber ID.
|
|
|
|
|
|
|
|
Returns
|
|
|
|
-------
|
|
|
|
report : dict
|
|
|
|
URL and Number of processed entries.
|
|
|
|
|
|
|
|
"""
|
2024-03-26 17:23:22 +01:00
|
|
|
function_name = sys._getframe().f_code.co_name
|
|
|
|
logger.debug('{}: jid_bare: {}'.format(function_name, jid_bare))
|
|
|
|
jid_file = jid_bare.replace('/', '_')
|
|
|
|
db_file = config.get_pathname_to_database(jid_file)
|
2024-04-14 14:56:45 +02:00
|
|
|
report = {}
|
|
|
|
subscriptions = sqlite.get_active_feeds_url(db_file)
|
|
|
|
for url in subscriptions:
|
|
|
|
url = url[0]
|
2024-05-13 09:28:12 +02:00
|
|
|
# feed_id = sqlite.get_feed_id(db_file, url)
|
|
|
|
# feed_id = feed_id[0]
|
|
|
|
# feed_properties = sqlite.get_feed_properties(db_file, feed_id)
|
|
|
|
feed_id = sqlite.get_feed_id(db_file, url)
|
|
|
|
feed_id = feed_id[0]
|
|
|
|
|
|
|
|
# Publish to node 'urn:xmpp:microblog:0' for own JID
|
|
|
|
# Publish to node based on feed identifier for PubSub service.
|
|
|
|
|
2024-04-14 14:56:45 +02:00
|
|
|
if jid_bare == self.boundjid.bare:
|
|
|
|
node_id = 'urn:xmpp:microblog:0'
|
|
|
|
node_subtitle = None
|
|
|
|
node_title = None
|
|
|
|
else:
|
|
|
|
# node_id = feed_properties[2]
|
|
|
|
# node_title = feed_properties[3]
|
|
|
|
# node_subtitle = feed_properties[5]
|
|
|
|
node_id = sqlite.get_feed_identifier(db_file, feed_id)
|
|
|
|
node_id = node_id[0]
|
|
|
|
node_title = sqlite.get_feed_title(db_file, feed_id)
|
|
|
|
node_title = node_title[0]
|
|
|
|
node_subtitle = sqlite.get_feed_subtitle(db_file, feed_id)
|
|
|
|
node_subtitle = node_subtitle[0]
|
|
|
|
xep = None
|
|
|
|
iq_create_node = XmppPubsub.create_node(
|
|
|
|
self, jid_bare, node_id, xep, node_title, node_subtitle)
|
|
|
|
await XmppIQ.send(self, iq_create_node)
|
|
|
|
entries = sqlite.get_unread_entries_of_feed(db_file, feed_id)
|
|
|
|
report[url] = len(entries)
|
|
|
|
for entry in entries:
|
|
|
|
feed_entry = pack_entry_into_dict(db_file, entry)
|
|
|
|
node_entry = create_rfc4287_entry(feed_entry)
|
|
|
|
entry_url = feed_entry['link']
|
|
|
|
item_id = hash_url_to_md5(entry_url)
|
|
|
|
iq_create_entry = XmppPubsub.create_entry(
|
|
|
|
self, jid_bare, node_id, item_id, node_entry)
|
|
|
|
await XmppIQ.send(self, iq_create_entry)
|
|
|
|
ix = entry[0]
|
|
|
|
await sqlite.mark_as_read(db_file, ix)
|
|
|
|
return report
|
|
|
|
|
|
|
|
|
|
|
|
def pack_entry_into_dict(db_file, entry):
|
|
|
|
entry_id = entry[0]
|
|
|
|
authors = sqlite.get_authors_by_entry_id(db_file, entry_id)
|
|
|
|
entry_authors = []
|
|
|
|
for author in authors:
|
|
|
|
entry_author = {
|
|
|
|
'name': author[2],
|
|
|
|
'email': author[3],
|
|
|
|
'url': author[4]}
|
|
|
|
entry_authors.extend([entry_author])
|
|
|
|
|
|
|
|
contributors = sqlite.get_contributors_by_entry_id(db_file, entry_id)
|
|
|
|
entry_contributors = []
|
|
|
|
for contributor in contributors:
|
|
|
|
entry_contributor = {
|
|
|
|
'name': contributor[2],
|
|
|
|
'email': contributor[3],
|
|
|
|
'url': contributor[4]}
|
|
|
|
entry_contributors.extend([entry_contributor])
|
|
|
|
|
|
|
|
links = sqlite.get_links_by_entry_id(db_file, entry_id)
|
|
|
|
entry_links = []
|
|
|
|
for link in links:
|
|
|
|
entry_link = {
|
|
|
|
'url': link[2],
|
|
|
|
'type': link[3],
|
|
|
|
'rel': link[4],
|
|
|
|
'size': link[5]}
|
|
|
|
entry_links.extend([entry_link])
|
|
|
|
|
|
|
|
|
|
|
|
tags = sqlite.get_tags_by_entry_id(db_file, entry_id)
|
|
|
|
entry_tags = []
|
|
|
|
for tag in tags:
|
|
|
|
entry_tag = {
|
|
|
|
'term': tag[2],
|
|
|
|
'scheme': tag[3],
|
|
|
|
'label': tag[4]}
|
|
|
|
entry_tags.extend([entry_tag])
|
|
|
|
|
|
|
|
contents = sqlite.get_contents_by_entry_id(db_file, entry_id)
|
|
|
|
entry_contents = []
|
|
|
|
for content in contents:
|
|
|
|
entry_content = {
|
|
|
|
'text': content[2],
|
|
|
|
'type': content[3],
|
|
|
|
'base': content[4],
|
|
|
|
'lang': content[5]}
|
|
|
|
entry_contents.extend([entry_content])
|
|
|
|
|
|
|
|
feed_entry = {
|
|
|
|
'authors' : entry_authors,
|
|
|
|
'category' : entry[10],
|
|
|
|
'comments' : entry[12],
|
|
|
|
'contents' : entry_contents,
|
|
|
|
'contributors' : entry_contributors,
|
|
|
|
'summary_base' : entry[9],
|
|
|
|
'summary_lang' : entry[7],
|
|
|
|
'summary_text' : entry[6],
|
|
|
|
'summary_type' : entry[8],
|
|
|
|
'enclosures' : entry[13],
|
|
|
|
'href' : entry[11],
|
|
|
|
'link' : entry[3],
|
|
|
|
'links' : entry_links,
|
|
|
|
'published' : entry[14],
|
|
|
|
'rating' : entry[13],
|
|
|
|
'tags' : entry_tags,
|
|
|
|
'title' : entry[4],
|
|
|
|
'title_type' : entry[3],
|
|
|
|
'updated' : entry[15]}
|
|
|
|
return feed_entry
|
|
|
|
|
|
|
|
|
|
|
|
# NOTE Warning: Entry might not have a link
|
|
|
|
# TODO Handle situation error
|
|
|
|
def hash_url_to_md5(url):
|
|
|
|
url_encoded = url.encode()
|
|
|
|
url_hashed = hashlib.md5(url_encoded)
|
|
|
|
url_digest = url_hashed.hexdigest()
|
|
|
|
return url_digest
|
|
|
|
|
|
|
|
|
|
|
|
def create_rfc4287_entry(feed_entry):
|
|
|
|
node_entry = ET.Element('entry')
|
|
|
|
node_entry.set('xmlns', 'http://www.w3.org/2005/Atom')
|
|
|
|
|
|
|
|
# Title
|
|
|
|
title = ET.SubElement(node_entry, 'title')
|
|
|
|
if feed_entry['title']:
|
|
|
|
if feed_entry['title_type']: title.set('type', feed_entry['title_type'])
|
|
|
|
title.text = feed_entry['title']
|
|
|
|
elif feed_entry['summary_text']:
|
|
|
|
if feed_entry['summary_type']: title.set('type', feed_entry['summary_type'])
|
|
|
|
title.text = feed_entry['summary_text']
|
|
|
|
# if feed_entry['summary_base']: title.set('base', feed_entry['summary_base'])
|
|
|
|
# if feed_entry['summary_lang']: title.set('lang', feed_entry['summary_lang'])
|
|
|
|
else:
|
|
|
|
title.text = feed_entry['published']
|
|
|
|
|
|
|
|
# Some feeds have identical content for contents and summary
|
|
|
|
# So if content is present, do not add summary
|
|
|
|
if feed_entry['contents']:
|
|
|
|
# Content
|
|
|
|
for feed_entry_content in feed_entry['contents']:
|
|
|
|
content = ET.SubElement(node_entry, 'content')
|
|
|
|
# if feed_entry_content['base']: content.set('base', feed_entry_content['base'])
|
|
|
|
if feed_entry_content['lang']: content.set('lang', feed_entry_content['lang'])
|
|
|
|
if feed_entry_content['type']: content.set('type', feed_entry_content['type'])
|
|
|
|
content.text = feed_entry_content['text']
|
|
|
|
else:
|
|
|
|
# Summary
|
|
|
|
summary = ET.SubElement(node_entry, 'summary') # TODO Try 'content'
|
|
|
|
# if feed_entry['summary_base']: summary.set('base', feed_entry['summary_base'])
|
|
|
|
# TODO Check realization of "lang"
|
|
|
|
if feed_entry['summary_type']: summary.set('type', feed_entry['summary_type'])
|
|
|
|
if feed_entry['summary_lang']: summary.set('lang', feed_entry['summary_lang'])
|
|
|
|
summary.text = feed_entry['summary_text']
|
|
|
|
|
|
|
|
# Authors
|
|
|
|
for feed_entry_author in feed_entry['authors']:
|
|
|
|
author = ET.SubElement(node_entry, 'author')
|
|
|
|
name = ET.SubElement(author, 'name')
|
|
|
|
name.text = feed_entry_author['name']
|
|
|
|
if feed_entry_author['url']:
|
|
|
|
uri = ET.SubElement(author, 'uri')
|
|
|
|
uri.text = feed_entry_author['url']
|
|
|
|
if feed_entry_author['email']:
|
|
|
|
email = ET.SubElement(author, 'email')
|
|
|
|
email.text = feed_entry_author['email']
|
|
|
|
|
|
|
|
# Contributors
|
|
|
|
for feed_entry_contributor in feed_entry['contributors']:
|
|
|
|
contributor = ET.SubElement(node_entry, 'contributor')
|
|
|
|
name = ET.SubElement(contributor, 'name')
|
|
|
|
name.text = feed_entry_contributor['name']
|
|
|
|
if feed_entry_contributor['url']:
|
|
|
|
uri = ET.SubElement(contributor, 'uri')
|
|
|
|
uri.text = feed_entry_contributor['url']
|
|
|
|
if feed_entry_contributor['email']:
|
|
|
|
email = ET.SubElement(contributor, 'email')
|
|
|
|
email.text = feed_entry_contributor['email']
|
|
|
|
|
|
|
|
# Category
|
|
|
|
category = ET.SubElement(node_entry, "category")
|
|
|
|
category.set('category', feed_entry['category'])
|
|
|
|
|
|
|
|
# Tags
|
|
|
|
for feed_entry_tag in feed_entry['tags']:
|
|
|
|
tag = ET.SubElement(node_entry, 'category')
|
|
|
|
tag.set('term', feed_entry_tag['term'])
|
|
|
|
|
|
|
|
# Link
|
|
|
|
link = ET.SubElement(node_entry, "link")
|
|
|
|
link.set('href', feed_entry['link'])
|
|
|
|
|
|
|
|
# Links
|
|
|
|
for feed_entry_link in feed_entry['links']:
|
|
|
|
link = ET.SubElement(node_entry, "link")
|
|
|
|
link.set('href', feed_entry_link['url'])
|
|
|
|
link.set('type', feed_entry_link['type'])
|
|
|
|
link.set('rel', feed_entry_link['rel'])
|
|
|
|
|
|
|
|
# Date updated
|
|
|
|
if feed_entry['updated']:
|
|
|
|
updated = ET.SubElement(node_entry, 'updated')
|
|
|
|
updated.text = feed_entry['updated']
|
|
|
|
|
|
|
|
# Date published
|
|
|
|
if feed_entry['published']:
|
|
|
|
published = ET.SubElement(node_entry, 'published')
|
|
|
|
published.text = feed_entry['published']
|
|
|
|
|
|
|
|
return node_entry
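
# Illustrative sketch only, not part of the publishing flow in this module:
# serialize the Atom entry built by create_rfc4287_entry() for logging or
# inspection. It assumes that the ET imported from slixmpp.xmlstream behaves
# like xml.etree.ElementTree (i.e. tostring() accepts encoding='unicode').
def _render_rfc4287_entry_sketch(feed_entry):
    """Return the RFC 4287 entry of a packed feed entry as an XML string."""
    node_entry = create_rfc4287_entry(feed_entry)
    return ET.tostring(node_entry, encoding='unicode')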
|
|
|
|
|
|
|
|
|
|
|
|
async def xmpp_chat_send_unread_items(self, jid, num=None):
|
2024-02-11 22:31:31 +01:00
|
|
|
"""
|
|
|
|
Send news items as messages.
|
|
|
|
|
|
|
|
Parameters
|
|
|
|
----------
|
|
|
|
jid : str
|
|
|
|
Jabber ID.
|
|
|
|
num : str, optional
|
|
|
|
Number. The default is None.
|
|
|
|
"""
|
2024-03-03 15:13:01 +01:00
|
|
|
function_name = sys._getframe().f_code.co_name
|
2024-03-04 11:16:49 +01:00
|
|
|
logger.debug('{}: jid: {} num: {}'.format(function_name, jid, num))
|
2024-02-11 22:31:31 +01:00
|
|
|
jid_file = jid.replace('/', '_')
|
|
|
|
db_file = config.get_pathname_to_database(jid_file)
|
2024-04-14 14:56:45 +02:00
|
|
|
show_media = Config.get_setting_value(self.settings, jid, 'media')
|
|
|
|
if not num:
|
|
|
|
num = Config.get_setting_value(self.settings, jid, 'quantum')
|
|
|
|
else:
|
|
|
|
num = int(num)
|
|
|
|
results = sqlite.get_unread_entries(db_file, num)
|
|
|
|
news_digest = ''
|
|
|
|
media = None
|
|
|
|
chat_type = await get_chat_type(self, jid)
|
|
|
|
for result in results:
|
|
|
|
ix = result[0]
|
|
|
|
title_e = result[1]
|
|
|
|
url = result[2]
|
|
|
|
summary = result[3]
|
|
|
|
feed_id = result[4]
|
|
|
|
date = result[5]
|
|
|
|
enclosure = sqlite.get_enclosure_by_entry_id(db_file, ix)
|
|
|
|
if enclosure: enclosure = enclosure[0]
|
|
|
|
title_f = sqlite.get_feed_title(db_file, feed_id)
|
|
|
|
title_f = title_f[0]
|
|
|
|
news_digest += await list_unread_entries(self, result, title_f, jid)
|
|
|
|
# print(db_file)
|
|
|
|
# print(result[0])
|
|
|
|
# breakpoint()
|
|
|
|
await sqlite.mark_as_read(db_file, ix)
|
|
|
|
|
|
|
|
# Find media
|
|
|
|
# if url.startswith("magnet:"):
|
|
|
|
# media = action.get_magnet(url)
|
|
|
|
# elif enclosure.startswith("magnet:"):
|
|
|
|
# media = action.get_magnet(enclosure)
|
|
|
|
# elif enclosure:
|
|
|
|
if show_media:
|
|
|
|
if enclosure:
|
|
|
|
media = enclosure
|
|
|
|
else:
|
|
|
|
media = await extract_image_from_html(url)
|
|
|
|
|
|
|
|
if media and news_digest:
|
|
|
|
# Send textual message
|
2024-02-11 22:31:31 +01:00
|
|
|
XmppMessage.send(self, jid, news_digest, chat_type)
|
2024-04-14 14:56:45 +02:00
|
|
|
news_digest = ''
|
|
|
|
# Send media
|
|
|
|
XmppMessage.send_oob(self, jid, media, chat_type)
|
|
|
|
media = None
|
|
|
|
|
|
|
|
if news_digest:
|
|
|
|
XmppMessage.send(self, jid, news_digest, chat_type)
|
2024-02-11 22:31:31 +01:00
|
|
|
# TODO Add while loop to assure delivery.
|
|
|
|
# print(await current_time(), ">>> ACT send_message",jid)
|
|
|
|
# NOTE Do we need "if statement"? See NOTE at is_muc.
|
|
|
|
# if chat_type in ('chat', 'groupchat'):
|
|
|
|
# # TODO Provide a choice (with or without images)
|
|
|
|
# XmppMessage.send(self, jid, news_digest, chat_type)
|
|
|
|
# See XEP-0367
|
|
|
|
# if media:
|
|
|
|
# # message = xmpp.Slixfeed.make_message(
|
|
|
|
# # self, mto=jid, mbody=new, mtype=chat_type)
|
|
|
|
# message = xmpp.Slixfeed.make_message(
|
|
|
|
# self, mto=jid, mbody=media, mtype=chat_type)
|
|
|
|
# message['oob']['url'] = media
|
|
|
|
# message.send()
|
|
|
|
|
|
|
|
# TODO Do not refresh task before
|
|
|
|
# verifying that it was completed.
|
|
|
|
|
2024-03-26 17:23:22 +01:00
|
|
|
# await start_tasks_xmpp_chat(self, jid, ['status'])
|
2024-02-11 22:31:31 +01:00
|
|
|
# await refresh_task(self, jid, send_update, 'interval')
|
|
|
|
|
|
|
|
# interval = await initdb(
|
|
|
|
# jid,
|
2024-02-16 13:12:06 +01:00
|
|
|
# sqlite.is_setting_key,
|
2024-02-11 22:31:31 +01:00
|
|
|
# "interval"
|
|
|
|
# )
|
|
|
|
# self.task_manager[jid]["interval"] = loop.call_at(
|
|
|
|
# loop.time() + 60 * interval,
|
|
|
|
# loop.create_task,
|
|
|
|
# send_update(jid)
|
|
|
|
# )
|
|
|
|
|
|
|
|
# print(await current_time(), "asyncio.get_event_loop().time()")
|
|
|
|
# print(await current_time(), asyncio.get_event_loop().time())
|
|
|
|
# await asyncio.sleep(60 * interval)
|
|
|
|
|
|
|
|
# loop.call_later(
|
|
|
|
# 60 * interval,
|
|
|
|
# loop.create_task,
|
|
|
|
# send_update(jid)
|
|
|
|
# )
|
|
|
|
|
|
|
|
# print
|
|
|
|
# await handle_event()
|
|
|
|
|
|
|
|
|
2024-01-28 12:17:31 +01:00
|
|
|
def manual(filename, section=None, command=None):
|
2024-03-03 15:13:01 +01:00
|
|
|
function_name = sys._getframe().f_code.co_name
|
2024-03-04 11:16:49 +01:00
|
|
|
logger.debug('{}: filename: {}'.format(function_name, filename))
|
2024-01-27 20:21:45 +01:00
|
|
|
config_dir = config.get_default_config_directory()
|
2024-01-28 12:17:31 +01:00
|
|
|
with open(config_dir + '/' + filename, mode="rb") as commands:
|
2024-01-27 20:21:45 +01:00
|
|
|
cmds = tomllib.load(commands)
|
2024-02-22 15:09:13 +01:00
|
|
|
if section == 'all':
|
|
|
|
cmd_list = ''
|
|
|
|
for cmd in cmds:
|
|
|
|
for i in cmds[cmd]:
|
|
|
|
cmd_list += cmds[cmd][i] + '\n'
|
|
|
|
elif command and section:
|
2024-01-28 12:17:31 +01:00
|
|
|
try:
|
|
|
|
cmd_list = cmds[section][command]
|
2024-02-04 18:08:12 +01:00
|
|
|
except KeyError as e:
|
2024-03-03 15:13:01 +01:00
|
|
|
logger.error(str(e))
|
2024-01-28 12:17:31 +01:00
|
|
|
cmd_list = None
|
2024-01-27 20:21:45 +01:00
|
|
|
elif section:
|
2024-01-28 12:17:31 +01:00
|
|
|
try:
|
|
|
|
cmd_list = []
|
|
|
|
for cmd in cmds[section]:
|
2024-01-27 20:21:45 +01:00
|
|
|
cmd_list.extend([cmd])
|
2024-02-04 18:08:12 +01:00
|
|
|
except KeyError as e:
|
2024-03-03 15:13:01 +01:00
|
|
|
logger.error('KeyError:' + str(e))
|
2024-01-28 12:17:31 +01:00
|
|
|
cmd_list = None
|
2024-01-27 20:21:45 +01:00
|
|
|
else:
|
|
|
|
cmd_list = []
|
|
|
|
for cmd in cmds:
|
|
|
|
cmd_list.extend([cmd])
|
|
|
|
return cmd_list
|
|
|
|
|
2024-01-06 23:03:08 +01:00
|
|
|
|
|
|
|
def log_to_markdown(timestamp, filename, jid, message):
|
|
|
|
"""
|
|
|
|
Log message to file.
|
|
|
|
|
|
|
|
Parameters
|
|
|
|
----------
|
|
|
|
timestamp : str
|
|
|
|
Time stamp.
|
|
|
|
filename : str
|
|
|
|
Jabber ID as name of file.
|
|
|
|
jid : str
|
|
|
|
Jabber ID.
|
|
|
|
message : str
|
|
|
|
Message content.
|
|
|
|
|
|
|
|
Returns
|
|
|
|
-------
|
|
|
|
None.
|
|
|
|
|
|
|
|
"""
|
2024-03-03 15:13:01 +01:00
|
|
|
function_name = sys._getframe().f_code.co_name
|
2024-03-04 11:16:49 +01:00
|
|
|
logger.debug('{}: timestamp: {} filename: {} jid: {} message: {}'.format(function_name, timestamp, filename, jid, message))
|
2024-01-06 23:03:08 +01:00
|
|
|
with open(filename + '.md', 'a') as file:
|
|
|
|
# entry = "{} {}:\n{}\n\n".format(timestamp, jid, message)
|
|
|
|
entry = (
|
|
|
|
"## {}\n"
|
|
|
|
"### {}\n\n"
|
|
|
|
"{}\n\n").format(jid, timestamp, message)
|
|
|
|
file.write(entry)
|
|
|
|
|
|
|
|
|
2024-01-20 18:28:31 +01:00
|
|
|
def is_feed_json(document):
|
|
|
|
"""
|
|
|
|
|
|
|
|
NOTE /kurtmckee/feedparser/issues/103
|
|
|
|
|
|
|
|
Determine whether document is json feed or not.
|
|
|
|
|
|
|
|
Parameters
|
|
|
|
----------
|
|
|
|
feed : dict
|
|
|
|
Parsed feed.
|
|
|
|
|
|
|
|
Returns
|
|
|
|
-------
|
|
|
|
val : boolean
|
|
|
|
True or False.
|
|
|
|
"""
|
2024-03-03 15:13:01 +01:00
|
|
|
function_name = sys._getframe().f_code.co_name
|
2024-03-04 11:16:49 +01:00
|
|
|
logger.debug('{}'.format(function_name))
|
2024-01-20 18:28:31 +01:00
|
|
|
value = False
|
2024-01-24 19:11:39 +01:00
|
|
|
try:
|
|
|
|
feed = json.loads(document)
|
|
|
|
if not feed['items']:
|
|
|
|
if "version" in feed.keys():
|
|
|
|
if 'jsonfeed' in feed['version']:
|
|
|
|
value = True
|
|
|
|
else: # TODO Test
|
|
|
|
value = False
|
|
|
|
# elif 'title' in feed.keys():
|
|
|
|
# value = True
|
|
|
|
else:
|
|
|
|
value = False
|
2024-01-20 18:28:31 +01:00
|
|
|
else:
|
2024-01-24 19:11:39 +01:00
|
|
|
value = True
|
|
|
|
except:
|
|
|
|
pass
|
2024-01-20 18:28:31 +01:00
|
|
|
return value
|
|
|
|
|
|
|
|
|
2024-01-06 23:03:08 +01:00
|
|
|
def is_feed(feed):
|
|
|
|
"""
|
|
|
|
Determine whether document is feed or not.
|
|
|
|
|
|
|
|
Parameters
|
|
|
|
----------
|
|
|
|
feed : dict
|
|
|
|
Parsed feed.
|
|
|
|
|
|
|
|
Returns
|
|
|
|
-------
|
|
|
|
val : boolean
|
|
|
|
True or False.
|
|
|
|
"""
|
2024-03-03 15:13:01 +01:00
|
|
|
function_name = sys._getframe().f_code.co_name
|
2024-03-04 11:16:49 +01:00
|
|
|
logger.debug('{}'.format(function_name))
|
2024-01-06 23:03:08 +01:00
|
|
|
value = False
|
2024-01-09 13:34:10 +01:00
|
|
|
# message = None
|
2024-01-06 23:03:08 +01:00
|
|
|
if not feed.entries:
|
|
|
|
if "version" in feed.keys():
|
2024-01-20 18:28:31 +01:00
|
|
|
# feed["version"]
|
2024-01-06 23:03:08 +01:00
|
|
|
if feed.version:
|
|
|
|
value = True
|
|
|
|
# message = (
|
|
|
|
# "Empty feed for {}"
|
|
|
|
# ).format(url)
|
|
|
|
elif "title" in feed["feed"].keys():
|
|
|
|
value = True
|
|
|
|
# message = (
|
|
|
|
# "Empty feed for {}"
|
|
|
|
# ).format(url)
|
|
|
|
else:
|
|
|
|
value = False
|
|
|
|
# message = (
|
|
|
|
# "No entries nor title for {}"
|
|
|
|
# ).format(url)
|
|
|
|
elif feed.bozo:
|
|
|
|
value = False
|
|
|
|
# message = (
|
|
|
|
# "Bozo detected for {}"
|
|
|
|
# ).format(url)
|
|
|
|
else:
|
|
|
|
value = True
|
|
|
|
# message = (
|
|
|
|
# "Good feed for {}"
|
|
|
|
# ).format(url)
|
|
|
|
return value
|
|
|
|
|
|
|
|
|
2024-04-05 17:25:04 +02:00
|
|
|
async def list_unread_entries(self, result, feed_title, jid):
|
2024-03-03 15:13:01 +01:00
|
|
|
function_name = sys._getframe().f_code.co_name
|
2024-03-07 15:52:51 +01:00
|
|
|
logger.debug('{}: feed_title: {} jid: {}'
|
|
|
|
.format(function_name, feed_title, jid))
|
2024-01-06 23:03:08 +01:00
|
|
|
# TODO Add filtering
|
|
|
|
# TODO Do this when entry is added to list and mark it as read
|
|
|
|
# DONE!
|
|
|
|
# results = []
|
2024-02-16 13:12:06 +01:00
|
|
|
# if sqlite.is_setting_key(db_file, "deny"):
|
2024-01-06 23:03:08 +01:00
|
|
|
# while len(results) < num:
|
|
|
|
# result = cur.execute(sql).fetchone()
|
2024-02-16 13:12:06 +01:00
|
|
|
# blacklist = sqlite.get_setting_value(db_file, "deny").split(",")
|
2024-01-06 23:03:08 +01:00
|
|
|
# for i in blacklist:
|
|
|
|
# if i in result[1]:
|
|
|
|
# continue
|
|
|
|
# print("rejected:", result[1])
|
|
|
|
# print("accepted:", result[1])
|
|
|
|
# results.extend([result])
|
|
|
|
|
|
|
|
# news_list = "You've got {} news items:\n".format(num)
|
|
|
|
# NOTE Why doesn't this work without list?
|
|
|
|
# i.e. for result in results
|
|
|
|
# for result in results.fetchall():
|
2024-02-26 02:17:50 +01:00
|
|
|
ix = str(result[0])
|
|
|
|
title = str(result[1])
|
2024-01-06 23:03:08 +01:00
|
|
|
# # TODO Retrieve summary from feed
|
|
|
|
# # See fetch.view_entry
|
2024-02-26 02:17:50 +01:00
|
|
|
summary = result[3]
|
|
|
|
# Remove HTML tags
|
|
|
|
try:
|
2024-02-29 18:08:53 +01:00
|
|
|
title = BeautifulSoup(title, "lxml").text
|
2024-02-26 02:17:50 +01:00
|
|
|
summary = BeautifulSoup(summary, "lxml").text
|
|
|
|
except:
|
|
|
|
print(result[3])
|
|
|
|
breakpoint()
|
|
|
|
# TODO Limit text length
|
2024-01-06 23:03:08 +01:00
|
|
|
# summary = summary.replace("\n\n\n", "\n\n")
|
2024-02-26 02:17:50 +01:00
|
|
|
summary = summary.replace('\n', ' ')
|
|
|
|
summary = summary.replace(' ', ' ')
|
|
|
|
summary = summary.replace(' ', ' ')
|
2024-03-12 18:13:01 +01:00
|
|
|
length = Config.get_setting_value(self.settings, jid, 'length')
|
2024-02-26 02:17:50 +01:00
|
|
|
length = int(length)
|
|
|
|
summary = summary[:length] + " […]"
|
2024-01-06 23:03:08 +01:00
|
|
|
# summary = summary.strip().split('\n')
|
|
|
|
# summary = ["> " + line for line in summary]
|
|
|
|
# summary = "\n".join(summary)
|
|
|
|
link = result[2]
|
|
|
|
link = remove_tracking_parameters(link)
|
2024-04-05 17:25:04 +02:00
|
|
|
link = await replace_hostname(link, "link") or link
|
2024-02-26 02:17:50 +01:00
|
|
|
# news_item = ("\n{}\n{}\n{} [{}]\n").format(str(title), str(link),
|
|
|
|
# str(feed_title), str(ix))
|
2024-03-12 18:13:01 +01:00
|
|
|
formatting = Config.get_setting_value(self.settings, jid, 'formatting')
|
2024-02-26 02:17:50 +01:00
|
|
|
news_item = formatting.format(feed_title=feed_title,
|
|
|
|
title=title,
|
|
|
|
summary=summary,
|
|
|
|
link=link,
|
|
|
|
ix=ix)
|
2024-03-12 18:13:01 +01:00
|
|
|
# news_item = news_item.replace('\\n', '\n')
|
2024-01-06 23:03:08 +01:00
|
|
|
return news_item
|
|
|
|
|
|
|
|
|
|
|
|
def list_search_results(query, results):
|
2024-03-03 15:13:01 +01:00
|
|
|
function_name = sys._getframe().f_code.co_name
|
2024-03-04 11:16:49 +01:00
|
|
|
logger.debug('{}: query: {}'
|
2024-03-03 15:13:01 +01:00
|
|
|
.format(function_name, query))
|
2024-02-18 00:21:44 +01:00
|
|
|
message = ("Search results for '{}':\n\n```"
|
|
|
|
.format(query))
|
2024-01-06 23:03:08 +01:00
|
|
|
for result in results:
|
2024-02-18 00:21:44 +01:00
|
|
|
message += ("\n{}\n{}\n"
|
|
|
|
.format(str(result[0]), str(result[1])))
|
2024-01-06 23:03:08 +01:00
|
|
|
if len(results):
|
2024-01-07 10:57:54 +01:00
|
|
|
message += "```\nTotal of {} results".format(len(results))
|
2024-01-06 23:03:08 +01:00
|
|
|
else:
|
2024-01-07 10:57:54 +01:00
|
|
|
message = "No results were found for: {}".format(query)
|
|
|
|
return message
|
2024-01-06 23:03:08 +01:00
|
|
|
|
|
|
|
|
2024-03-12 18:13:01 +01:00
|
|
|
def list_feeds_by_query(query, results):
|
2024-03-03 15:13:01 +01:00
|
|
|
function_name = sys._getframe().f_code.co_name
|
2024-03-12 18:13:01 +01:00
|
|
|
logger.debug('{}'.format(function_name))
|
2024-02-18 00:21:44 +01:00
|
|
|
message = ('Feeds containing "{}":\n\n```'
|
|
|
|
.format(query))
|
2024-01-06 23:03:08 +01:00
|
|
|
for result in results:
|
2024-02-18 00:21:44 +01:00
|
|
|
message += ('\nName : {} [{}]'
|
|
|
|
'\nURL : {}'
|
|
|
|
'\n'
|
|
|
|
.format(str(result[0]), str(result[1]), str(result[2])))
|
2024-01-06 23:03:08 +01:00
|
|
|
if len(results):
|
2024-01-07 10:57:54 +01:00
|
|
|
message += "\n```\nTotal of {} feeds".format(len(results))
|
2024-01-06 23:03:08 +01:00
|
|
|
else:
|
2024-01-07 10:57:54 +01:00
|
|
|
message = "No feeds were found for: {}".format(query)
|
|
|
|
return message
|
2024-01-06 23:03:08 +01:00
|
|
|
|
|
|
|
|
2024-03-07 15:52:51 +01:00
|
|
|
async def list_options(self, jid_bare):
|
|
|
|
"""
|
|
|
|
Print options.
|
|
|
|
|
|
|
|
Parameters
|
|
|
|
----------
|
|
|
|
jid_bare : str
|
|
|
|
Jabber ID.
|
|
|
|
|
|
|
|
Returns
|
|
|
|
-------
|
|
|
|
msg : str
|
|
|
|
Options as message.
|
|
|
|
"""
|
|
|
|
function_name = sys._getframe().f_code.co_name
|
|
|
|
logger.debug('{}: jid: {}'
|
|
|
|
.format(function_name, jid_bare))
|
|
|
|
|
|
|
|
# msg = """You have {} unread news items out of {} from {} news sources.
|
|
|
|
# """.format(unread_entries, entries, feeds)
|
|
|
|
|
|
|
|
# try:
|
|
|
|
# value = cur.execute(sql, par).fetchone()[0]
|
|
|
|
# except:
|
|
|
|
# print("Error for key:", key)
|
|
|
|
# value = "Default"
|
|
|
|
# values.extend([value])
|
|
|
|
|
2024-03-12 18:13:01 +01:00
|
|
|
value_archive = Config.get_setting_value(self.settings, jid_bare, 'archive')
|
|
|
|
value_interval = Config.get_setting_value(self.settings, jid_bare, 'interval')
|
|
|
|
value_quantum = Config.get_setting_value(self.settings, jid_bare, 'quantum')
|
|
|
|
value_enabled = Config.get_setting_value(self.settings, jid_bare, 'enabled')
|
2024-03-07 21:29:52 +01:00
|
|
|
|
2024-03-07 15:52:51 +01:00
|
|
|
message = ("Options:"
|
|
|
|
"\n"
|
|
|
|
"```"
|
|
|
|
"\n"
|
|
|
|
"Items to archive : {}\n"
|
|
|
|
"Update interval : {}\n"
|
|
|
|
"Items per update : {}\n"
|
|
|
|
"Operation status : {}\n"
|
2024-03-07 21:29:52 +01:00
|
|
|
"```").format(value_archive, value_interval, value_quantum,
|
|
|
|
value_enabled)
|
2024-03-07 15:52:51 +01:00
|
|
|
return message
|
|
|
|
|
|
|
|
|
2024-01-14 19:05:12 +01:00
|
|
|
async def list_statistics(db_file):
|
2024-01-06 23:03:08 +01:00
|
|
|
"""
|
2024-03-07 15:52:51 +01:00
|
|
|
Print statistics.
|
2024-01-06 23:03:08 +01:00
|
|
|
|
|
|
|
Parameters
|
|
|
|
----------
|
|
|
|
db_file : str
|
|
|
|
Path to database file.
|
|
|
|
|
|
|
|
Returns
|
|
|
|
-------
|
|
|
|
msg : str
|
|
|
|
Statistics as message.
|
|
|
|
"""
|
2024-03-03 15:13:01 +01:00
|
|
|
function_name = sys._getframe().f_code.co_name
|
2024-03-04 11:16:49 +01:00
|
|
|
logger.debug('{}: db_file: {}'
|
2024-03-03 15:13:01 +01:00
|
|
|
.format(function_name, db_file))
|
2024-03-08 10:14:36 +01:00
|
|
|
entries_unread = sqlite.get_number_of_entries_unread(db_file)
|
2024-04-05 17:25:04 +02:00
|
|
|
entries = sqlite.get_number_of_items(db_file, 'entries_properties')
|
2024-03-08 10:14:36 +01:00
|
|
|
feeds_active = sqlite.get_number_of_feeds_active(db_file)
|
2024-04-05 17:25:04 +02:00
|
|
|
feeds_all = sqlite.get_number_of_items(db_file, 'feeds_properties')
|
2024-01-14 19:05:12 +01:00
|
|
|
|
|
|
|
# msg = """You have {} unread news items out of {} from {} news sources.
|
|
|
|
# """.format(unread_entries, entries, feeds)
|
|
|
|
|
|
|
|
# try:
|
|
|
|
# value = cur.execute(sql, par).fetchone()[0]
|
|
|
|
# except:
|
|
|
|
# print("Error for key:", key)
|
|
|
|
# value = "Default"
|
|
|
|
# values.extend([value])
|
|
|
|
|
2024-03-07 15:52:51 +01:00
|
|
|
message = ("Statistics:"
|
|
|
|
"\n"
|
|
|
|
"```"
|
|
|
|
"\n"
|
2024-02-11 22:31:31 +01:00
|
|
|
"News items : {}/{}\n"
|
|
|
|
"News sources : {}/{}\n"
|
|
|
|
"```").format(entries_unread,
|
2024-04-05 17:25:04 +02:00
|
|
|
entries,
|
2024-02-11 22:31:31 +01:00
|
|
|
feeds_active,
|
2024-03-07 15:52:51 +01:00
|
|
|
feeds_all)
|
2024-01-07 10:57:54 +01:00
|
|
|
return message
|
2024-01-06 23:03:08 +01:00
|
|
|
|
|
|
|
|
|
|
|
# FIXME Replace counter by len
|
|
|
|
def list_last_entries(results, num):
|
2024-03-03 15:13:01 +01:00
|
|
|
function_name = sys._getframe().f_code.co_name
|
2024-03-04 11:16:49 +01:00
|
|
|
logger.debug('{}: num: {}'
|
|
|
|
.format(function_name, num))
|
2024-01-07 10:57:54 +01:00
|
|
|
message = "Recent {} titles:\n\n```".format(num)
|
2024-01-06 23:03:08 +01:00
|
|
|
for result in results:
|
2024-02-11 22:31:31 +01:00
|
|
|
message += ("\n{}\n{}\n"
|
|
|
|
.format(str(result[0]), str(result[1])))
|
2024-01-06 23:03:08 +01:00
|
|
|
if len(results):
|
2024-01-07 10:57:54 +01:00
|
|
|
message += "```\n"
|
2024-01-06 23:03:08 +01:00
|
|
|
else:
|
2024-01-07 10:57:54 +01:00
|
|
|
message = "There are no news at the moment."
|
|
|
|
return message
|
2024-01-06 23:03:08 +01:00
|
|
|
|
|
|
|
|
2024-02-18 00:21:44 +01:00
|
|
|
def pick_a_feed(lang=None):
|
2024-03-03 15:13:01 +01:00
|
|
|
function_name = sys._getframe().f_code.co_name
|
2024-03-04 11:16:49 +01:00
|
|
|
logger.debug('{}: lang: {}'
|
|
|
|
.format(function_name, lang))
|
2024-02-18 00:21:44 +01:00
|
|
|
config_dir = config.get_default_config_directory()
|
|
|
|
with open(config_dir + '/' + 'feeds.toml', mode="rb") as feeds:
|
|
|
|
urls = tomllib.load(feeds)
|
|
|
|
import random
|
|
|
|
url = random.choice(urls['feeds'])
|
|
|
|
return url
|
|
|
|
|
|
|
|
|
2024-01-06 23:03:08 +01:00
|
|
|
def list_feeds(results):
|
2024-03-03 15:13:01 +01:00
|
|
|
function_name = sys._getframe().f_code.co_name
|
2024-03-04 11:16:49 +01:00
|
|
|
logger.debug('{}'.format(function_name))
|
2024-01-07 10:57:54 +01:00
|
|
|
message = "\nList of subscriptions:\n\n```\n"
|
2024-01-06 23:03:08 +01:00
|
|
|
for result in results:
|
2024-03-12 18:13:01 +01:00
|
|
|
message += ("{} [{}]\n"
|
|
|
|
"{}\n"
|
|
|
|
"\n\n"
|
|
|
|
.format(str(result[1]), str(result[0]), str(result[2])))
|
2024-01-06 23:03:08 +01:00
|
|
|
if len(results):
|
2024-02-11 22:31:31 +01:00
|
|
|
message += ('```\nTotal of {} subscriptions.\n'
|
|
|
|
.format(len(results)))
|
2024-01-06 23:03:08 +01:00
|
|
|
else:
|
2024-02-18 00:21:44 +01:00
|
|
|
url = pick_a_feed()
|
2024-04-05 17:25:04 +02:00
|
|
|
message = ('List of subscriptions is empty. To add a feed, send a URL.'
|
2024-03-09 20:03:18 +01:00
|
|
|
'\n'
|
2024-04-05 17:25:04 +02:00
|
|
|
'Featured news: *{}*\n{}'
|
2024-03-09 20:03:18 +01:00
|
|
|
.format(url['name'], url['link']))
|
2024-01-07 10:57:54 +01:00
|
|
|
return message
|
2024-01-06 23:03:08 +01:00
|
|
|
|
|
|
|
|
2024-04-05 17:25:04 +02:00
|
|
|
def list_bookmarks(self, conferences):
|
2024-03-03 15:13:01 +01:00
|
|
|
function_name = sys._getframe().f_code.co_name
|
2024-03-04 11:16:49 +01:00
|
|
|
logger.debug('{}'.format(function_name))
|
2024-02-11 22:31:31 +01:00
|
|
|
message = '\nList of groupchats:\n\n```\n'
|
2024-01-06 23:03:08 +01:00
|
|
|
for conference in conferences:
|
2024-02-14 04:04:49 +01:00
|
|
|
message += ('Name: {}\n'
|
|
|
|
'Room: {}\n'
|
2024-02-11 22:31:31 +01:00
|
|
|
'\n'
|
2024-02-14 04:04:49 +01:00
|
|
|
.format(conference['name'], conference['jid']))
|
2024-02-11 22:31:31 +01:00
|
|
|
message += ('```\nTotal of {} groupchats.\n'
|
|
|
|
.format(len(conferences)))
|
2024-01-07 10:57:54 +01:00
|
|
|
return message
|
2024-01-06 23:03:08 +01:00
|
|
|
|
|
|
|
|
|
|
|
def export_to_markdown(jid, filename, results):
|
2024-03-03 15:13:01 +01:00
|
|
|
function_name = sys._getframe().f_code.co_name
|
2024-03-04 11:16:49 +01:00
|
|
|
logger.debug('{}: jid: {} filename: {}'
|
|
|
|
.format(function_name, jid, filename))
|
2024-01-06 23:03:08 +01:00
|
|
|
with open(filename, 'w') as file:
|
2024-02-11 22:31:31 +01:00
|
|
|
file.write('# Subscriptions for {}\n'.format(jid))
|
|
|
|
file.write('## Set of feeds exported with Slixfeed\n')
|
2024-01-06 23:03:08 +01:00
|
|
|
for result in results:
|
2024-03-26 18:49:16 +01:00
|
|
|
file.write('- [{}]({})\n'.format(result[1], result[2]))
|
2024-02-11 22:31:31 +01:00
|
|
|
file.write('\n\n* * *\n\nThis list was saved on {} from xmpp:{} using '
|
|
|
|
'[Slixfeed](https://gitgud.io/sjehuda/slixfeed)\n'
|
|
|
|
.format(dt.current_date(), jid))
|
2024-01-06 23:03:08 +01:00
|
|
|
|
|
|
|
|
2024-01-10 21:06:56 +01:00
|
|
|
# TODO Consider adding element jid as a pointer of import
|
2024-01-06 23:03:08 +01:00
|
|
|
def export_to_opml(jid, filename, results):
|
2024-03-26 18:49:16 +01:00
|
|
|
print(jid, filename, results)
|
2024-03-03 15:13:01 +01:00
|
|
|
function_name = sys._getframe().f_code.co_name
|
2024-03-04 11:16:49 +01:00
|
|
|
logger.debug('{} jid: {} filename: {}'
|
|
|
|
.format(function_name, jid, filename))
|
2024-04-14 14:56:45 +02:00
|
|
|
root = ETR.Element("opml")
|
2024-01-06 23:03:08 +01:00
|
|
|
root.set("version", "1.0")
|
2024-04-14 14:56:45 +02:00
|
|
|
head = ETR.SubElement(root, "head")
|
|
|
|
ETR.SubElement(head, "title").text = "{}".format(jid)
|
|
|
|
ETR.SubElement(head, "description").text = (
|
2024-01-10 21:06:56 +01:00
|
|
|
"Set of subscriptions exported by Slixfeed")
|
2024-04-14 14:56:45 +02:00
|
|
|
ETR.SubElement(head, "generator").text = "Slixfeed"
|
|
|
|
ETR.SubElement(head, "urlPublic").text = (
|
2024-01-06 23:03:08 +01:00
|
|
|
"https://gitgud.io/sjehuda/slixfeed")
|
2024-02-07 01:26:42 +01:00
|
|
|
time_stamp = dt.current_time()
|
2024-04-14 14:56:45 +02:00
|
|
|
ETR.SubElement(head, "dateCreated").text = time_stamp
|
|
|
|
ETR.SubElement(head, "dateModified").text = time_stamp
|
|
|
|
body = ETR.SubElement(root, "body")
|
2024-01-06 23:03:08 +01:00
|
|
|
for result in results:
|
2024-04-14 14:56:45 +02:00
|
|
|
outline = ETR.SubElement(body, "outline")
|
2024-03-26 18:49:16 +01:00
|
|
|
outline.set("text", result[1])
|
|
|
|
outline.set("xmlUrl", result[2])
|
2024-01-06 23:03:08 +01:00
|
|
|
# outline.set("type", result[2])
|
2024-04-14 14:56:45 +02:00
|
|
|
tree = ETR.ElementTree(root)
|
2024-01-06 23:03:08 +01:00
|
|
|
tree.write(filename)
|
|
|
|
|
|
|
|
|
2024-03-24 09:14:20 +01:00
|
|
|
async def import_opml(db_file, result):
|
2024-03-03 15:13:01 +01:00
|
|
|
function_name = sys._getframe().f_code.co_name
|
2024-03-24 09:14:20 +01:00
|
|
|
logger.debug('{}: db_file: {}'
|
|
|
|
.format(function_name, db_file))
|
2024-02-18 00:21:44 +01:00
|
|
|
if not result['error']:
|
|
|
|
document = result['content']
|
2024-04-14 14:56:45 +02:00
|
|
|
root = ETR.fromstring(document)
|
2024-04-05 17:25:04 +02:00
|
|
|
before = sqlite.get_number_of_items(db_file, 'feeds_properties')
|
2024-01-06 23:03:08 +01:00
|
|
|
feeds = []
|
|
|
|
for child in root.findall(".//outline"):
|
|
|
|
url = child.get("xmlUrl")
|
|
|
|
title = child.get("text")
|
|
|
|
# feed = (url, title)
|
|
|
|
# feeds.extend([feed])
|
2024-04-05 17:25:04 +02:00
|
|
|
feed = {
|
|
|
|
'title' : title,
|
|
|
|
'url' : url,
|
|
|
|
}
|
|
|
|
feeds.extend([feed])
|
2024-01-10 21:06:56 +01:00
|
|
|
await sqlite.import_feeds(db_file, feeds)
|
|
|
|
await sqlite.add_metadata(db_file)
|
2024-04-05 17:25:04 +02:00
|
|
|
after = sqlite.get_number_of_items(db_file, 'feeds_properties')
|
2024-01-06 23:03:08 +01:00
|
|
|
difference = int(after) - int(before)
|
|
|
|
return difference
|
2024-01-04 02:16:24 +01:00
|
|
|
|
|
|
|
|
2024-04-05 17:25:04 +02:00
|
|
|
async def add_feed(self, jid_bare, db_file, url, identifier):
|
2024-03-03 15:13:01 +01:00
|
|
|
function_name = sys._getframe().f_code.co_name
|
2024-03-04 11:16:49 +01:00
|
|
|
logger.debug('{}: db_file: {} url: {}'
|
|
|
|
.format(function_name, db_file, url))
|
2024-01-04 02:16:24 +01:00
|
|
|
while True:
|
2024-04-05 17:25:04 +02:00
|
|
|
feed_id = sqlite.get_feed_id(db_file, url)
|
|
|
|
if not feed_id:
|
|
|
|
exist_identifier = sqlite.check_identifier_exist(db_file, identifier)
|
|
|
|
if not exist_identifier:
|
2024-03-26 17:23:22 +01:00
|
|
|
result = await fetch.http(url)
|
|
|
|
message = result['message']
|
|
|
|
status_code = result['status_code']
|
|
|
|
if not result['error']:
|
2024-04-05 17:25:04 +02:00
|
|
|
await sqlite.update_feed_status(db_file, feed_id, status_code)
|
2024-03-26 17:23:22 +01:00
|
|
|
document = result['content']
|
|
|
|
feed = parse(document)
|
2024-04-05 17:25:04 +02:00
|
|
|
# if document and status_code == 200:
|
2024-03-26 17:23:22 +01:00
|
|
|
# if is_feed(url, feed):
|
|
|
|
if is_feed(feed):
|
|
|
|
if "title" in feed["feed"].keys():
|
|
|
|
title = feed["feed"]["title"]
|
|
|
|
else:
|
|
|
|
title = urlsplit(url).netloc
|
|
|
|
if "language" in feed["feed"].keys():
|
|
|
|
language = feed["feed"]["language"]
|
|
|
|
else:
|
|
|
|
language = ''
|
|
|
|
if "encoding" in feed.keys():
|
|
|
|
encoding = feed["encoding"]
|
|
|
|
else:
|
|
|
|
encoding = ''
|
|
|
|
if "updated_parsed" in feed["feed"].keys():
|
|
|
|
updated = feed["feed"]["updated_parsed"]
|
|
|
|
try:
|
|
|
|
updated = dt.convert_struct_time_to_iso8601(updated)
|
|
|
|
except:
|
|
|
|
updated = ''
|
|
|
|
else:
|
2024-01-20 18:28:31 +01:00
|
|
|
updated = ''
|
2024-04-05 17:25:04 +02:00
|
|
|
version = feed.version
|
|
|
|
entries_count = len(feed.entries)
|
|
|
|
await sqlite.insert_feed(db_file,
|
|
|
|
url,
|
|
|
|
title,
|
|
|
|
identifier,
|
|
|
|
entries=entries_count,
|
2024-03-26 17:23:22 +01:00
|
|
|
version=version,
|
|
|
|
encoding=encoding,
|
|
|
|
language=language,
|
|
|
|
status_code=status_code,
|
|
|
|
updated=updated)
|
2024-04-05 17:25:04 +02:00
|
|
|
feed_valid = 0 if feed.bozo else 1
|
|
|
|
await sqlite.update_feed_validity(db_file, feed_id, feed_valid)
|
|
|
|
if feed.has_key('updated_parsed'):
|
|
|
|
feed_updated = feed.updated_parsed
|
|
|
|
try:
|
|
|
|
feed_updated = dt.convert_struct_time_to_iso8601(feed_updated)
|
|
|
|
except:
|
|
|
|
feed_updated = None
|
|
|
|
else:
|
|
|
|
feed_updated = None
|
2024-05-12 11:55:23 +02:00
|
|
|
feed_properties = get_properties_of_feed(db_file,
|
|
|
|
feed_id, feed)
|
2024-04-05 17:25:04 +02:00
|
|
|
await sqlite.update_feed_properties(db_file, feed_id,
|
2024-04-05 17:59:24 +02:00
|
|
|
feed_properties)
|
2024-03-08 10:14:36 +01:00
|
|
|
feed_id = sqlite.get_feed_id(db_file, url)
|
2024-02-04 18:08:12 +01:00
|
|
|
feed_id = feed_id[0]
|
2024-04-05 17:25:04 +02:00
|
|
|
new_entries = get_properties_of_entries(
|
2024-05-12 11:55:23 +02:00
|
|
|
jid_bare, db_file, url, feed_id, feed)
|
2024-04-05 17:25:04 +02:00
|
|
|
if new_entries:
|
|
|
|
await sqlite.add_entries_and_update_feed_state(
|
|
|
|
db_file, feed_id, new_entries)
|
|
|
|
old = Config.get_setting_value(self.settings, jid_bare, 'old')
|
|
|
|
if not old: await sqlite.mark_feed_as_read(db_file, feed_id)
|
2024-02-18 00:21:44 +01:00
|
|
|
result_final = {'link' : url,
|
2024-03-26 17:23:22 +01:00
|
|
|
'index' : feed_id,
|
|
|
|
'name' : title,
|
2024-02-18 00:21:44 +01:00
|
|
|
'code' : status_code,
|
2024-03-26 17:23:22 +01:00
|
|
|
'error' : False,
|
2024-03-13 16:44:20 +01:00
|
|
|
'message': message,
|
2024-03-26 17:23:22 +01:00
|
|
|
'exist' : False,
|
2024-04-05 17:25:04 +02:00
|
|
|
'identifier' : None}
|
2024-02-18 00:21:44 +01:00
|
|
|
break
|
2024-03-26 17:23:22 +01:00
|
|
|
# NOTE This elif statement may be unnecessary
# when feedparser supports JSON feed.
|
|
|
|
elif is_feed_json(document):
|
|
|
|
feed = json.loads(document)
|
|
|
|
if "title" in feed.keys():
|
|
|
|
title = feed["title"]
|
|
|
|
else:
|
|
|
|
title = urlsplit(url).netloc
|
|
|
|
if "language" in feed.keys():
|
|
|
|
language = feed["language"]
|
|
|
|
else:
|
|
|
|
language = ''
|
|
|
|
if "encoding" in feed.keys():
|
|
|
|
encoding = feed["encoding"]
|
|
|
|
else:
|
|
|
|
encoding = ''
|
|
|
|
if "date_published" in feed.keys():
|
|
|
|
updated = feed["date_published"]
|
|
|
|
try:
|
|
|
|
updated = dt.convert_struct_time_to_iso8601(updated)
|
|
|
|
except:
|
|
|
|
updated = ''
|
|
|
|
else:
|
|
|
|
updated = ''
|
|
|
|
version = 'json' + feed["version"].split('/').pop()
|
2024-04-05 17:25:04 +02:00
|
|
|
entries_count = len(feed["items"])
|
|
|
|
await sqlite.insert_feed(db_file,
|
|
|
|
url,
|
|
|
|
title,
|
|
|
|
identifier,
|
|
|
|
entries=entries_count,
|
2024-03-26 17:23:22 +01:00
|
|
|
version=version,
|
|
|
|
encoding=encoding,
|
|
|
|
language=language,
|
|
|
|
status_code=status_code,
|
|
|
|
updated=updated)
|
|
|
|
await scan_json(self, jid_bare, db_file, url)
|
|
|
|
old = Config.get_setting_value(self.settings, jid_bare, 'old')
|
|
|
|
if not old:
|
|
|
|
feed_id = sqlite.get_feed_id(db_file, url)
|
|
|
|
feed_id = feed_id[0]
|
|
|
|
await sqlite.mark_feed_as_read(db_file, feed_id)
|
|
|
|
result_final = {'link' : url,
|
|
|
|
'index' : feed_id,
|
|
|
|
'name' : title,
|
|
|
|
'code' : status_code,
|
|
|
|
'error' : False,
|
|
|
|
'message': message,
|
|
|
|
'exist' : False,
|
2024-04-05 17:25:04 +02:00
|
|
|
'identifier' : None}
|
2024-01-04 02:16:24 +01:00
|
|
|
break
|
2024-01-09 13:34:10 +01:00
|
|
|
else:
|
2024-03-26 17:23:22 +01:00
|
|
|
# NOTE Do not be tempted to return a compact dictionary.
|
|
|
|
# That is, dictionary within dictionary
|
|
|
|
# Return multiple dictionaries in a list or tuple.
|
|
|
|
result = await crawl.probe_page(url, document)
|
|
|
|
if not result:
|
|
|
|
# Get out of the loop with dict indicating error.
|
|
|
|
result_final = {'link' : url,
|
|
|
|
'index' : None,
|
|
|
|
'name' : None,
|
|
|
|
'code' : status_code,
|
|
|
|
'error' : True,
|
|
|
|
'message': message,
|
|
|
|
'exist' : False,
|
2024-04-05 17:25:04 +02:00
|
|
|
'identifier' : None}
|
2024-03-26 17:23:22 +01:00
|
|
|
break
|
|
|
|
elif isinstance(result, list):
|
|
|
|
# Get out of the loop and deliver a list of dicts.
|
|
|
|
result_final = result
|
|
|
|
break
|
|
|
|
else:
|
|
|
|
# Go back up to the while loop and try again.
|
|
|
|
url = result['link']
|
|
|
|
else:
|
2024-04-05 17:25:04 +02:00
|
|
|
await sqlite.update_feed_status(db_file, feed_id, status_code)
|
2024-03-26 17:23:22 +01:00
|
|
|
result_final = {'link' : url,
|
|
|
|
'index' : None,
|
|
|
|
'name' : None,
|
|
|
|
'code' : status_code,
|
|
|
|
'error' : True,
|
|
|
|
'message': message,
|
|
|
|
'exist' : False,
|
2024-04-05 17:25:04 +02:00
|
|
|
'identifier' : None}
|
2024-03-26 17:23:22 +01:00
|
|
|
break
|
2024-01-04 02:16:24 +01:00
|
|
|
else:
|
2024-04-05 17:25:04 +02:00
|
|
|
ix = exist_identifier[1]
|
|
|
|
identifier = exist_identifier[2]
|
|
|
|
message = ('Identifier "{}" is already allocated.'
|
|
|
|
.format(identifier))
|
2024-02-18 00:21:44 +01:00
|
|
|
result_final = {'link' : url,
|
2024-03-26 17:23:22 +01:00
|
|
|
'index' : ix,
|
2024-02-16 02:46:04 +01:00
|
|
|
'name' : None,
|
2024-03-26 17:23:22 +01:00
|
|
|
'code' : None,
|
|
|
|
'error' : False,
|
2024-03-13 16:44:20 +01:00
|
|
|
'message': message,
|
2024-03-26 17:23:22 +01:00
|
|
|
'exist' : False,
|
2024-04-05 17:25:04 +02:00
|
|
|
'identifier' : identifier}
|
2024-01-04 02:16:24 +01:00
|
|
|
break
|
|
|
|
else:
|
2024-04-05 17:25:04 +02:00
|
|
|
feed_id = feed_id[0]
|
|
|
|
title = sqlite.get_feed_title(db_file, feed_id)
|
|
|
|
title = title[0]
|
2024-03-13 16:44:20 +01:00
|
|
|
message = 'URL already exists.'
|
2024-02-18 00:21:44 +01:00
|
|
|
result_final = {'link' : url,
|
2024-04-05 17:25:04 +02:00
|
|
|
'index' : feed_id,
|
|
|
|
'name' : title,
|
2024-02-16 02:46:04 +01:00
|
|
|
'code' : None,
|
|
|
|
'error' : False,
|
2024-03-13 16:44:20 +01:00
|
|
|
'message': message,
|
2024-03-26 17:23:22 +01:00
|
|
|
'exist' : True,
|
2024-04-05 17:25:04 +02:00
|
|
|
'identifier' : None}
|
2024-01-04 02:16:24 +01:00
|
|
|
break
|
2024-02-16 02:46:04 +01:00
|
|
|
return result_final
|
2024-01-04 02:16:24 +01:00
|
|
|
|
|
|
|
|
2024-03-07 15:52:51 +01:00
|
|
|
async def scan_json(self, jid_bare, db_file, url):
    """
    Check feeds for new entries.

    Parameters
    ----------
    db_file : str
        Path to database file.
    url : str, optional
        URL. The default is None.
    """
    function_name = sys._getframe().f_code.co_name
    logger.debug('{}: db_file: {} url: {}'
                 .format(function_name, db_file, url))
    if isinstance(url, tuple): url = url[0]
    result = await fetch.http(url)
    if not result['error']:
        document = result['content']
        status = result['status_code']
        new_entries = []
        if document and status == 200:
            feed = json.loads(document)
            entries = feed["items"]
            await remove_nonexistent_entries_json(self, jid_bare, db_file, url, feed)
            try:
                feed_id = sqlite.get_feed_id(db_file, url)
                feed_id = feed_id[0]
                # await sqlite.update_feed_validity(
                #     db_file, feed_id, valid)
                if "date_published" in feed.keys():
                    updated = feed["date_published"]
                    try:
                        updated = dt.convert_struct_time_to_iso8601(updated)
                    except:
                        updated = ''
                else:
                    updated = ''
                feed_id = sqlite.get_feed_id(db_file, url)
                feed_id = feed_id[0]
                await sqlite.update_feed_properties(
                    db_file, feed_id, len(feed["items"]), updated)
                # await update_feed_status
            except (
                IncompleteReadError,
                IncompleteRead,
                error.URLError
            ) as e:
                logger.error(e)
                return
            # new_entry = 0
            for entry in entries:
                logger.debug('{}: entry: {}'
                             .format(function_name, entry["title"]))
                if "date_published" in entry.keys():
                    date = entry["date_published"]
                    date = dt.rfc2822_to_iso8601(date)
                elif "date_modified" in entry.keys():
                    date = entry["date_modified"]
                    date = dt.rfc2822_to_iso8601(date)
                else:
                    date = dt.now()
                if "url" in entry.keys():
                    # link = complete_url(source, entry.link)
                    link = join_url(url, entry["url"])
                    link = trim_url(link)
                else:
                    link = url
                # title = feed["feed"]["title"]
                # title = "{}: *{}*".format(feed["feed"]["title"], entry.title)
                title = entry["title"] if "title" in entry.keys() else date
                entry_id = entry["id"] if "id" in entry.keys() else link
                feed_id = sqlite.get_feed_id(db_file, url)
                feed_id = feed_id[0]
                exist = sqlite.check_entry_exist(db_file, feed_id,
                                                 entry_id=entry_id,
                                                 title=title, link=link,
                                                 date=date)
                if not exist:
                    summary = entry["summary"] if "summary" in entry.keys() else ''
                    if not summary:
                        summary = (entry["content_html"]
                                   if "content_html" in entry.keys()
                                   else '')
                    if not summary:
                        summary = (entry["content_text"]
                                   if "content_text" in entry.keys()
                                   else '')
                    read_status = 0
                    pathname = urlsplit(link).path
                    string = (
                        "{} {} {}"
                        ).format(
                            title, summary, pathname)
                    if self.settings['default']['filter']:
                        print('Filter is now processing data.')
                        allow_list = config.is_include_keyword(db_file,
                                                               "allow", string)
                        if not allow_list:
                            reject_list = config.is_include_keyword(db_file,
                                                                    "deny",
                                                                    string)
                            if reject_list:
                                read_status = 1
                                logger.debug('Rejected : {}'
                                             '\n'
                                             'Keyword : {}'
                                             .format(link, reject_list))
                    if isinstance(date, int):
                        logger.error('Variable "date" is int: {}'.format(date))
                    media_link = ''
                    if "attachments" in entry.keys():
                        for e_link in entry["attachments"]:
                            try:
                                # if (link.rel == "enclosure" and
                                #     (link.type.startswith("audio/") or
                                #      link.type.startswith("image/") or
                                #      link.type.startswith("video/"))
                                #     ):
                                media_type = e_link["mime_type"][:e_link["mime_type"].index("/")]
                                if media_type in ("audio", "image", "video"):
                                    media_link = e_link["url"]
                                    media_link = join_url(url, e_link["url"])
                                    media_link = trim_url(media_link)
                                    break
                            except:
                                logger.error('KeyError: "url"\n'
                                             'Missing "url" attribute for {}'
                                             .format(url))
                                logger.error('Continue scanning for next '
                                             'potential enclosure of {}'
                                             .format(link))
                    entry = {
                        "title": title,
                        "link": link,
                        "enclosure": media_link,
                        "entry_id": entry_id,
                        "date": date,
                        "read_status": read_status
                        }
                    new_entries.extend([entry])
                    # await sqlite.add_entry(
                    #     db_file, title, link, entry_id,
                    #     url, date, read_status)
                    # await sqlite.set_date(db_file, url)
            if len(new_entries):
                feed_id = sqlite.get_feed_id(db_file, url)
                feed_id = feed_id[0]
                await sqlite.add_entries_and_update_feed_state(db_file, feed_id,
                                                               new_entries)


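# Illustrative sketch (not part of the original code path): the minimal JSON
# Feed document that scan_json() consumes. The field names below are the ones
# the function actually reads; the values are hypothetical.
#
# {
#   "title": "Example feed",
#   "date_published": "Sat, 01 Jan 2024 00:00:00 +0000",
#   "items": [
#     {
#       "id": "https://example.org/posts/1",
#       "url": "https://example.org/posts/1",
#       "title": "First post",
#       "summary": "Short text",
#       "content_html": "<p>Short text</p>",
#       "date_published": "Sat, 01 Jan 2024 00:00:00 +0000",
#       "attachments": [{"mime_type": "audio/mpeg",
#                        "url": "https://example.org/posts/1.mp3"}]
#     }
#   ]
# }

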
def view_feed(url, feed):
    function_name = sys._getframe().f_code.co_name
    logger.debug('{}: url: {}'
                 .format(function_name, url))
    if "title" in feed["feed"].keys():
        title = feed["feed"]["title"]
    else:
        title = urlsplit(url).netloc
    entries = feed.entries
    response = "Preview of {}:\n\n```\n".format(title)
    counter = 0
    for entry in entries:
        counter += 1
        if entry.has_key("title"):
            title = entry.title
        else:
            title = "*** No title ***"
        if entry.has_key("link"):
            # link = complete_url(source, entry.link)
            link = join_url(url, entry.link)
            link = trim_url(link)
        else:
            link = "*** No link ***"
        if entry.has_key("published"):
            date = entry.published
            date = dt.rfc2822_to_iso8601(date)
        elif entry.has_key("updated"):
            date = entry.updated
            date = dt.rfc2822_to_iso8601(date)
        else:
            date = "*** No date ***"
        response += ("Title : {}\n"
                     "Date : {}\n"
                     "Link : {}\n"
                     "Count : {}\n"
                     "\n"
                     .format(title, date, link, counter))
        if counter > 4:
            break
    response += (
        "```\nSource: {}"
        ).format(url)
    return response


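# Hedged usage sketch (assumption, not from the original module): view_feed()
# expects a feedparser result, so from an async caller one could do:
#
#   result = await fetch.http('https://example.org/feed.xml')  # hypothetical URL
#   feed = parse(result['content'])
#   preview = view_feed('https://example.org/feed.xml', feed)
#   # "preview" lists title, date, link and a counter for up to five entries.

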
def view_entry(url, feed, num):
    function_name = sys._getframe().f_code.co_name
    logger.debug('{}: url: {} num: {}'
                 .format(function_name, url, num))
    if "title" in feed["feed"].keys():
        title = feed["feed"]["title"]
    else:
        title = urlsplit(url).netloc
    entries = feed.entries
    num = int(num) - 1
    entry = entries[num]
    response = "Preview of {}:\n\n```\n".format(title)
    if entry.has_key("title"):
        title = entry.title
    else:
        title = "*** No title ***"
    if entry.has_key("published"):
        date = entry.published
        date = dt.rfc2822_to_iso8601(date)
    elif entry.has_key("updated"):
        date = entry.updated
        date = dt.rfc2822_to_iso8601(date)
    else:
        date = "*** No date ***"
    if entry.has_key("summary"):
        summary = entry.summary
        # Remove HTML tags
        summary = BeautifulSoup(summary, "lxml").text
        # TODO Limit text length
        summary = summary.replace("\n\n\n", "\n\n")
    else:
        summary = "*** No summary ***"
    if entry.has_key("link"):
        # link = complete_url(source, entry.link)
        link = join_url(url, entry.link)
        link = trim_url(link)
    else:
        link = "*** No link ***"
    response = ("{}\n"
                "\n"
                # "> {}\n"
                "{}\n"
                "\n"
                "{}\n"
                "\n"
                .format(title, summary, link))
    return response


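# Similarly, a hedged sketch for view_entry(): "num" is one-based, so the first
# entry of an already parsed feed would be previewed with hypothetical values:
#
#   preview = view_entry('https://example.org/feed.xml', feed, 1)
#   # Returns title, summary (HTML stripped with BeautifulSoup) and link.

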
async def download_feed(self, db_file, feed_url):
    """
    Get feed content.

    Parameters
    ----------
    db_file : str
        Path to database file.
    feed_url : str
        URL.
    """
    function_name = sys._getframe().f_code.co_name
    logger.debug('{}: db_file: {} url: {}'
                 .format(function_name, db_file, feed_url))
    if isinstance(feed_url, tuple): feed_url = feed_url[0]
    result = await fetch.http(feed_url)
    feed_id = sqlite.get_feed_id(db_file, feed_url)
    feed_id = feed_id[0]
    status_code = result['status_code']
    await sqlite.update_feed_status(db_file, feed_id, status_code)


def get_properties_of_feed(db_file, feed_id, feed):

    if feed.has_key('updated_parsed'):
        feed_updated = feed.updated_parsed
        try:
            feed_updated = dt.convert_struct_time_to_iso8601(feed_updated)
        except:
            feed_updated = ''
    else:
        feed_updated = ''

    entries_count = len(feed.entries)

    feed_version = feed.version if feed.has_key('version') else ''
    feed_encoding = feed.encoding if feed.has_key('encoding') else ''
    feed_language = feed.feed.language if feed.feed.has_key('language') else ''
    feed_icon = feed.feed.icon if feed.feed.has_key('icon') else ''
    feed_image = feed.feed.image.href if feed.feed.has_key('image') else ''
    feed_logo = feed.feed.logo if feed.feed.has_key('logo') else ''
    feed_ttl = feed.feed.ttl if feed.feed.has_key('ttl') else ''

    feed_properties = {
        "version" : feed_version,
        "encoding" : feed_encoding,
        "language" : feed_language,
        "rating" : '',
        "entries_count" : entries_count,
        "icon" : feed_icon,
        "image" : feed_image,
        "logo" : feed_logo,
        "ttl" : feed_ttl,
        "updated" : feed_updated,
        }

    return feed_properties


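# Hedged usage sketch for get_properties_of_feed(): the properties are taken
# from a feedparser result and accompany an existing feed row; the URL below is
# hypothetical.
#
#   feed = parse(result['content'])
#   feed_id = sqlite.get_feed_id(db_file, 'https://example.org/feed.xml')[0]
#   properties = get_properties_of_feed(db_file, feed_id, feed)
#   # properties['entries_count'], properties['language'], properties['updated'], ...

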
# TODO get all active feeds of active accounts and scan the feed with the earliest scanned time
# TODO Rename function name (idea: scan_and_populate)
def get_properties_of_entries(jid_bare, db_file, feed_url, feed_id, feed):
    """
    Get new entries.

    Parameters
    ----------
    db_file : str
        Path to database file.
    url : str, optional
        URL.
    """
    # print('MID', feed_url, jid_bare, 'get_properties_of_entries')
    function_name = sys._getframe().f_code.co_name
    logger.debug('{}: feed_id: {} url: {}'
                 .format(function_name, feed_id, feed_url))

    new_entries = []
    for entry in feed.entries:
        logger.debug('{}: entry: {}'.format(function_name, entry.link))
        if entry.has_key("published"):
            entry_published = entry.published
            entry_published = dt.rfc2822_to_iso8601(entry_published)
        else:
            entry_published = ''
        if entry.has_key("updated"):
            entry_updated = entry.updated
            entry_updated = dt.rfc2822_to_iso8601(entry_updated)
        else:
            entry_updated = dt.now()
        if entry.has_key("link"):
            # link = complete_url(source, entry.link)
            entry_link = join_url(feed_url, entry.link)
            entry_link = trim_url(entry_link)
        else:
            entry_link = feed_url
        # title = feed["feed"]["title"]
        # title = "{}: *{}*".format(feed["feed"]["title"], entry.title)
        entry_title = entry.title if entry.has_key("title") else entry_published
        entry_id = entry.id if entry.has_key("id") else entry_link
        exist = sqlite.check_entry_exist(db_file, feed_id,
                                         identifier=entry_id,
                                         title=entry_title,
                                         link=entry_link,
                                         published=entry_published)
        if not exist:
            read_status = 0
            # # Filter
            # pathname = urlsplit(link).path
            # string = (
            #     "{} {} {}"
            #     ).format(
            #         title, summary, pathname)
            # if self.settings['default']['filter']:
            #     print('Filter is now processing data.')
            #     allow_list = config.is_include_keyword(db_file,
            #                                            "allow", string)
            #     if not allow_list:
            #         reject_list = config.is_include_keyword(db_file,
            #                                                 "deny",
            #                                                 string)
            #         if reject_list:
            #             read_status = 1
            #             logger.debug('Rejected : {}'
            #                          '\n'
            #                          'Keyword : {}'
            #                          .format(link, reject_list))
            if isinstance(entry_published, int):
                logger.error('Variable "published" is int: {}'.format(entry_published))
            if isinstance(entry_updated, int):
                logger.error('Variable "updated" is int: {}'.format(entry_updated))

            # Authors
            entry_authors = []
            if entry.has_key('authors'):
                for author in entry.authors:
                    author_properties = {
                        'name' : author.name if author.has_key('name') else '',
                        'url' : author.href if author.has_key('href') else '',
                        'email' : author.email if author.has_key('email') else '',
                        }
                    entry_authors.extend([author_properties])
            elif entry.has_key('author_detail'):
                author_properties = {
                    'name' : entry.author_detail.name if entry.author_detail.has_key('name') else '',
                    'url' : entry.author_detail.href if entry.author_detail.has_key('href') else '',
                    'email' : entry.author_detail.email if entry.author_detail.has_key('email') else '',
                    }
                entry_authors.extend([author_properties])
            elif entry.has_key('author'):
                author_properties = {
                    'name' : entry.author,
                    'url' : '',
                    'email' : '',
                    }
                entry_authors.extend([author_properties])

            # Contributors
            entry_contributors = []
            if entry.has_key('contributors'):
                for contributor in entry.contributors:
                    contributor_properties = {
                        'name' : contributor.name if contributor.has_key('name') else '',
                        'url' : contributor.href if contributor.has_key('href') else '',
                        'email' : contributor.email if contributor.has_key('email') else '',
                        }
                    entry_contributors.extend([contributor_properties])

            # Tags
            entry_tags = []
            if entry.has_key('tags'):
                for tag in entry.tags:
                    tag_properties = {
                        'term' : tag.term if tag.has_key('term') else '',
                        'scheme' : tag.scheme if tag.has_key('scheme') else '',
                        'label' : tag.label if tag.has_key('label') else '',
                        }
                    entry_tags.extend([tag_properties])

            # Content
            entry_contents = []
            if entry.has_key('content'):
                for content in entry.content:
                    text = content.value if content.has_key('value') else ''
                    type = content.type if content.has_key('type') else ''
                    lang = content.lang if content.has_key('lang') else ''
                    base = content.base if content.has_key('base') else ''
                    entry_content = {
                        'text' : text,
                        'lang' : lang,
                        'type' : type,
                        'base' : base,
                        }
                    entry_contents.extend([entry_content])

            # Links and Enclosures
            entry_links = []
            if entry.has_key('links'):
                for link in entry.links:
                    link_properties = {
                        'url' : link.href if link.has_key('href') else '',
                        'rel' : link.rel if link.has_key('rel') else '',
                        'type' : link.type if link.has_key('type') else '',
                        'length' : '',
                        }
                    entry_links.extend([link_properties])
            # Element media:content is utilized by Mastodon
            if entry.has_key('media_content'):
                for link in entry.media_content:
                    link_properties = {
                        'url' : link['url'] if 'url' in link else '',
                        'rel' : 'enclosure',
                        'type' : link['type'] if 'type' in link else '',
                        # 'medium' : link['medium'] if 'medium' in link else '',
                        'length' : link['filesize'] if 'filesize' in link else '',
                        }
                    entry_links.extend([link_properties])
            if entry.has_key('media_thumbnail'):
                for link in entry.media_thumbnail:
                    link_properties = {
                        'url' : link['url'] if 'url' in link else '',
                        'rel' : 'enclosure',
                        'type' : '',
                        # 'medium' : 'image',
                        'length' : '',
                        }
                    entry_links.extend([link_properties])

            # Category
            entry_category = entry.category if entry.has_key('category') else ''

            # Comments
            entry_comments = entry.comments if entry.has_key('comments') else ''

            # href
            entry_href = entry.href if entry.has_key('href') else ''

            # Link: Same as entry.links[0].href in most if not all cases
            entry_link = entry.link if entry.has_key('link') else ''

            # Rating
            entry_rating = entry.rating if entry.has_key('rating') else ''

            # Summary
            entry_summary_text = entry.summary if entry.has_key('summary') else ''
            if entry.has_key('summary_detail'):
                entry_summary_type = entry.summary_detail.type if entry.summary_detail.has_key('type') else ''
                entry_summary_lang = entry.summary_detail.lang if entry.summary_detail.has_key('lang') else ''
                entry_summary_base = entry.summary_detail.base if entry.summary_detail.has_key('base') else ''
            else:
                entry_summary_type = ''
                entry_summary_lang = ''
                entry_summary_base = ''

            # Title
            entry_title = entry.title if entry.has_key('title') else ''
            if entry.has_key('title_detail'):
                entry_title_type = entry.title_detail.type if entry.title_detail.has_key('type') else ''
            else:
                entry_title_type = ''

            ###########################################################

            # media_type = e_link.type[:e_link.type.index("/")]
            # if (e_link.rel == "enclosure" and
            #     media_type in ("audio", "image", "video")):
            #     media_link = e_link.href
            #     media_link = join_url(url, e_link.href)
            #     media_link = trim_url(media_link)

            ###########################################################

            entry_properties = {
                "identifier": entry_id,
                "link": entry_link,
                "href": entry_href,
                "title": entry_title,
                "title_type": entry_title_type,
                'summary_text' : entry_summary_text,
                'summary_lang' : entry_summary_lang,
                'summary_type' : entry_summary_type,
                'summary_base' : entry_summary_base,
                'category' : entry_category,
                "comments": entry_comments,
                "rating": entry_rating,
                "published": entry_published,
                "updated": entry_updated,
                "read_status": read_status
                }

            new_entries.extend([{
                "entry_properties" : entry_properties,
                "entry_authors" : entry_authors,
                "entry_contributors" : entry_contributors,
                "entry_contents" : entry_contents,
                "entry_links" : entry_links,
                "entry_tags" : entry_tags
                }])
            # await sqlite.add_entry(
            #     db_file, title, link, entry_id,
            #     url, date, read_status)
            # await sqlite.set_date(db_file, url)
    return new_entries


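# Shape of the list returned by get_properties_of_entries(), shown here as an
# illustrative sketch; each element corresponds to one entry that does not yet
# exist in the database:
#
#   [
#     {
#       "entry_properties"   : {"identifier": ..., "link": ..., "title": ...,
#                               "published": ..., "updated": ..., "read_status": 0, ...},
#       "entry_authors"      : [{"name": ..., "url": ..., "email": ...}, ...],
#       "entry_contributors" : [{"name": ..., "url": ..., "email": ...}, ...],
#       "entry_contents"     : [{"text": ..., "lang": ..., "type": ..., "base": ...}, ...],
#       "entry_links"        : [{"url": ..., "rel": ..., "type": ..., "length": ...}, ...],
#       "entry_tags"         : [{"term": ..., "scheme": ..., "label": ...}, ...]
#     },
#     ...
#   ]

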
def get_document_title(data):
    function_name = sys._getframe().f_code.co_name
    logger.debug('{}'.format(function_name))
    try:
        document = Document(data)
        title = document.short_title()
    except:
        document = BeautifulSoup(data, 'html.parser')
        title = document.title.string
    return title


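# Minimal sketch of the fallback path (assumption: readability's Document is
# unavailable or fails), which relies on BeautifulSoup alone:
#
#   html_data = '<html><head><title>Example page</title></head><body></body></html>'
#   get_document_title(html_data)  # 'Example page' via document.title.string

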
def get_document_content(data):
    function_name = sys._getframe().f_code.co_name
    logger.debug('{}'.format(function_name))
    try:
        document = Document(data)
        content = document.summary()
    except:
        document = BeautifulSoup(data, 'html.parser')
        content = data
    return content


def get_document_content_as_text(data):
    function_name = sys._getframe().f_code.co_name
    logger.debug('{}'.format(function_name))
    try:
        document = Document(data)
        content = document.summary()
    except:
        document = BeautifulSoup(data, 'html.parser')
        content = data
    text = remove_html_tags(content)
    return text


def generate_document(data, url, ext, filename, readability=False):
    function_name = sys._getframe().f_code.co_name
    logger.debug('{}: url: {} ext: {} filename: {}'
                 .format(function_name, url, ext, filename))
    error = None
    if readability:
        try:
            document = Document(data)
            content = document.summary()
        except:
            content = data
            logger.warning('Check that package readability is installed.')
    else:
        content = data
    match ext:
        case "epub":
            filename = filename.split('.')
            filename.pop()
            filename = '.'.join(filename)
            error = generate_epub(content, filename)
            if error:
                logger.error(error)
                # logger.error(
                #     "Check that packages xml2epub is installed, "
                #     "or try again.")
        case "html":
            generate_html(content, filename)
        case "md":
            try:
                generate_markdown(content, filename)
            except:
                logger.warning('Check that package html2text '
                               'is installed, or try again.')
                error = 'Package html2text was not found.'
        case "pdf":
            error = generate_pdf(content, filename)
            if error:
                logger.error(error)
                # logger.warning(
                #     "Check that packages pdfkit and wkhtmltopdf "
                #     "are installed, or try again.")
                # error = (
                #     "Package pdfkit or wkhtmltopdf was not found.")
        case "txt":
            generate_txt(content, filename)
    if error:
        return error

    # TODO Either adapt it to filename
    # or change it to something else
    #filename = document.title()
    # with open(filename, 'w') as file:
    #     html_doc = document.summary()
    #     file.write(html_doc)


async def extract_image_from_feed(db_file, feed_id, url):
    function_name = sys._getframe().f_code.co_name
    logger.debug('{}: db_file: {} feed_id: {} url: {}'
                 .format(function_name, db_file, feed_id, url))
    feed_url = sqlite.get_feed_url(db_file, feed_id)
    feed_url = feed_url[0]
    result = await fetch.http(feed_url)
    if not result['error']:
        document = result['content']
        feed = parse(document)
        for entry in feed.entries:
            try:
                if entry.link == url:
                    for link in entry.links:
                        if (link.rel == "enclosure" and
                            link.type.startswith("image/")):
                            image_url = link.href
                            return image_url
            except:
                logger.error(url)
                logger.error('AttributeError: object has no attribute "link"')


async def extract_image_from_html(url):
    function_name = sys._getframe().f_code.co_name
    logger.debug('{}: url: {}'.format(function_name, url))
    result = await fetch.http(url)
    if not result['error']:
        data = result['content']
        try:
            document = Document(data)
            content = document.summary()
        except:
            content = data
            logger.warning('Check that package readability is installed.')
        tree = html.fromstring(content)
        # TODO Exclude banners, class="share" links etc.
        images = tree.xpath(
            '//img[not('
            'contains(@src, "avatar") or '
            'contains(@src, "emoji") or '
            'contains(@src, "icon") or '
            'contains(@src, "logo") or '
            'contains(@src, "letture") or '
            'contains(@src, "search") or '
            'contains(@src, "share") or '
            'contains(@src, "smiley")'
            ')]/@src')
        if len(images):
            image = images[0]
            image = str(image)
            image_url = complete_url(url, image)
            return image_url


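# Illustrative sketch of the XPath exclusion above, applied to a small inline
# document (assumption: same filtering idea, shortened to one predicate):
#
#   tree = html.fromstring('<div><img src="/logo.png"/><img src="/photo.jpg"/></div>')
#   tree.xpath('//img[not(contains(@src, "logo"))]/@src')  # ['/photo.jpg']

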
def generate_epub(text, filename):
    function_name = sys._getframe().f_code.co_name
    logger.debug('{}: text: {} pathname: {}'.format(function_name, text, filename))
    ## create an empty eBook
    filename_list = filename.split("/")
    file_title = filename_list.pop()
    directory = "/".join(filename_list)
    book = xml2epub.Epub(file_title)
    ## create chapters by url
    # chapter0 = xml2epub.create_chapter_from_string(text, title=filename, strict=False)
    chapter0 = xml2epub.create_chapter_from_string(text, strict=False)
    #### create chapter objects
    # chapter1 = xml2epub.create_chapter_from_url("https://dev.to/devteam/top-7-featured-dev-posts-from-the-past-week-h6h")
    # chapter2 = xml2epub.create_chapter_from_url("https://dev.to/ks1912/getting-started-with-docker-34g6")
    ## add chapters to your eBook
    try:
        book.add_chapter(chapter0)
        # book.add_chapter(chapter1)
        # book.add_chapter(chapter2)
        ## generate epub file
        book.create_epub(directory, absolute_location=filename)
    except ValueError as error:
        return error


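# Note on the filename handling above: the path is split into an output
# directory and a file title for xml2epub, while absolute_location pins the
# final location. A hedged example with a hypothetical, extension-less path
# (generate_document() strips the ".epub" suffix before calling this):
#
#   generate_epub('<h1>Example</h1><p>Body</p>', '/tmp/example')

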
def generate_html(text, filename):
    function_name = sys._getframe().f_code.co_name
    logger.debug('{}: text: {} filename: {}'.format(function_name, text, filename))
    with open(filename, 'w') as file:
        file.write(text)


def generate_markdown(text, filename):
    function_name = sys._getframe().f_code.co_name
    logger.debug('{}: text: {} filename: {}'.format(function_name, text, filename))
    h2m = html2text.HTML2Text()
    # Convert HTML to Markdown
    markdown = h2m.handle(text)
    with open(filename, 'w') as file:
        file.write(markdown)


def generate_pdf(text, filename):
    function_name = sys._getframe().f_code.co_name
    logger.debug('{}: text: {} filename: {}'.format(function_name, text, filename))
    try:
        pdfkit.from_string(text, filename)
    except IOError as error:
        return error
    except OSError as error:
        return error


def generate_txt(text, filename):
    function_name = sys._getframe().f_code.co_name
    logger.debug('{}: text: {} filename: {}'.format(function_name, text, filename))
    text = remove_html_tags(text)
    with open(filename, 'w') as file:
        file.write(text)


# This works too
# ''.join(xml.etree.ElementTree.fromstring(text).itertext())
def remove_html_tags(data):
    function_name = sys._getframe().f_code.co_name
    logger.debug('{}'.format(function_name))
    data = BeautifulSoup(data, "lxml").text
    data = data.replace("\n\n", "\n")
    return data


# TODO Add support for eDonkey, Gnutella, Soulseek
async def get_magnet(link):
    function_name = sys._getframe().f_code.co_name
    logger.debug('{}: {}'.format(function_name, link))
    parted_link = urlsplit(link)
    queries = parse_qs(parted_link.query)
    query_xt = queries["xt"][0]
    if query_xt.startswith("urn:btih:"):
        filename = queries["dn"][0]
        checksum = query_xt[len("urn:btih:"):]
        torrent = await fetch.magnet(link)
        logger.debug('Attempting to retrieve {} ({})'
                     .format(filename, checksum))
        if not torrent:
            logger.debug('Attempting to retrieve {} from HTTP caching service'
                         .format(filename))
            urls = [
                'https://watercache.libertycorp.org/get/{}/{}',
                'https://itorrents.org/torrent/{}.torrent?title={}',
                'https://firecache.libertycorp.org/get/{}/{}',
                'http://fcache63sakpihd44kxdduy6kgpdhgejgp323wci435zwy6kiylcnfad.onion/get/{}/{}'
                ]
            for url in urls:
                torrent = await fetch.http(url.format(checksum, filename))
                if torrent:
                    break
        return torrent


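# Hedged sketch of how get_magnet() decomposes a magnet URI (the URI below is
# hypothetical):
#
#   link = ('magnet:?xt=urn:btih:0123456789abcdef0123456789abcdef01234567'
#           '&dn=example.torrent')
#   queries = parse_qs(urlsplit(link).query)
#   queries['xt'][0]   # 'urn:btih:0123456789abcdef0123456789abcdef01234567'
#   queries['dn'][0]   # 'example.torrent'
#   # checksum = query_xt[len('urn:btih:'):]  ->  the 40-character info-hash

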
async def remove_nonexistent_entries(self, jid_bare, db_file, url, feed):
    """
    Remove entries that don't exist in a given parsed feed.
    Check the entries returned from feed and delete read non
    existing entries, otherwise move to table archive, if unread.

    Parameters
    ----------
    db_file : str
        Path to database file.
    url : str
        Feed URL.
    feed : list
        Parsed feed document.
    """
    function_name = sys._getframe().f_code.co_name
    logger.debug('{}: db_file: {} url: {}'
                 .format(function_name, db_file, url))
    feed_id = sqlite.get_feed_id(db_file, url)
    feed_id = feed_id[0]
    items = sqlite.get_entries_of_feed(db_file, feed_id)
    entries = feed.entries
    limit = Config.get_setting_value(self.settings, jid_bare, 'archive')
    for item in items:
        ix = item[0]
        entry_title = item[1]
        entry_link = item[2]
        entry_id = item[3]
        timestamp = item[4]
        read_status = item[5]
        valid = False
        for entry in entries:
            title = None
            link = None
            time = None
            # valid = False
            # TODO better check and don't repeat code
            if entry.has_key("id") and entry_id:
                if entry.id == entry_id:
                    # print("compare1:", entry.id)
                    # print("compare2:", entry_id)
                    # print("============")
                    valid = True
                    break
            else:
                if entry.has_key("title"):
                    title = entry.title
                else:
                    title = feed["feed"]["title"]
                if entry.has_key("link"):
                    link = join_url(url, entry.link)
                else:
                    link = url
                if entry.has_key("published") and timestamp:
                    # print("compare11:", title, link, time)
                    # print("compare22:", entry_title, entry_link, timestamp)
                    # print("============")
                    time = dt.rfc2822_to_iso8601(entry.published)
                    if (entry_title == title and
                        entry_link == link and
                        timestamp == time):
                        valid = True
                        break
                else:
                    if (entry_title == title and
                        entry_link == link):
                        # print("compare111:", title, link)
                        # print("compare222:", entry_title, entry_link)
                        # print("============")
                        valid = True
                        break
        # TODO better check and don't repeat code
        if not valid:
            # print("id: ", ix)
            # if title:
            #     print("title: ", title)
            #     print("entry_title: ", entry_title)
            # if link:
            #     print("link: ", link)
            #     print("entry_link: ", entry_link)
            # if entry.id:
            #     print("last_entry:", entry.id)
            #     print("entry_id: ", entry_id)
            # if time:
            #     print("time: ", time)
            #     print("timestamp: ", timestamp)
            # print("read: ", read_status)
            # breakpoint()

            # TODO Send to table archive
            # TODO Also make a regular/routine check for sources that
            # have been changed (though that can only happen when
            # manually editing)
            # ix = item[0]
            # print(">>> SOURCE: ", source)
            # print(">>> INVALID:", entry_title)
            # print("title:", entry_title)
            # print("link :", entry_link)
            # print("id :", entry_id)
            if read_status == 1:
                await sqlite.delete_entry_by_id(db_file, ix)
                # print(">>> DELETING:", entry_title)
            else:
                # print(">>> ARCHIVING:", entry_title)
                await sqlite.archive_entry(db_file, ix)
    await sqlite.maintain_archive(db_file, limit)


async def remove_nonexistent_entries_json(self, jid_bare, db_file, url, feed):
    """
    Remove entries that don't exist in a given parsed feed.
    Check the entries returned from feed and delete read non
    existing entries, otherwise move to table archive, if unread.

    Parameters
    ----------
    db_file : str
        Path to database file.
    url : str
        Feed URL.
    feed : list
        Parsed feed document.
    """
    function_name = sys._getframe().f_code.co_name
    logger.debug('{}: db_file: {}: url: {}'
                 .format(function_name, db_file, url))
    feed_id = sqlite.get_feed_id(db_file, url)
    feed_id = feed_id[0]
    items = sqlite.get_entries_of_feed(db_file, feed_id)
    entries = feed["items"]
    limit = Config.get_setting_value(self.settings, jid_bare, 'archive')
    for item in items:
        ix = item[0]
        entry_title = item[1]
        entry_link = item[2]
        entry_id = item[3]
        timestamp = item[4]
        read_status = item[5]
        valid = False
        for entry in entries:
            title = None
            link = None
            time = None
            # valid = False
            # TODO better check and don't repeat code
            # NOTE JSON Feed entries are plain dictionaries, so key checks use
            # the "in" operator.
            if "id" in entry and entry_id:
                if entry["id"] == entry_id:
                    # print("compare1:", entry.id)
                    # print("compare2:", entry_id)
                    # print("============")
                    valid = True
                    break
            else:
                if "title" in entry:
                    title = entry["title"]
                else:
                    title = feed["title"]
                if "link" in entry:
                    link = join_url(url, entry["link"])
                else:
                    link = url
                # "date_published" "date_modified"
                if "date_published" in entry and timestamp:
                    time = dt.rfc2822_to_iso8601(entry["date_published"])
                    if (entry_title == title and
                        entry_link == link and
                        timestamp == time):
                        valid = True
                        break
                else:
                    if (entry_title == title and
                        entry_link == link):
                        valid = True
                        break
        if not valid:
            print("CHECK ENTRY OF JSON FEED IN ARCHIVE")
            if read_status == 1:
                await sqlite.delete_entry_by_id(db_file, ix)
            else:
                await sqlite.archive_entry(db_file, ix)
    await sqlite.maintain_archive(db_file, limit)