Fix handling of keywords extracted from sqlite.

Improve modules fetch and crawl.
Add form for featured feeds.
Add form for roster manager.
Add form for subscribers manager.
WIP
This commit is contained in:
Schimon Jehudah 2024-02-17 23:21:44 +00:00
parent c1dec9d808
commit 7b98d32d7f
9 changed files with 1543 additions and 456 deletions

View file

@@ -498,22 +498,17 @@ def list_unread_entries(result, feed_title):
link = result[2]
link = remove_tracking_parameters(link)
link = (replace_hostname(link, "link")) or link
news_item = (
"\n{}\n{}\n{} [{}]\n"
).format(
str(title), str(link), str(feed_title), str(ix)
)
news_item = ("\n{}\n{}\n{} [{}]\n").format(str(title), str(link),
str(feed_title), str(ix))
return news_item
def list_search_results(query, results):
message = (
"Search results for '{}':\n\n```"
).format(query)
message = ("Search results for '{}':\n\n```"
.format(query))
for result in results:
message += (
"\n{}\n{}\n"
).format(str(result[0]), str(result[1]))
message += ("\n{}\n{}\n"
.format(str(result[0]), str(result[1])))
if len(results):
message += "```\nTotal of {} results".format(len(results))
else:
@@ -523,15 +518,13 @@ def list_search_results(query, results):
def list_feeds_by_query(db_file, query):
results = sqlite.search_feeds(db_file, query)
message = (
'Feeds containing "{}":\n\n```'
.format(query))
message = ('Feeds containing "{}":\n\n```'
.format(query))
for result in results:
message += (
'\nName : {} [{}]'
'\nURL : {}'
'\n'
.format(str(result[0]), str(result[1]), str(result[2])))
message += ('\nName : {} [{}]'
'\nURL : {}'
'\n'
.format(str(result[0]), str(result[1]), str(result[2])))
if len(results):
message += "\n```\nTotal of {} feeds".format(len(results))
else:
@@ -607,6 +600,15 @@ def list_last_entries(results, num):
return message
def pick_a_feed(lang=None):
config_dir = config.get_default_config_directory()
with open(config_dir + '/' + 'feeds.toml', mode="rb") as feeds:
urls = tomllib.load(feeds)
import random
url = random.choice(urls['feeds'])
return url
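Note: pick_a_feed accepts a lang parameter but, as committed, does not filter by it yet. A minimal standalone sketch of the intended behaviour, assuming the [[feeds]] layout of the feeds.toml file added later in this commit (pick_a_feed_from and its lang filtering are hypothetical, not part of this change):

import random
import tomllib  # standard library since Python 3.11

def pick_a_feed_from(pathname, lang=None):
    # Load the TOML document and pick a random [[feeds]] entry.
    with open(pathname, mode="rb") as feeds:
        entries = tomllib.load(feeds)['feeds']
    if lang:
        # Hypothetical use of lang; fall back to all entries if none match.
        entries = [e for e in entries if e.get('lang') == lang] or entries
    return random.choice(entries)

feed = pick_a_feed_from('feeds.toml', lang='en-us')
print(feed['name'], feed['link'])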
def list_feeds(results):
message = "\nList of subscriptions:\n\n```\n"
for result in results:
@@ -621,10 +623,11 @@ def list_feeds(results):
message += ('```\nTotal of {} subscriptions.\n'
.format(len(results)))
else:
message = ('List of subscriptions is empty.\n'
'To add feed, send a URL\n'
'Featured feed: '
'https://reclaimthenet.org/feed/')
url = pick_a_feed()
message = ('List of subscriptions is empty. To add a feed, send a URL.\n'
'Featured feed:\n*{}*\n{}'
.format(url['name'],
url['link']))
return message
@@ -678,8 +681,8 @@ def export_to_opml(jid, filename, results):
async def import_opml(db_file, url):
result = await fetch.http(url)
document = result[0]
if document:
if not result['error']:
document = result['content']
root = ET.fromstring(document)
before = await sqlite.get_number_of_items(
db_file, 'feeds')
@@ -703,9 +706,9 @@ async def add_feed(db_file, url):
exist = await sqlite.get_feed_id_and_name(db_file, url)
if not exist:
result = await fetch.http(url)
document = result[0]
status_code = result[1]
if document:
if not result['error']:
document = result['content']
status_code = result['status_code']
feed = parse(document)
# if is_feed(url, feed):
if is_feed(feed):
@@ -745,7 +748,7 @@ async def add_feed(db_file, url):
feed_id = await sqlite.get_feed_id(db_file, url)
feed_id = feed_id[0]
await sqlite.mark_feed_as_read(db_file, feed_id)
result_final = {'url' : url,
result_final = {'link' : url,
'index' : feed_id,
'name' : title,
'code' : status_code,
@@ -795,7 +798,7 @@ async def add_feed(db_file, url):
feed_id = await sqlite.get_feed_id(db_file, url)
feed_id = feed_id[0]
await sqlite.mark_feed_as_read(db_file, feed_id)
result_final = {'url' : url,
result_final = {'link' : url,
'index' : feed_id,
'name' : title,
'code' : status_code,
@@ -808,15 +811,26 @@ async def add_feed(db_file, url):
else:
# NOTE Do not be tempted to return a compact dictionary.
# That is, dictionary within dictionary
# Return multimple dictionaries.
# Return multiple dictionaries in a list or tuple.
result = await crawl.probe_page(url, document)
if isinstance(result, list):
if not result:
# Get out of the loop with dict indicating error.
result_final = {'link' : url,
'index' : None,
'name' : None,
'code' : status_code,
'error' : True,
'exist' : False}
break
elif isinstance(result, list):
# Get out of the loop and deliver a list of dicts.
result_final = result
break
else:
url = result['url']
# Go back up to the while loop and try again.
url = result['link']
else:
result_final = {'url' : url,
result_final = {'link' : url,
'index' : None,
'name' : None,
'code' : status_code,
@@ -828,7 +842,7 @@ async def add_feed(db_file, url):
else:
ix = exist[0]
name = exist[1]
result_final = {'url' : url,
result_final = {'link' : url,
'index' : ix,
'name' : name,
'code' : None,
@@ -854,145 +868,142 @@ async def scan_json(db_file, url):
"""
if isinstance(url, tuple): url = url[0]
result = await fetch.http(url)
try:
document = result[0]
status = result[1]
except:
return
new_entries = []
if document and status == 200:
feed = json.loads(document)
entries = feed["items"]
await remove_nonexistent_entries_json(
db_file, url, feed)
try:
feed_id = await sqlite.get_feed_id(db_file, url)
feed_id = feed_id[0]
# await sqlite.update_feed_validity(
# db_file, feed_id, valid)
if "date_published" in feed.keys():
updated = feed["date_published"]
try:
updated = dt.convert_struct_time_to_iso8601(updated)
except:
if not result['error']:
document = result['content']
status = result['status_code']
new_entries = []
if document and status == 200:
feed = json.loads(document)
entries = feed["items"]
await remove_nonexistent_entries_json(
db_file, url, feed)
try:
feed_id = await sqlite.get_feed_id(db_file, url)
feed_id = feed_id[0]
# await sqlite.update_feed_validity(
# db_file, feed_id, valid)
if "date_published" in feed.keys():
updated = feed["date_published"]
try:
updated = dt.convert_struct_time_to_iso8601(updated)
except:
updated = ''
else:
updated = ''
else:
updated = ''
feed_id = await sqlite.get_feed_id(db_file, url)
feed_id = feed_id[0]
await sqlite.update_feed_properties(
db_file, feed_id, len(feed["items"]), updated)
# await update_feed_status
except (
IncompleteReadError,
IncompleteRead,
error.URLError
) as e:
logging.error(e)
return
# new_entry = 0
for entry in entries:
if "date_published" in entry.keys():
date = entry["date_published"]
date = dt.rfc2822_to_iso8601(date)
elif "date_modified" in entry.keys():
date = entry["date_modified"]
date = dt.rfc2822_to_iso8601(date)
else:
date = dt.now()
if "url" in entry.keys():
# link = complete_url(source, entry.link)
link = join_url(url, entry["url"])
link = trim_url(link)
else:
link = url
# title = feed["feed"]["title"]
# title = "{}: *{}*".format(feed["feed"]["title"], entry.title)
title = entry["title"] if "title" in entry.keys() else date
entry_id = entry["id"] if "id" in entry.keys() else link
feed_id = await sqlite.get_feed_id(db_file, url)
feed_id = feed_id[0]
exist = await sqlite.check_entry_exist(
db_file, feed_id, entry_id=entry_id,
title=title, link=link, date=date)
if not exist:
summary = entry["summary"] if "summary" in entry.keys() else ''
if not summary:
summary = (entry["content_html"]
if "content_html" in entry.keys()
else '')
if not summary:
summary = (entry["content_text"]
if "content_text" in entry.keys()
else '')
read_status = 0
pathname = urlsplit(link).path
string = (
"{} {} {}"
).format(
title, summary, pathname)
allow_list = config.is_include_keyword(db_file, "allow",
string)
if not allow_list:
reject_list = config.is_include_keyword(db_file, "deny",
string)
if reject_list:
read_status = 1
logging.debug('Rejected : {}'
'\n'
'Keyword : {}'
.format(link, reject_list))
if isinstance(date, int):
logging.error('Variable "date" is int: {}'.format(date))
media_link = ''
if "attachments" in entry.keys():
for e_link in entry["attachments"]:
try:
# if (link.rel == "enclosure" and
# (link.type.startswith("audio/") or
# link.type.startswith("image/") or
# link.type.startswith("video/"))
# ):
media_type = e_link["mime_type"][:e_link["mime_type"].index("/")]
if media_type in ("audio", "image", "video"):
media_link = e_link["url"]
media_link = join_url(url, e_link["url"])
media_link = trim_url(media_link)
break
except:
logging.info('KeyError: "url"\n'
'Missing "url" attribute for {}'
.format(url))
logging.info('Continue scanning for next '
'potential enclosure of {}'
.format(link))
entry = {
"title": title,
"link": link,
"enclosure": media_link,
"entry_id": entry_id,
"date": date,
"read_status": read_status
}
new_entries.extend([entry])
# await sqlite.add_entry(
# db_file, title, link, entry_id,
# url, date, read_status)
# await sqlite.set_date(db_file, url)
if len(new_entries):
feed_id = await sqlite.get_feed_id(db_file, url)
feed_id = feed_id[0]
await sqlite.update_feed_properties(
db_file, feed_id, len(feed["items"]), updated)
# await update_feed_status
except (
IncompleteReadError,
IncompleteRead,
error.URLError
) as e:
logging.error(e)
return
# new_entry = 0
for entry in entries:
if "date_published" in entry.keys():
date = entry["date_published"]
date = dt.rfc2822_to_iso8601(date)
elif "date_modified" in entry.keys():
date = entry["date_modified"]
date = dt.rfc2822_to_iso8601(date)
else:
date = dt.now()
if "url" in entry.keys():
# link = complete_url(source, entry.link)
link = join_url(url, entry["url"])
link = trim_url(link)
else:
link = url
# title = feed["feed"]["title"]
# title = "{}: *{}*".format(feed["feed"]["title"], entry.title)
title = entry["title"] if "title" in entry.keys() else date
entry_id = entry["id"] if "id" in entry.keys() else link
feed_id = await sqlite.get_feed_id(db_file, url)
feed_id = feed_id[0]
exist = await sqlite.check_entry_exist(
db_file, feed_id, entry_id=entry_id,
title=title, link=link, date=date)
if not exist:
summary = entry["summary"] if "summary" in entry.keys() else ''
if not summary:
summary = (entry["content_html"]
if "content_html" in entry.keys()
else '')
if not summary:
summary = (entry["content_text"]
if "content_text" in entry.keys()
else '')
read_status = 0
pathname = urlsplit(link).path
string = (
"{} {} {}"
).format(
title, summary, pathname)
allow_list = await config.is_include_keyword(
db_file, "allow", string)
if not allow_list:
reject_list = await config.is_include_keyword(
db_file, "deny", string)
if reject_list:
read_status = 1
logging.debug(
"Rejected : {}\n"
"Keyword : {}".format(
link, reject_list))
if isinstance(date, int):
logging.error(
"Variable 'date' is int: {}".format(date))
media_link = ''
if "attachments" in entry.keys():
for e_link in entry["attachments"]:
try:
# if (link.rel == "enclosure" and
# (link.type.startswith("audio/") or
# link.type.startswith("image/") or
# link.type.startswith("video/"))
# ):
media_type = e_link["mime_type"][:e_link["mime_type"].index("/")]
if media_type in ("audio", "image", "video"):
media_link = e_link["url"]
media_link = join_url(url, e_link["url"])
media_link = trim_url(media_link)
break
except:
logging.info('KeyError: "url"\n'
'Missing "url" attribute for {}'
.format(url))
logging.info('Continue scanning for next '
'potential enclosure of {}'
.format(link))
entry = {
"title": title,
"link": link,
"enclosure": media_link,
"entry_id": entry_id,
"date": date,
"read_status": read_status
}
new_entries.extend([entry])
# await sqlite.add_entry(
# db_file, title, link, entry_id,
# url, date, read_status)
# await sqlite.set_date(db_file, url)
if len(new_entries):
feed_id = await sqlite.get_feed_id(db_file, url)
feed_id = feed_id[0]
await sqlite.add_entries_and_update_timestamp(
db_file, feed_id, new_entries)
await sqlite.add_entries_and_update_timestamp(db_file, feed_id,
new_entries)
async def view_feed(url):
while True:
result = await fetch.http(url)
document = result[0]
status = result[1]
if document:
if not result['error']:
document = result['content']
status = result['status_code']
feed = parse(document)
# if is_feed(url, feed):
if is_feed(feed):
@@ -1023,13 +1034,12 @@ async def view_feed(url):
date = dt.rfc2822_to_iso8601(date)
else:
date = "*** No date ***"
response += (
"Title : {}\n"
"Date : {}\n"
"Link : {}\n"
"Count : {}\n"
"\n"
).format(title, date, link, counter)
response += ("Title : {}\n"
"Date : {}\n"
"Link : {}\n"
"Count : {}\n"
"\n"
.format(title, date, link, counter))
if counter > 4:
break
response += (
@@ -1037,8 +1047,7 @@ async def view_feed(url):
).format(url)
break
else:
result = await crawl.probe_page(
url, document)
result = await crawl.probe_page(url, document)
if isinstance(result, str):
response = result
break
@@ -1054,9 +1063,9 @@ async def view_entry(url, num):
async def view_entry(url, num):
while True:
result = await fetch.http(url)
document = result[0]
status = result[1]
if document:
if not result['error']:
document = result['content']
status = result['status_code']
feed = parse(document)
# if is_feed(url, feed):
if is_feed(feed):
@@ -1094,19 +1103,17 @@ async def view_entry(url, num):
link = trim_url(link)
else:
link = "*** No link ***"
response = (
"{}\n"
"\n"
# "> {}\n"
"{}\n"
"\n"
"{}\n"
"\n"
).format(title, summary, link)
response = ("{}\n"
"\n"
# "> {}\n"
"{}\n"
"\n"
"{}\n"
"\n"
.format(title, summary, link))
break
else:
result = await crawl.probe_page(
url, document)
result = await crawl.probe_page(url, document)
if isinstance(result, str):
response = result
break
@@ -1119,6 +1126,7 @@ async def view_entry(url, num):
return response
# TODO Rename function name (idea: scan_and_populate)
async def scan(db_file, url):
"""
Check feeds for new entries.
@@ -1132,142 +1140,136 @@ async def scan(db_file, url):
"""
if isinstance(url, tuple): url = url[0]
result = await fetch.http(url)
try:
document = result[0]
status = result[1]
except:
return
new_entries = []
if document and status == 200:
feed = parse(document)
entries = feed.entries
# length = len(entries)
await remove_nonexistent_entries(
db_file, url, feed)
try:
if feed.bozo:
# bozo = (
# "WARNING: Bozo detected for feed: {}\n"
# "For more information, visit "
# "https://pythonhosted.org/feedparser/bozo.html"
# ).format(url)
# print(bozo)
valid = 0
else:
valid = 1
feed_id = await sqlite.get_feed_id(db_file, url)
feed_id = feed_id[0]
await sqlite.update_feed_validity(
db_file, feed_id, valid)
if "updated_parsed" in feed["feed"].keys():
updated = feed["feed"]["updated_parsed"]
try:
updated = dt.convert_struct_time_to_iso8601(updated)
except:
if not result['error']:
document = result['content']
status = result['status_code']
new_entries = []
if document and status == 200:
feed = parse(document)
entries = feed.entries
# length = len(entries)
await remove_nonexistent_entries(db_file, url, feed)
try:
if feed.bozo:
# bozo = (
# "WARNING: Bozo detected for feed: {}\n"
# "For more information, visit "
# "https://pythonhosted.org/feedparser/bozo.html"
# ).format(url)
# print(bozo)
valid = 0
else:
valid = 1
feed_id = await sqlite.get_feed_id(db_file, url)
feed_id = feed_id[0]
await sqlite.update_feed_validity(
db_file, feed_id, valid)
if "updated_parsed" in feed["feed"].keys():
updated = feed["feed"]["updated_parsed"]
try:
updated = dt.convert_struct_time_to_iso8601(updated)
except:
updated = ''
else:
updated = ''
else:
updated = ''
feed_id = await sqlite.get_feed_id(db_file, url)
feed_id = feed_id[0]
await sqlite.update_feed_properties(db_file, feed_id,
len(feed["entries"]), updated)
# await update_feed_status
except (IncompleteReadError, IncompleteRead, error.URLError) as e:
logging.error(e)
return
# new_entry = 0
for entry in entries:
if entry.has_key("published"):
date = entry.published
date = dt.rfc2822_to_iso8601(date)
elif entry.has_key("updated"):
date = entry.updated
date = dt.rfc2822_to_iso8601(date)
else:
date = dt.now()
if entry.has_key("link"):
# link = complete_url(source, entry.link)
link = join_url(url, entry.link)
link = trim_url(link)
else:
link = url
# title = feed["feed"]["title"]
# title = "{}: *{}*".format(feed["feed"]["title"], entry.title)
title = entry.title if entry.has_key("title") else date
entry_id = entry.id if entry.has_key("id") else link
feed_id = await sqlite.get_feed_id(db_file, url)
feed_id = feed_id[0]
exist = await sqlite.check_entry_exist(db_file, feed_id,
entry_id=entry_id,
title=title, link=link,
date=date)
if not exist:
summary = entry.summary if entry.has_key("summary") else ''
read_status = 0
pathname = urlsplit(link).path
string = (
"{} {} {}"
).format(
title, summary, pathname)
allow_list = config.is_include_keyword(db_file, "allow",
string)
if not allow_list:
reject_list = config.is_include_keyword(db_file, "deny",
string)
if reject_list:
read_status = 1
logging.debug('Rejected : {}'
'\n'
'Keyword : {}'.format(link,
reject_list))
if isinstance(date, int):
logging.error('Variable "date" is int: {}'
.format(date))
media_link = ''
if entry.has_key("links"):
for e_link in entry.links:
try:
# if (link.rel == "enclosure" and
# (link.type.startswith("audio/") or
# link.type.startswith("image/") or
# link.type.startswith("video/"))
# ):
media_type = e_link.type[:e_link.type.index("/")]
if e_link.has_key("rel"):
if (e_link.rel == "enclosure" and
media_type in ("audio", "image", "video")):
media_link = e_link.href
media_link = join_url(url, e_link.href)
media_link = trim_url(media_link)
break
except:
logging.info('KeyError: "href"\n'
'Missing "href" attribute for {}'
.format(url))
logging.info('Continue scanning for next '
'potential enclosure of {}'
.format(link))
entry = {
"title": title,
"link": link,
"enclosure": media_link,
"entry_id": entry_id,
"date": date,
"read_status": read_status
}
new_entries.extend([entry])
# await sqlite.add_entry(
# db_file, title, link, entry_id,
# url, date, read_status)
# await sqlite.set_date(db_file, url)
if len(new_entries):
feed_id = await sqlite.get_feed_id(db_file, url)
feed_id = feed_id[0]
await sqlite.update_feed_properties(
db_file, feed_id, len(feed["entries"]), updated)
# await update_feed_status
except (
IncompleteReadError,
IncompleteRead,
error.URLError
) as e:
logging.error(e)
return
# new_entry = 0
for entry in entries:
if entry.has_key("published"):
date = entry.published
date = dt.rfc2822_to_iso8601(date)
elif entry.has_key("updated"):
date = entry.updated
date = dt.rfc2822_to_iso8601(date)
else:
date = dt.now()
if entry.has_key("link"):
# link = complete_url(source, entry.link)
link = join_url(url, entry.link)
link = trim_url(link)
else:
link = url
# title = feed["feed"]["title"]
# title = "{}: *{}*".format(feed["feed"]["title"], entry.title)
title = entry.title if entry.has_key("title") else date
entry_id = entry.id if entry.has_key("id") else link
feed_id = await sqlite.get_feed_id(db_file, url)
feed_id = feed_id[0]
exist = await sqlite.check_entry_exist(
db_file, feed_id, entry_id=entry_id,
title=title, link=link, date=date)
if not exist:
summary = entry.summary if entry.has_key("summary") else ''
read_status = 0
pathname = urlsplit(link).path
string = (
"{} {} {}"
).format(
title, summary, pathname)
allow_list = await config.is_include_keyword(
db_file, "allow", string)
if not allow_list:
reject_list = await config.is_include_keyword(
db_file, "deny", string)
if reject_list:
read_status = 1
logging.debug(
"Rejected : {}\n"
"Keyword : {}".format(
link, reject_list))
if isinstance(date, int):
logging.error('Variable "date" is int: {}'
.format(date))
media_link = ''
if entry.has_key("links"):
for e_link in entry.links:
try:
# if (link.rel == "enclosure" and
# (link.type.startswith("audio/") or
# link.type.startswith("image/") or
# link.type.startswith("video/"))
# ):
media_type = e_link.type[:e_link.type.index("/")]
if e_link.has_key("rel"):
if (e_link.rel == "enclosure" and
media_type in ("audio", "image", "video")):
media_link = e_link.href
media_link = join_url(url, e_link.href)
media_link = trim_url(media_link)
break
except:
logging.info('KeyError: "href"\n'
'Missing "href" attribute for {}'
.format(url))
logging.info('Continue scanning for next '
'potential enclosure of {}'
.format(link))
entry = {
"title": title,
"link": link,
"enclosure": media_link,
"entry_id": entry_id,
"date": date,
"read_status": read_status
}
new_entries.extend([entry])
# await sqlite.add_entry(
# db_file, title, link, entry_id,
# url, date, read_status)
# await sqlite.set_date(db_file, url)
if len(new_entries):
feed_id = await sqlite.get_feed_id(db_file, url)
feed_id = feed_id[0]
await sqlite.add_entries_and_update_timestamp(
db_file, feed_id, new_entries)
await sqlite.add_entries_and_update_timestamp(db_file, feed_id,
new_entries)
async def download_document(self, message, jid, jid_file, message_text, ix_url,
@@ -1286,8 +1288,7 @@ async def download_document(self, message, jid, jid_file, message_text, ix_url,
status_type = 'dnd'
status_message = ('📃️ Processing request to produce {} document...'
.format(ext.upper()))
XmppPresence.send(self, jid, status_message,
status_type=status_type)
XmppPresence.send(self, jid, status_message, status_type=status_type)
db_file = config.get_pathname_to_database(jid_file)
cache_dir = config.get_default_cache_directory()
if ix_url:
@@ -1313,9 +1314,9 @@ async def download_document(self, message, jid, jid_file, message_text, ix_url,
logging.info('Processed URL (replace hostname): {}'
.format(url))
result = await fetch.http(url)
data = result[0]
code = result[1]
if data:
if not result['error']:
data = result['content']
code = result['status_code']
title = get_document_title(data)
title = title.strip().lower()
for i in (' ', '-'):
@@ -1332,8 +1333,7 @@ async def download_document(self, message, jid, jid_file, message_text, ix_url,
'Failed to export {}. Reason: {}'
.format(url, ext.upper(), error))
else:
url = await XmppUpload.start(self, jid,
filename)
url = await XmppUpload.start(self, jid, filename)
chat_type = await get_chat_type(self, jid)
XmppMessage.send_oob(self, jid, url, chat_type)
else:
@@ -1416,8 +1416,8 @@ async def extract_image_from_feed(db_file, feed_id, url):
feed_url = sqlite.get_feed_url(db_file, feed_id)
feed_url = feed_url[0]
result = await fetch.http(feed_url)
document = result[0]
if document:
if not result['error']:
document = result['content']
feed = parse(document)
for entry in feed.entries:
try:
@@ -1434,8 +1434,8 @@ async def extract_image_from_html(url):
async def extract_image_from_html(url):
result = await fetch.http(url)
data = result[0]
if data:
if not result['error']:
data = result['content']
try:
document = Document(data)
content = document.summary()
@@ -1723,4 +1723,4 @@ async def remove_nonexistent_entries_json(db_file, url, feed):
else:
await sqlite.archive_entry(db_file, ix)
limit = config.get_setting_value(db_file, "archive")
await sqlite.maintain_archive(db_file, limit)
await sqlite.maintain_archive(db_file, limit)

slixfeed/assets/feeds.toml Normal file
View file

@@ -0,0 +1,840 @@
# This is a default set of featured feeds organized by locality (i.e. language code).
# Copy this file to ~/.config/slixfeed/feeds.toml if you want to modify the list.
# NOTE <presence xml:lang="fr"></presence>
# TODO Consider splitting into files feeds-cz.toml, feeds-de.toml, feeds-el.toml.
[[feeds]]
lang = "cs-cz"
name = "Česká pirátská strana"
link = "https://www.pirati.cz/feeds/atom/"
tags = ["czech", "party", "pirate"]
[[feeds]]
lang = "cs-cz"
name = "Paralelní Polis"
link = "https://www.paralelnipolis.cz/feed/"
tags = ["communication", "czech", "hackerspace", "hardware", "internet", "makers", "technology", "telecom"]
[[feeds]]
lang = "de-at"
name = "Alle Neuigkeiten von jotwewe.de"
link = "http://www.jotwewe.de/alles_de.xml"
tags = ["jabber", "jabrss", "xmpp", "syndication"]
[[feeds]]
lang = "de-at"
name = "Piratenpartei Österreichs"
link = "https://piratenpartei.at/feed/"
tags = ["austria", "party", "pirate"]
[[feeds]]
lang = "de-ch"
name = "Chaos Computer Club Basel"
link = "https://ccc-basel.ch/feeds/all.atom.xml"
tags = ["ccc", "club", "computer", "switzerland"]
[[feeds]]
lang = "de-ch"
name = "GNU/Linux.ch"
link = "https://gnulinux.ch/rss.xml"
tags = ["computer", "industry", "linux", "mobile", "pda", "switzerland"]
[[feeds]]
lang = "de-ch"
name = "Piratenpartei Schweiz"
link = "https://www.piratenpartei.ch/feed/"
tags = ["party", "pirate", "switzerland"]
[[feeds]]
lang = "de-de"
name = "0d - Zeroday"
link = "https://0x0d.de/feed/mp3/"
tags = ["computer", "germany", "podcast"]
[[feeds]]
lang = "de-de"
name = "Berlin XMPP Meetup"
link = "https://mov.im/?feed/pubsub.movim.eu/berlin-xmpp-meetup"
tags = ["event", "germany", "xmpp"]
[[feeds]]
lang = "de-de"
name = "CCC Event Blog"
link = "https://events.ccc.de/feed"
tags = ["ccc", "club", "event"]
[[feeds]]
lang = "de-de"
name = "Chaos Computer Club: Updates"
link = "https://www.ccc.de/de/rss/updates.rdf"
tags = ["ccc", "news", "politics", "privacy", "surveillance", "technology", "germany"]
[[feeds]]
lang = "de-de"
name = "classless Kulla"
link = "https://www.classless.org/feed/atom/"
tags = ["europe", "germany", "history", "literature", "war"]
[[feeds]]
lang = "de-de"
name = "Digitalcourage"
link = "https://digitalcourage.de/rss.xml"
tags = ["culture", "digital", "germany"]
[[feeds]]
lang = "de-de"
name = "FSFE Events"
link = "https://fsfe.org/events/events.de.rss"
tags = ["computer", "events", "germany", "internet", "technology"]
[[feeds]]
lang = "de-de"
name = "FSFE News"
link = "https://fsfe.org/news/news.de.rss"
tags = ["computer", "events", "germany", "internet", "privacy", "technology"]
[[feeds]]
lang = "de-de"
name = "Jabber.de"
link = "https://www.jabber.de/?feed=rss2"
tags = ["germany", "jabber", "telecom", "xmpp"]
[[feeds]]
lang = "de-de"
name = "KI-Verband"
link = "https://ki-verband.de/feed/"
tags = ["ai", "germany", "machine", "model", "learning", "technology"]
[[feeds]]
lang = "de-de"
name = "media.ccc.de"
link = "https://media.ccc.de/news.atom"
tags = ["ccc", "technology", "video"]
[[feeds]]
lang = "de-de"
name = "pimux.de"
link = "https://pimux.de/blog.rss"
tags = ["germany", "jabber", "mastodon", "telecom", "xmpp"]
[[feeds]]
lang = "de-de"
name = "Sovereign Tech Fund"
link = "https://www.sovereigntechfund.de/de/feed.rss"
tags = ["germany", "technology", "open source"]
[[feeds]]
lang = "de-de"
name = "WinFuture News"
link = "https://static.winfuture.de/feeds/WinFuture-News-rss2.0.xml"
tags = ["computer", "germany", "technology"]
[[feeds]]
lang = "el-gr"
name = "Ηρακλής Παπαθεοδώρου • heracl.es"
link = "https://heracl.es/el/feed.xml"
tags = ["computer", "electronics", "greece", "technology"]
[[feeds]]
lang = "el-gr"
name = "Κόμμα Πειρατών Ελλάδας Pirate party of Greece"
link = "https://www.pirateparty.gr/feed/"
tags = ["greece", "party", "pirate"]
[[feeds]]
lang = "en-au"
name = "Pirate Party Australia"
link = "https://pirateparty.org.au/feed/podcast/"
tags = ["australia", "party", "pirate"]
[[feeds]]
lang = "en-ca"
name = "CAFE - The Canadian Association for Free Expression"
link = "http://cafe.nfshost.com/?feed=rss2"
tags = ["canada", "freedom", "government", "immigration", "society", "speech"]
[[feeds]]
lang = "en-ca"
name = "Christian Faith and Copyfreedom"
link = "https://singpolyma.net/2015/07/christian-faith-and-copyfreedom/feed/"
tags = ["christianity", "copy", "freedom", "religion", "software", "technology"]
[[feeds]]
lang = "en-ca"
name = "blog.jmp.chat's blog"
link = "https://blog.jmp.chat/atom.xml"
tags = ["jmp", "service", "sms", "telecom", "xmpp"]
[[feeds]]
lang = "en-ca"
name = "RantMedia Forum"
link = "https://smf.rantradio.com/index.php?type=atom;action=.xml"
tags = ["canada", "forum", "politics", "privacy", "surveillance"]
[[feeds]]
lang = "en-ca"
name = "Singpolyma"
link = "https://singpolyma.net/feed/"
tags = ["code", "computer", "culture", "jmp", "people", "software", "technology", "xmpp"]
[[feeds]]
lang = "en-gb"
name = "A sysadmin's (mis)adventures"
link = "https://blog.woodpeckersnest.space/feed/"
tags = ["computer", "internet", "people", "technology", "xmpp"]
[[feeds]]
lang = "en-gb"
name = "media.ccc.de"
link = "https://media.ccc.de/news.atom"
tags = ["ccc", "technology", "video"]
[[feeds]]
lang = "en-gb"
name = "Christof Meerwald"
link = "https://cmeerw.org/blog.atom"
tags = ["austria", "computer", "linux", "people", "xmpp"]
[[feeds]]
lang = "en-gb"
name = "German AI Association"
link = "https://ki-verband.de/en/feed/"
tags = ["ai", "machine", "model", "learning", "technology"]
[[feeds]]
lang = "en-gb"
name = "La Quadrature du Net"
link = "https://www.laquadrature.net/en/feed/"
tags = ["news", "politics", "privacy", "surveillance"]
[[feeds]]
lang = "en-gb"
name = "Pimux XMPP News"
link = "https://pimux.de/blog.rss"
tags = ["germany", "jabber", "mastodon", "telecom", "xmpp"]
[[feeds]]
lang = "en-gb"
name = "Pirate Party UK"
link = "https://pirateparty.org.uk/feed.xml"
tags = ["party", "pirate", "uk"]
[[feeds]]
lang = "en-gb"
name = "RIAT Institute"
link = "https://riat.at/feed/"
tags = ["art", "economics", "education", "hardware", "research", "technology"]
[[feeds]]
lang = "en-gb"
name = "Snikket Blog on Snikket Chat"
link = "https://snikket.org/blog/index.xml"
tags = ["chat", "jabber", "telecom", "xmpp"]
[[feeds]]
lang = "en-gb"
name = "The Brexit Party"
link = "https://www.thebrexitparty.org/feed/"
tags = ["europe", "politics", "uk"]
[[feeds]]
lang = "en-us"
name = "153 News - Videos Being Watched"
link = "https://153news.net/rss.php?mode=watching"
tags = ["news", "politics", "usa", "video"]
[[feeds]]
lang = "en-us"
name = "AudioBook Bay (ABB)"
link = "https://audiobookbay.is/feed/atom/"
tags = ["audiobook", "torrent"]
[[feeds]]
lang = "en-us"
name = "Bald & Beards"
link = "https://www.baldandbeards.com/feed/"
tags = ["lifestyle", "men"]
[[feeds]]
lang = "en-us"
name = "BlackListed News"
link = "https://www.blacklistednews.com/rss.php"
tags = ["news", "politics", "usa", "world"]
[[feeds]]
lang = "en-us"
name = "Canoe Mail"
link = "https://canoemail.net/rss.xml"
tags = ["email", "jabber", "telecom", "xmpp"]
[[feeds]]
lang = "en-us"
name = "CODEPINK - Women for Peace"
link = "https://www.codepink.org/news.rss"
tags = ["activism", "peace", "war", "women"]
[[feeds]]
lang = "en-us"
name = "Ctrl blog"
link = "https://feed.ctrl.blog/latest.atom"
tags = ["computer", "technology"]
[[feeds]]
lang = "en-us"
name = "Disroot Blog"
link = "https://disroot.org/en/blog.atom"
tags = ["decentralization", "privacy"]
[[feeds]]
lang = "en-us"
name = "F-Droid"
link = "https://f-droid.org/feed.xml"
tags = ["android", "pda", "privacy"]
[[feeds]]
lang = "en-us"
name = "Fairphone"
link = "https://www.fairphone.com/en/feed/"
tags = ["pda", "privacy"]
[[feeds]]
lang = "en-us"
name = "Fakeologist.com"
link = "https://fakeologist.com/feed/"
tags = ["news", "politics", "usa", "world"]
[[feeds]]
lang = "en-us"
name = "Fakeologist Forums"
link = "https://fakeologist.com/forums2/app.php/feed/news"
tags = ["forum", "politics", "usa", "world"]
[[feeds]]
lang = "en-us"
name = "FSFE Events"
link = "https://fsfe.org/events/events.en.rss"
tags = ["computer", "events", "internet", "technology"]
[[feeds]]
lang = "en-us"
name = "FSFE News"
link = "https://fsfe.org/news/news.en.rss"
tags = ["computer", "events", "internet", "privacy", "technology"]
[[feeds]]
lang = "en-us"
name = "Hacker Public Radio"
link = "https://hackerpublicradio.org/hpr_ogg_rss.php"
tags = ["computer", "internet", "podcast", "technology"]
[[feeds]]
lang = "en-us"
name = "Hardy Fruit Tree Nursery"
link = "https://www.hardyfruittrees.ca/feed/"
tags = ["farming", "food", "gardening", "survival"]
[[feeds]]
lang = "en-us"
name = "Ice Age Farmer"
link = "https://www.iceagefarmer.com/feed/"
tags = ["farming", "food", "gardening", "survival"]
[[feeds]]
lang = "en-us"
name = "International Consortium of Investigative Journalists"
link = "https://www.icij.org/feed/"
tags = ["news", "politics", "usa", "world"]
[[feeds]]
lang = "en-us"
name = "Jacob's Unnamed Blog"
link = "https://jacobwsmith.xyz/feed.xml"
tags = ["book", "community", "culture", "family", "finance", "lifestyle", "market", "usa"]
[[feeds]]
lang = "en-us"
name = "Juicing for Health"
link = "https://juicing-for-health.com/feed"
tags = ["beverage", "drink", "recipe"]
[[feeds]]
lang = "en-us"
name = "karson777"
link = "https://videos.danksquad.org/feeds/videos.xml?videoChannelId=4711"
tags = ["computer", "game", "internet", "linux", "software", "technology"]
[[feeds]]
lang = "en-us"
name = "Larken Rose"
link = "http://larkenrose.com/?format=feed&type=atom"
tags = ["news", "politics", "usa"]
[[feeds]]
lang = "en-us"
name = "LibriVox's New Releases"
link = "https://librivox.org/rss/latest_releases"
tags = ["audiobook"]
[[feeds]]
lang = "en-us"
name = "Massachusetts Pirate Party"
link = "https://masspirates.org/blog/feed/"
tags = ["massachusetts", "party", "pirate", "usa"]
[[feeds]]
lang = "en-us"
name = "Mercola.com"
link = "https://articles.mercola.com/sites/articles/rss.aspx"
tags = ["health"]
[[feeds]]
lang = "en-us"
name = "Monal IM"
link = "https://monal-im.org/index.xml"
tags = ["iphone", "xmpp"]
[[feeds]]
lang = "en-us"
name = "Mom on a Mission"
link = "https://www.mom-on-a-mission.blog/all-posts?format=rss"
tags = ["family", "farming", "food", "gardening", "survival"]
[[feeds]]
lang = "en-us"
name = "NLnet News"
link = "https://nlnet.nl/feed.atom"
tags = ["decentralization", "privacy"]
[[feeds]]
lang = "en-us"
name = "nobulart"
link = "https://nobulart.com/feed/"
tags = ["news", "survival", "politics", "usa", "world"]
[[feeds]]
lang = "en-us"
name = "Opt Out Podcast"
link = "https://optoutpod.com/index.xml"
tags = ["podcast", "politics", "privacy", "surveillance"]
[[feeds]]
lang = "en-us"
name = "Phish.in Music Updates"
link = "https://phish.in/feeds/rss"
tags = ["music"]
[[feeds]]
lang = "en-us"
name = "PINE64"
link = "https://www.pine64.org/feed/"
tags = ["pda", "privacy"]
[[feeds]]
lang = "en-us"
name = "PineTalk Podcast"
link = "https://www.pine64.org/feed/opus/"
tags = ["pda", "podcast", "privacy"]
[[feeds]]
lang = "en-us"
name = "Pirate Parties International"
link = "https://pp-international.net/feed/"
tags = ["party", "pirate", "international"]
[[feeds]]
lang = "en-us"
name = "Planet Jabber"
link = "https://planet.jabber.org/atom.xml"
tags = ["xmpp"]
[[feeds]]
lang = "en-us"
name = "Ploum.net"
link = "https://ploum.net/atom_en.xml"
tags = ["computer", "decentralization", "internet", "linux", "technology"]
[[feeds]]
lang = "en-us"
name = "PrivacySavvy"
link = "https://privacysavvy.com/"
tags = ["privacy"]
[[feeds]]
lang = "en-us"
name = "postmarketOS Podcast"
link = "https://cast.postmarketos.org/feed.rss"
tags = ["pda", "podcast", "privacy"]
[[feeds]]
lang = "en-us"
name = "Project Gemini news"
link = "https://gemini.circumlunar.space/news/atom.xml"
tags = ["gemini", "internet"]
[[feeds]]
lang = "en-us"
name = "PUNCH"
link = "https://punchdrink.com/feed/"
tags = ["beverage", "drink", "recipe"]
[[feeds]]
lang = "en-us"
name = "Radio 3Fourteen"
link = "https://redice.tv/rss/radio-3fourteen"
tags = ["culture", "podcast", "politics", "radio", "usa"]
[[feeds]]
lang = "en-us"
name = "Real Liberty Media"
link = "https://www.reallibertymedia.com/category/podcasts/feed/?redirect=no"
tags = ["culture", "news", "podcast", "politics", "privacy", "surveillance", "usa"]
[[feeds]]
lang = "en-us"
name = "Reclaim The Net"
link = "https://reclaimthenet.org/feed"
tags = ["news", "politics", "privacy", "surveillance"]
[[feeds]]
lang = "en-us"
name = "Red Ice News"
link = "https://redice.tv/rss/news"
tags = ["culture", "news", "politics", "usa"]
[[feeds]]
lang = "en-us"
name = "Red Ice Radio"
link = "https://redice.tv/rss/red-ice-radio"
tags = ["culture", "podcast", "politics", "radio", "usa"]
[[feeds]]
lang = "en-us"
name = "Red Ice TV"
link = "https://redice.tv/rss/red-ice-tv"
tags = ["culture", "podcast", "politics", "usa", "vodcast"]
[[feeds]]
lang = "en-us"
name = "Redecentralize Blog"
link = "https://redecentralize.org/blog/feed.rss"
tags = ["podcast", "privacy", "surveillance", "vodcast"]
[[feeds]]
lang = "en-us"
name = "Road Runners Club of America"
link = "https://www.rrca.org/feed/"
tags = ["jog", "road", "run", "sports", "usa"]
[[feeds]]
lang = "en-us"
name = "Seymour Hersh"
link = "https://seymourhersh.substack.com/feed"
tags = ["news", "politics", "usa", "war"]
[[feeds]]
lang = "en-us"
name = "Signs of the Times"
link = "https://www.sott.net/xml_engine/signs_rss"
tags = ["europe", "industry", "news", "politics", "usa", "war", "world"]
[[feeds]]
lang = "en-us"
name = "Simplified Privacy"
link = "https://simplifiedprivacy.com/feed/"
tags = ["news", "privacy", "surveillance", "vodcast"]
[[feeds]]
lang = "en-us"
name = "Software Freedom Conservancy News"
link = "https://sfconservancy.org/feeds/news/"
tags = ["culture", "free software", "freedom", "liberty", "open source", "software"]
[[feeds]]
lang = "en-us"
name = "Software Freedom Podcast"
link = "http://fsfe.org/news/podcast.en.rss"
tags = ["computer", "podcast", "technology"]
[[feeds]]
lang = "en-us"
name = "Stop Spraying Us!"
link = "http://www.stopsprayingus.com/feed/"
tags = ["activism", "geoengineering"]
[[feeds]]
lang = "en-us"
name = "Sweet Home 3D Blog"
link = "http://www.sweethome3d.com/blog/rss.xml"
tags = ["3d", "architecture", "design", "game"]
[[feeds]]
lang = "en-us"
name = "Take Back Our Tech"
link = "https://takebackourtech.org/rss/"
tags = ["internet", "privacy", "surveillance"]
[[feeds]]
lang = "en-us"
name = "The Bald Brothers"
link = "https://thebaldbrothers.com/feed/"
tags = ["lifestyle", "men"]
[[feeds]]
lang = "en-us"
name = "The 250kb Club"
link = "https://250kb.club/rss.xml"
tags = ["webring"]
[[feeds]]
lang = "en-us"
name = "The Conscious Resistance Network"
link = "https://theconsciousresistance.com/feed/"
tags = ["culture", "government", "podcast", "politics", "privacy", "surveillance", "usa"]
[[feeds]]
lang = "en-us"
name = "The Corbett Report"
link = "https://www.corbettreport.com/feed/"
tags = ["podcast", "politics", "privacy", "surveillance", "usa", "vodcast"]
[[feeds]]
lang = "en-us"
name = "The Organic Prepper"
link = "https://www.theorganicprepper.com/feed/"
tags = ["farming", "food", "gardening", "survival"]
[[feeds]]
lang = "en-us"
name = "The XMPP Blog on XMPP"
link = "https://xmpp.org/feeds/all.atom.xml"
tags = ["jabber", "telecom", "xmpp"]
[[feeds]]
lang = "en-us"
name = "Truthstream Media"
link = "http://truthstreammedia.com/feed/"
tags = ["culture", "privacy", "surveillance", "usa", "vodcast"]
[[feeds]]
lang = "en-us"
name = "United States Pirate Party"
link = "https://uspirates.org/feed/"
tags = ["party", "pirate", "usa"]
[[feeds]]
lang = "en-us"
name = "Xonotic"
link = "https://xonotic.org/index.xml"
tags = ["3d", "game"]
[[feeds]]
lang = "en-us"
name = "yaxim"
link = "https://yaxim.org/atom.xml"
tags = ["android", "germany", "jabber", "telecom", "xmpp"]
[[feeds]]
lang = "es-es"
name = "Disroot Blog"
link = "https://disroot.org/es/blog.atom"
tags = ["decentralization", "privacy"]
[[feeds]]
lang = "fr-fr"
name = "Agate Blue"
link = "https://agate.blue/feed.xml"
tags = ["computer", "music"]
[[feeds]]
lang = "fr-fr"
name = "Archlinux.fr [Forums]"
link = "https://forums.archlinux.fr/app.php/feed"
tags = ["forum", "linux"]
[[feeds]]
lang = "fr-fr"
name = "Cours et Tutoriels sur le Langage SQL"
link = "https://sql.sh/feed"
tags = ["sql", "tutorial"]
[[feeds]]
lang = "fr-fr"
name = "Developpez"
link = "https://www.developpez.com/index/atom"
tags = ["technology"]
[[feeds]]
lang = "fr-fr"
name = "Disroot Blog"
link = "https://disroot.org/fr/blog.atom"
tags = ["decentralization", "privacy"]
[[feeds]]
lang = "fr-fr"
name = "Framablog"
link = "https://framablog.org/feed/"
tags = ["fediverse", "framasoft", "open source", "peertube", "privacy", "software", "xmpp"]
[[feeds]]
lang = "fr-fr"
name = "FSFE Events"
link = "https://fsfe.org/events/events.fr.rss"
tags = ["computer", "events", "internet", "technology"]
[[feeds]]
lang = "fr-fr"
name = "FSFE News"
link = "https://fsfe.org/news/news.fr.rss"
tags = ["computer", "events", "internet", "privacy", "technology"]
[[feeds]]
lang = "fr-fr"
name = "La Quadrature du Net"
link = "https://www.laquadrature.net/feed/"
tags = ["news", "politics", "privacy", "surveillance"]
[[feeds]]
lang = "fr-fr"
name = "Ploum.net"
link = "https://ploum.net/atom_fr.xml"
tags = ["computer", "decentralization", "internet", "linux", "technology"]
[[feeds]]
lang = "fr-fr"
name = "Wiki de sebsauvage.net"
link = "https://sebsauvage.net/wiki/feed.php"
tags = ["computer", "network"]
[[feeds]]
lang = "he-il"
name = "ALIVE528"
link = "https://alive528.com/feed/"
tags = ["health", "news", "politics", "privacy", "surveillance"]
[[feeds]]
lang = "he-il"
name = "אגוגו מגזין בריאות"
link = "https://www.agogo.co.il/feed/"
tags = ["food", "health"]
[[feeds]]
lang = "he-il"
name = "האייל הקורא"
link = "http://www.haayal.co.il/xml/rss"
tags = ["news", "politics"]
[[feeds]]
lang = "he-il"
name = "העין השביעית"
link = "https://www.the7eye.org.il/feed"
tags = ["industry", "media", "news", "propaganda"]
[[feeds]]
lang = "he-il"
name = "מזבלה"
link = "https://mizbala.com/feed"
tags = ["industry", "media", "news", "propaganda"]
[[feeds]]
lang = "he-il"
name = "פרויקט אמת אחרת"
link = "https://www.emetaheret.org.il/feed/"
tags = ["food", "health", "media", "news", "politics"]
[[feeds]]
lang = "he-il"
name = "שיחה מקומית"
link = "https://www.mekomit.co.il/feed/"
tags = ["news", "politics"]
[[feeds]]
lang = "it-it"
name = "Diggita / Prima Pagina"
link = "https://diggita.com/rss.php"
tags = ["computer", "culture", "food", "technology"]
[[feeds]]
lang = "it-it"
name = "Feddit.it"
link = "https://feddit.it/feeds/local.xml?sort=Active"
tags = ["fediverse", "forum"]
[[feeds]]
lang = "it-it"
name = "A sysadmin's (mis)adventures"
link = "https://blog.woodpeckersnest.space/feed/"
tags = ["computer", "internet", "people", "technology", "xmpp"]
[[feeds]]
lang = "it-it"
name = "Avvocato a Roma"
link = "https://www.studiosabatino.it/feed/"
tags = ["law"]
[[feeds]]
lang = "it-it"
name = "Disroot Blog"
link = "https://disroot.org/it/blog.atom"
tags = ["decentralization", "privacy"]
[[feeds]]
lang = "it-it"
name = "LinuxTrent"
link = "https://www.linuxtrent.it/feed/"
tags = ["computer", "internet", "linux", "technology", "xmpp"]
[[feeds]]
lang = "it-it"
name = "Mario Sabatino Gemini capsule"
link = "https://gemini.sabatino.cloud/rss.xml"
tags = ["computer", "people", "xmpp"]
[[feeds]]
lang = "it-it"
name = "XMPP-IT Italian Community"
link = "https://www.xmpp-it.net/feed/"
tags = ["xmpp"]
[[feeds]]
lang = "ja-jp"
name = "ニュース速報(総合)"
link = "https://mainichi.jp/rss/etc/mainichi-flash.rss"
tags = ["japan", "news", "politics", "world"]
[[feeds]]
lang = "nl-nl"
name = "Piratenpartij"
link = "https://piratenpartij.nl/feed/"
tags = ["netherlands", "party", "pirate"]
[[feeds]]
lang = "pl-pl"
name = "Fundacja Internet. Czas działać!"
link = "https://www.internet-czas-dzialac.pl/rss/"
tags = ["computer", "technology", "design"]
[[feeds]]
lang = "ru-ru"
name = "Disroot Blog"
link = "https://disroot.org/ru/blog.atom"
tags = ["decentralization", "privacy"]
[[feeds]]
lang = "ru-ru"
name = "За вашу и нашу Свободу!"
link = "https://lev-sharansky2.livejournal.com/data/atom"
tags = ["culture"]
[[feeds]]
lang = "sv-se"
name = "Piratpartiet"
link = "https://piratpartiet.se/feed/"
tags = ["party", "pirate", "sweden"]
[[feeds]]
lang = "vi-vn"
name = "Trần H. Trung"
link = "https://trung.fun/atom.xml"
tags = ["artist", "computer", "filmmaker", "people", "technology", "vietnam", "xmpp"]

View file

@@ -35,8 +35,11 @@ import tomli_w
import tomllib
def get_setting_value(db_file, key):
value = (sqlite.get_setting_value(db_file, key)[0] or
get_value("settings", "Settings", key))
value = sqlite.get_setting_value(db_file, key)
if value:
value = value[0]
else:
value = get_value("settings", "Settings", key)
try:
value = int(value)
except ValueError as e:
@@ -448,7 +451,7 @@ async def remove_from_list(newwords, keywords):
return val
async def is_include_keyword(db_file, key, string):
def is_include_keyword(db_file, key, string):
"""
Check keyword match.
@@ -468,7 +471,8 @@ async def is_include_keyword(db_file, key, string):
"""
# async def reject(db_file, string):
# async def is_blacklisted(db_file, string):
keywords = sqlite.get_filter_value(db_file, key) or ''
keywords = sqlite.get_filter_value(db_file, key)
keywords = keywords[0] if keywords else ''
keywords = keywords.split(",")
keywords = keywords + (open_config_file("lists.toml")[key])
for keyword in keywords:
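This is the fix the commit message refers to: sqlite.get_filter_value returns a whole row (a tuple), not a bare string, so the value must be unwrapped before splitting. A small illustration with an assumed stored value:

row = ('word1,word2',)            # e.g. what get_filter_value returns
keywords = row[0] if row else ''  # unwrap the row to its string
keywords = keywords.split(',')    # ['word1', 'word2']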

View file

@@ -108,7 +108,7 @@ from urllib.parse import urlsplit, urlunsplit
# return await callback(url)
async def probe_page(url, document):
async def probe_page(url, document=None):
"""
Parameters
----------
@@ -122,32 +122,34 @@ async def probe_page(url, document):
result : list or str
Single URL as list or selection of URLs as str.
"""
result = None
if not document:
response = await fetch.http(url)
if not response['error']:
document = response['content']
try:
# tree = etree.fromstring(res[0]) # etree is for xml
tree = html.fromstring(document)
result = None
except:
result = (
"> {}\nFailed to parse URL as feed."
).format(url)
logging.debug("Failed to parse URL as feed for {}.".format(url))
result = {'link' : None,
'index' : None,
'name' : None,
'code' : None,
'error' : True,
'exist' : None}
if not result:
logging.debug(
"Feed auto-discovery engaged for {}".format(url))
logging.debug("Feed auto-discovery engaged for {}".format(url))
result = await feed_mode_auto_discovery(url, tree)
if not result:
logging.debug(
"Feed link scan mode engaged for {}".format(url))
logging.debug("Feed link scan mode engaged for {}".format(url))
result = await feed_mode_scan(url, tree)
if not result:
logging.debug(
"Feed arbitrary mode engaged for {}".format(url))
logging.debug("Feed arbitrary mode engaged for {}".format(url))
result = await feed_mode_guess(url, tree)
if not result:
logging.debug(
"No feeds were found for {}".format(url))
result = (
"> {}\nNo news feeds were found for URL."
).format(url)
logging.debug("No feeds were found for {}".format(url))
result = None
return result
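Since document is now optional, probe_page can be handed a bare URL and will fetch the page itself (as _handle_promoted does below). A hedged sketch of the three result shapes a caller has to handle, based on the branching in action.add_feed above (the demo wrapper is illustrative only):

import asyncio
import slixfeed.crawl as crawl

async def demo(url):
    result = await crawl.probe_page(url)  # no document: probe_page fetches it
    if not result:
        print('No feeds were found')      # None: nothing discovered
    elif isinstance(result, list):
        for feed in result:               # list: several candidate feeds
            print(feed['name'], feed['link'])
    else:
        print(result['link'])             # dict: one feed, or an error record

asyncio.run(demo('http://example.org'))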
@@ -182,8 +184,9 @@ async def feed_mode_guess(url, tree):
) if '.rss' not in paths else -1
# if paths.index('.rss'):
# paths.extend([".atom", ".feed", ".rdf", ".rss"])
parted_url_path = parted_url.path if parted_url.path else '/'
for path in paths:
address = join_url(url, parted_url.path.split('/')[1] + path)
address = join_url(url, parted_url_path.split('/')[1] + path)
if address not in urls:
urls.extend([address])
# breakpoint()
@@ -299,13 +302,15 @@ async def feed_mode_auto_discovery(url, tree):
async def process_feed_selection(url, urls):
feeds = {}
for i in urls:
res = await fetch.http(i)
status_code = res[1]
if status_code == 200:
try:
feeds[i] = [parse(res[0])]
except:
continue
result = await fetch.http(i)
if not result['error']:
document = result['content']
status_code = result['status_code']
if status_code == 200: # NOTE This line might be redundant
try:
feeds[i] = [parse(document)]
except:
continue
message = (
"Web feeds found for {}\n\n```\n"
).format(url)
@@ -337,7 +342,7 @@ async def process_feed_selection(url, urls):
# URL has been fetched, so that the receiving
# function will scan that single URL instead of
# listing it as a message.
url = {'url' : feed_url,
url = {'link' : feed_url,
'index' : None,
'name' : feed_name,
'code' : status_code,

View file

@@ -106,6 +106,7 @@ def http_response(url):
response = None
return response
async def http(url):
"""
Download content of given URL.
@@ -120,13 +121,10 @@ async def http(url):
result : dict
Document content and metadata, or error information.
"""
user_agent = (
config.get_value(
"settings", "Network", "user-agent")
) or 'Slixfeed/0.1'
user_agent = (config.get_value("settings", "Network", "user-agent")
or 'Slixfeed/0.1')
headers = {'User-Agent': user_agent}
proxy = (config.get_value(
"settings", "Network", "http_proxy")) or ''
proxy = (config.get_value("settings", "Network", "http_proxy") or '')
timeout = ClientTimeout(total=10)
async with ClientSession(headers=headers) as session:
# async with ClientSession(trust_env=True) as session:
@@ -136,36 +134,39 @@ async def http(url):
timeout=timeout
) as response:
status = response.status
if response.status == 200:
if status == 200:
try:
doc = await response.text()
# print (response.content_type)
msg = [doc, status]
document = await response.text()
result = {'charset': response.charset,
'content': document,
'content_length': response.content_length,
'content_type': response.content_type,
'error': False,
'message': None,
'original_url': url,
'status_code': status,
'response_url': response.url}
except:
# msg = [
# False,
# ("The content of this document "
# "doesn't appear to be textual."
# )
# ]
msg = [
False, "Document is too large or is not textual."
]
result = {'error': True,
'message': 'Could not get document.',
'original_url': url,
'status_code': status,
'response_url': response.url}
else:
msg = [
False, "HTTP Error: " + str(status)
]
result = {'error': True,
'message': 'HTTP Error: ' + str(status),
'original_url': url,
'status_code': status,
'response_url': response.url}
except ClientError as e:
# print('Error', str(e))
msg = [
False, "Error: " + str(e)
]
result = {'error': True,
'message': 'Error: ' + str(e),
'original_url': url}
except TimeoutError as e:
# print('Timeout:', str(e))
msg = [
False, "Timeout: " + str(e)
]
return msg
result = {'error': True,
'message': 'Timeout: ' + str(e),
'original_url': url}
return result
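With fetch.http now returning a dictionary rather than a [document, status] pair, callers check the error flag before touching the content, as the updated call sites in this commit do. A minimal consumer sketch using the keys defined above (get_document is a hypothetical helper, not part of this change):

import logging
import slixfeed.fetch as fetch

async def get_document(url):
    result = await fetch.http(url)
    if not result['error']:
        return result['content'], result['status_code']
    # 'message' and 'original_url' are present on every error path;
    # 'status_code' is absent when the request itself failed, hence .get().
    logging.warning('%s for %s', result['message'], result['original_url'])
    return None, result.get('status_code')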
async def magnet(link):

View file

@@ -1,2 +1,2 @@
__version__ = '0.1.10'
__version_info__ = (0, 1, 10)
__version__ = '0.1.11'
__version_info__ = (0, 1, 11)

View file

@@ -48,6 +48,8 @@ from slixmpp.plugins.xep_0048.stanza import Bookmarks
import slixfeed.action as action
import slixfeed.config as config
import slixfeed.crawl as crawl
import slixfeed.fetch as fetch
from slixfeed.dt import timestamp
import slixfeed.sqlite as sqlite
from slixfeed.version import __version__
@@ -464,12 +466,15 @@ class Slixfeed(slixmpp.ClientXMPP):
# )
# if jid == config.get_value('accounts', 'XMPP', 'operator'):
self['xep_0050'].add_command(node='subscription',
name=' Add',
handler=self._handle_subscription_add)
self['xep_0050'].add_command(node='subscriptions',
name='📰️ Subscriptions',
handler=self._handle_subscriptions)
self['xep_0050'].add_command(node='promoted',
name='🔮️ Featured',
handler=self._handle_promoted)
self['xep_0050'].add_command(node='subscription',
name='🔗️ Add', # 🪶️
handler=self._handle_subscription_add)
# self['xep_0050'].add_command(node='subscriptions_cat',
# name='🔖️ Categories',
# handler=self._handle_subscription)
@@ -479,29 +484,32 @@ class Slixfeed(slixmpp.ClientXMPP):
# self['xep_0050'].add_command(node='subscriptions_index',
# name='📑️ Index (A - Z)',
# handler=self._handle_subscription)
# TODO Join Filters and Settings into Preferences
self['xep_0050'].add_command(node='filters',
name='🛡️ Filters',
handler=self._handle_filters)
# TODO Join Filters, Schedule and Settings into Preferences
self['xep_0050'].add_command(node='settings',
name='📮️ Settings',
handler=self._handle_settings)
if not self.is_component: # This will be changed with XEP-0222 XEP-0223
self['xep_0050'].add_command(node='bookmarks',
name='📕 Bookmarks',
handler=self._handle_bookmarks)
self['xep_0050'].add_command(node='roster',
name='📓 Roster', # 📋
handler=self._handle_roster)
self['xep_0050'].add_command(node='filters',
name='🛡️ Filters',
handler=self._handle_filters)
self['xep_0050'].add_command(node='schedule',
name='📅 Schedule',
handler=self._handle_schedule)
self['xep_0050'].add_command(node='help',
name='📔️ Manual',
handler=self._handle_help)
self['xep_0050'].add_command(node='totd',
name='💡️ TOTD',
name='💡️ Tips',
handler=self._handle_totd)
self['xep_0050'].add_command(node='fotd',
name='🗓️ FOTD',
handler=self._handle_fotd)
if not self.is_component: # This will be changed with XEP-0222 XEP-0223
self['xep_0050'].add_command(node='subscribers',
name='🏡️ Subscribers', # 🎫
handler=self._handle_subscribers)
self['xep_0050'].add_command(node='bookmarks',
name='📕 Bookmarks',
handler=self._handle_bookmarks)
self['xep_0050'].add_command(node='roster',
name='📓 Roster', # 📋
handler=self._handle_contacts)
self['xep_0050'].add_command(node='activity',
name='📠️ Activity',
handler=self._handle_activity)
@@ -542,8 +550,8 @@ class Slixfeed(slixmpp.ClientXMPP):
jid = session['from'].bare
jid_file = jid
db_file = config.get_pathname_to_database(jid_file)
form = self['xep_0004'].make_form('form', 'Filters')
form['instructions'] = '🛡️ Manage filters' # 🪄️
form = self['xep_0004'].make_form('form', 'Filter editor')
form['instructions'] = '🛡️ Edit and manage filters' # 🪄️
value = sqlite.get_filter_value(db_file, 'allow')
if value: value = str(value[0])
form.add_field(var='allow',
@@ -582,7 +590,7 @@ class Slixfeed(slixmpp.ClientXMPP):
form = payload
jid = session['from'].bare
form = self['xep_0004'].make_form('result', 'Filters')
form = self['xep_0004'].make_form('result', 'Done')
form['instructions'] = ('✅️ Filters have been updated')
jid_file = jid
db_file = config.get_pathname_to_database(jid_file)
@@ -611,7 +619,7 @@ class Slixfeed(slixmpp.ClientXMPP):
async def _handle_subscription_add(self, iq, session):
jid = session['from'].bare
form = self['xep_0004'].make_form('form', 'Add Subscriptions')
form = self['xep_0004'].make_form('form', 'Add Subscription')
form['instructions'] = '📰️ Add a new subscription'
options = form.add_field(var='subscription',
# TODO Make it possible to add several subscriptions at once;
@@ -643,7 +651,7 @@ class Slixfeed(slixmpp.ClientXMPP):
result = await action.add_feed(db_file, url)
if isinstance(result, list):
results = result
form = self['xep_0004'].make_form('form', 'Subscriptions')
form = self['xep_0004'].make_form('form', 'Select subscription')
form['instructions'] = ('🔍️ Discovered {} subscriptions for {}'
.format(len(results), url))
options = form.add_field(var='subscriptions',
@@ -652,7 +660,7 @@ class Slixfeed(slixmpp.ClientXMPP):
desc=('Select a subscription to add.'),
required=True)
for result in results:
options.addOption(result['name'], result['url'])
options.addOption(result['name'], result['link'])
session['payload'] = form
session['next'] = self._handle_subscription_editor
session['has_next'] = True
@@ -660,45 +668,41 @@ class Slixfeed(slixmpp.ClientXMPP):
# response = ('News source "{}" is already listed '
# 'in the subscription list at index '
# '{}.\n{}'.format(result['name'], result['index'],
# result['url']))
# result['link']))
# session['notes'] = [['warn', response]] # Not supported by Gajim
# session['notes'] = [['info', response]]
form = self['xep_0004'].make_form('result', 'Subscriptions')
form['instructions'] = ('⚠️ Feed "{}" already exist as index {}'
.format(result['name'], result['index']))
form = self['xep_0004'].make_form('form', 'Edit subscription')
form['instructions'] = ('📰️ ' + result['name'])
options = form.add_field(var='subscriptions',
ftype='text-single',
label=result['url'],
desc='Choose next to edit subscription.',
value=result['url'])
# FIXME payload value does not pass, only [].
ftype='list-single',
label='Edit subscription #{}'.format(result['index']),
# desc='Click URL to edit subscription.',
value=result['link'])
options.addOption(result['name'], result['link'])
session['payload'] = form
session['next'] = self._handle_subscription_editor
session['has_next'] = True
# session['has_next'] = False
elif result['error']:
response = ('Failed to load URL.'
response = ('Failed to load URL {}'
'\n\n'
'Reason: {}'
'\n\n'
'URL: {}'
.format(result['code'], url))
.format(url, result['code']))
session['notes'] = [['error', response]]
session['next'] = None
else:
# response = ('News source "{}" has been '
# 'added to subscription list.\n{}'
# .format(result['name'], result['url']))
# .format(result['name'], result['link']))
# session['notes'] = [['info', response]]
form = self['xep_0004'].make_form('result', 'Subscriptions')
form = self['xep_0004'].make_form('result', 'Done')
form['instructions'] = ('✅️ News source "{}" has been added to '
'subscription list as index {}'
.format(result['name'], result['index']))
options = form.add_field(var='subscriptions',
ftype='text-single',
label=result['url'],
label=result['link'],
desc='Choose next to edit subscription.',
value=result['url'])
# FIXME payload value does not pass, only [].
value=result['link'])
session['payload'] = form
session['next'] = self._handle_subscription_editor
session['has_next'] = True
@@ -758,8 +762,8 @@ class Slixfeed(slixmpp.ClientXMPP):
# FIXME There are feeds that are missing (possibly because of sortings)
async def _handle_subscription(self, iq, session):
jid = session['from'].bare
form = self['xep_0004'].make_form('form', 'Subscriptions')
form['instructions'] = '📰️ Edit subscription'
form = self['xep_0004'].make_form('form', 'Subscription editor')
form['instructions'] = '📰️ Edit subscription preferences and properties'
# form.addField(var='interval',
# ftype='text-single',
# label='Interval period')
@@ -829,7 +833,11 @@ class Slixfeed(slixmpp.ClientXMPP):
options.addOption('Delete {} subscriptions'.format(url_count), 'delete')
options.addOption('Export {} subscriptions'.format(url_count), 'export')
else:
url = urls[0]
if isinstance(urls, list):
url = urls[0]
# elif isinstance(urls, str):
else:
url = urls
feed_id = await sqlite.get_feed_id(db_file, url)
feed_id = feed_id[0]
title = sqlite.get_feed_title(db_file, feed_id)
@ -1019,7 +1027,7 @@ class Slixfeed(slixmpp.ClientXMPP):
count = await action.import_opml(db_file, url)
try:
int(count)
form = self['xep_0004'].make_form('result', 'Import')
form = self['xep_0004'].make_form('result', 'Done')
form['instructions'] = ('✅️ Feeds have been imported')
message = '{} feeds have been imported to {}.'.format(count, jid)
form.add_field(var='message',
@ -1058,7 +1066,7 @@ class Slixfeed(slixmpp.ClientXMPP):
async def _handle_export_complete(self, payload, session):
jid = session['from'].bare
jid_file = jid.replace('/', '_')
form = self['xep_0004'].make_form('result', 'Export')
form = self['xep_0004'].make_form('result', 'Done')
form['instructions'] = ('✅️ Feeds have been exported')
exts = payload['values']['filetype']
for ext in exts:
@ -1082,12 +1090,51 @@ class Slixfeed(slixmpp.ClientXMPP):
return session
async def _handle_fotd(self, iq, session):
text = ('Here we publish featured news feeds!')
async def _handle_schedule(self, iq, session):
text = ('Schedule')
text += '\n\n'
text += 'Set days and hours to receive news.'
session['notes'] = [['info', text]]
return session
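The schedule handler above is still a placeholder. Below is a minimal sketch of the day/hour gate it describes, assuming hypothetical 'schedule_days' and 'schedule_hours' settings read through the existing config.get_setting_value helper; neither key exists in the current schema.

from datetime import datetime

def within_schedule(db_file):
    # Hypothetical settings, e.g. days='0,1,2,3,4' (Mon-Fri), hours='8-20'.
    days = config.get_setting_value(db_file, 'schedule_days')
    hours = config.get_setting_value(db_file, 'schedule_hours')
    now = datetime.now()
    # Reject when today is not among the configured weekdays.
    if days and str(now.weekday()) not in days.split(','):
        return False
    # Reject when the current hour falls outside the configured window.
    if hours:
        start, end = (int(part) for part in hours.split('-'))
        if not start <= now.hour < end:
            return False
    return True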
# TODO Exclude feeds that are already in the database of the requester.
# TODO Attempt to look up feeds for the hostname of the JID (i.e. scan
# jabber.de for feeds for julient@jabber.de)
async def _handle_promoted(self, iq, session):
url = action.pick_a_feed()
form = self['xep_0004'].make_form('form', 'Subscribe')
# NOTE Refresh button would be of use
form['instructions'] = '🔮️ Featured subscriptions' # 🎲️
options = form.add_field(var='subscription',
ftype="list-single",
label=url['name'],
value=url['link'])
options.addOption(url['name'], url['link'])
jid = session['from'].bare
if '@' in jid:
hostname = jid.split('@')[1]
url = 'http://' + hostname
result = await crawl.probe_page(url)
if not result:
url = {'link' : url,
'index' : None,
'name' : None,
'code' : None,
'error' : True,
'exist' : False}
elif isinstance(result, list):
for url in result:
if url['link']: options.addOption(url['name'], url['link'])
else:
url = result
# Add the single probed feed as a selectable option
if url['link']: options.addOption(url['name'], url['link'])
session['payload'] = form
session['next'] = self._handle_subscription_new
return session
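A possible shape for the first TODO above: filter the probed results against subscriptions the requester already has. sqlite.get_feed_id is the lookup already used elsewhere in this class; the surrounding wiring is an assumption, not part of this commit.

async def exclude_existing(db_file, candidates):
    # Keep only probed feeds whose URL is not yet in the requester's database.
    fresh = []
    for entry in candidates:
        if entry['link'] and not await sqlite.get_feed_id(db_file, entry['link']):
            fresh.append(entry)
    return fresh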
async def _handle_motd(self, iq, session):
# TODO add functionality to attach image.
text = ('Here you can add groupchat rules, post schedule, tasks or '
@ -1103,9 +1150,11 @@ class Slixfeed(slixmpp.ClientXMPP):
async def _handle_credit(self, iq, session):
wrjx = action.manual('information.toml', 'thanks')
form = self['xep_0004'].make_form('result', 'Credits')
form['instructions'] = "We are XMPP"
form.add_field(ftype="text-multi", value=action.manual('information.toml', 'thanks'))
form.add_field(ftype="text-multi",
value=wrjx)
# Gajim displays all form['instructions'] on top
# Psi ignores the latter form['instructions']
@ -1153,6 +1202,188 @@ class Slixfeed(slixmpp.ClientXMPP):
return session
async def _handle_subscribers(self, iq, session):
jid = session['from'].bare
if jid == config.get_value('accounts', 'XMPP', 'operator'):
form = self['xep_0004'].make_form('form', 'Subscribers')
form['instructions'] = '📖️ Organize subscribers'
options = form.add_field(var='jid',
ftype='list-single',
label='Contacts',
desc='Select a contact.',
required=True)
contacts = await XmppRoster.get(self)
for contact in contacts:
contact_name = contacts[contact]['name']
contact_name = contact_name if contact_name else contact
options.addOption(contact_name, contact)
options = form.add_field(var='action',
ftype='list-single',
label='Action',
value='message')
options.addOption('Resend authorization To', 'to')
options.addOption('Request authorization From', 'from')
options.addOption('Send message', 'message')
options.addOption('Remove', 'remove')
form.add_field(var='message',
ftype='text-multi',
label='Message',
desc='Add a descriptive message.')
session['payload'] = form
session['next'] = self._handle_subscribers_complete
session['has_next'] = True
else:
logging.warning('An unauthorized attempt to access subscribers has '
'been detected!\n'
'Details:\n'
' Jabber ID: {}\n'
' Timestamp: {}\n'
.format(jid, timestamp()))
session['notes'] = [['warn', 'This resource is restricted.']]
return session
async def _handle_subscribers_complete(self, iq, session):
pass
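The completion handler is still a stub. One way to wire it, dispatching on the 'action' field defined in the form above; this sketch assumes the standard slixmpp presence, message, and roster calls, and is not part of this commit.

async def _handle_subscribers_complete(self, payload, session):
    values = payload['values']
    jid = values['jid']
    match values['action']:
        case 'to':
            # Re-approve the contact's subscription to our presence.
            self.send_presence(pto=jid, ptype='subscribed')
        case 'from':
            # Ask the contact for authorization to see their presence.
            self.send_presence_subscription(pto=jid)
        case 'message':
            self.send_message(mto=jid, mbody=values['message'])
        case 'remove':
            self.update_roster(jid, subscription='remove')
    session['notes'] = [['info', 'Action has been performed.']]
    return session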
async def _handle_contacts(self, iq, session):
jid = session['from'].bare
if jid == config.get_value('accounts', 'XMPP', 'operator'):
form = self['xep_0004'].make_form('form', 'Contacts')
form['instructions'] = '📖️ Organize contacts'
options = form.add_field(var='jid',
ftype='list-single',
label='Contact',
desc='Select a contact.',
required=True)
contacts = await XmppRoster.get(self)
for contact in contacts:
contact_name = contacts[contact]['name']
contact_name = contact_name if contact_name else contact
options.addOption(contact_name, contact)
options = form.add_field(var='action',
ftype='list-single',
label='Action',
value='view')
options.addOption('Display', 'view')
options.addOption('Edit', 'edit')
session['payload'] = form
session['next'] = self._handle_contact_action
session['has_next'] = True
else:
logging.warning('An unauthorized attempt to access contacts has '
'been detected!\n'
'Details:\n'
' Jabber ID: {}\n'
' Timestamp: {}\n'
.format(jid, timestamp()))
session['notes'] = [['warn', 'This resource is restricted.']]
return session
async def _handle_contact_action(self, payload, session):
jid = payload['values']['jid']
form = self['xep_0004'].make_form('form', 'Contact editor')
session['allow_complete'] = True
match payload['values']['action']:
case 'edit':
form['instructions'] = '📖️ Edit contact'
roster = await XmppRoster.get(self)
properties = roster[jid]
form.add_field(var='name',
ftype='text-single',
label='Name',
value=properties['name'])
case 'view':
session['has_next'] = False
session['next'] = None
session['allow_complete'] = None
form = self['xep_0004'].make_form('form', 'Contact info')
form['instructions'] = '📖️ Contact details'
roster = await XmppRoster.get(self)
properties = roster[jid]
contact_name = properties['name']
contact_name = contact_name if contact_name else jid
form.add_field(var='name',
ftype='text-single',
label='Name',
value=contact_name)
form.add_field(var='from',
ftype='boolean',
label='From',
value=properties['from'])
form.add_field(var='to',
ftype='boolean',
label='To',
value=properties['to'])
form.add_field(var='pending_in',
ftype='boolean',
label='Pending in',
value=properties['pending_in'])
form.add_field(var='pending_out',
ftype='boolean',
label='Pending out',
value=properties['pending_out'])
form.add_field(var='whitelisted',
ftype='boolean',
label='Whitelisted',
value=properties['whitelisted'])
form.add_field(var='subscription',
ftype='fixed',
label='Subscription',
value=properties['subscription'])
session['payload'] = form
session['prev'] = self._handle_contacts
session['allow_prev'] = True
# session['next'] = None
# session['has_next'] = False
# session['allow_complete'] = True
return session
async def _handle_contacts_complete(self, payload, session):
pass
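Likewise a stub. A sketch of persisting the edited name from the 'edit' branch of _handle_contact_action, assuming slixmpp's ClientXMPP.update_roster; note the current edit form only carries 'name', so the selected JID would need to be stashed on the session (or passed as a hidden field) in the previous step.

async def _handle_contacts_complete(self, payload, session):
    values = payload['values']
    # Assumes the JID was stored on the session by _handle_contact_action.
    jid = session.get('jid')
    if jid and 'name' in values:
        self.update_roster(jid, name=values['name'])
    session['notes'] = [['info', 'Contact has been updated.']]
    return session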
async def _handle_contacts_view(self, payload, session):
jid = payload['values']['jid']
roster = await XmppRoster.get(self)
properties = roster[jid]
form = self['xep_0004'].make_form('result', 'Contact')
contact_name = properties['name']
contact_name = contact_name if contact_name else jid
form['instructions'] = '📝️ View contact {}'.format(contact_name)
form.add_field(var='from',
ftype='boolean',
label='From',
value=properties['from'])
form.add_field(var='to',
ftype='boolean',
label='To',
value=properties['to'])
form.add_field(var='pending_in',
ftype='boolean',
label='Pending in',
value=properties['pending_in'])
form.add_field(var='pending_out',
ftype='boolean',
label='Pending out',
value=properties['pending_out'])
form.add_field(var='whitelisted',
ftype='boolean',
label='Whitelisted',
value=properties['whitelisted'])
form.add_field(var='subscription',
ftype='text-single',
label='Subscription',
value=properties['subscription'])
session['payload'] = form
session['next'] = self._handle_contacts_complete
session['has_next'] = True
return session
async def _handle_bookmarks(self, iq, session):
jid = session['from'].bare
if jid == config.get_value('accounts', 'XMPP', 'operator'):
@ -1175,15 +1406,14 @@ class Slixfeed(slixmpp.ClientXMPP):
' Jabber ID: {}\n'
' Timestamp: {}\n'
.format(jid, timestamp()))
session['notes'] = [['warn', 'You are not allowed to access this resource.']]
session['notes'] = [['warn', 'This resource is restricted.']]
return session
async def _handle_bookmarks_editor(self, payload, session):
jid = payload['values']['bookmarks']
properties = await XmppBookmark.properties(self, jid)
jid = session['from'].bare
form = self['xep_0004'].make_form('form', 'Edit bookmark')
form = self['xep_0004'].make_form('form', 'Bookmark editor')
form['instructions'] = '📝️ Edit bookmark {}'.format(properties['name'])
jid = properties['jid'].split('@')
room = jid[0]
@ -1248,7 +1478,7 @@ class Slixfeed(slixmpp.ClientXMPP):
here to persist across handler callbacks.
"""
form = self['xep_0004'].make_form('result', 'Bookmarks')
form = self['xep_0004'].make_form('result', 'Done')
form['instructions'] = ('✅️ Bookmark has been saved')
# In this case (as is typical), the payload is a form
values = payload['values']
@ -1280,7 +1510,7 @@ class Slixfeed(slixmpp.ClientXMPP):
jid = session['from'].bare
jid_file = jid
db_file = config.get_pathname_to_database(jid_file)
form = self['xep_0004'].make_form('form', 'Settings')
form = self['xep_0004'].make_form('form', 'Setting editor')
form['instructions'] = ('📮️ Customize news updates')
value = config.get_setting_value(db_file, 'enabled')
@ -1395,7 +1625,7 @@ class Slixfeed(slixmpp.ClientXMPP):
# form = payload
jid = session['from'].bare
form = self['xep_0004'].make_form('form', 'Settings')
form = self['xep_0004'].make_form('form', 'Done')
form['instructions'] = ('✅️ Settings have been saved')
jid_file = jid

View file

@ -546,13 +546,13 @@ async def message(self, message):
response += ("Title : {}\n"
"Link : {}\n"
"\n"
.format(result['name'], result['url']))
.format(result['name'], result['link']))
response += ('```\nTotal of {} feeds.'
.format(len(results)))
elif result['exist']:
response = ('> {}\nNews source "{}" is already '
'listed in the subscription list at '
'index {}'.format(result['url'],
'index {}'.format(result['link'],
result['name'],
result['index']))
elif result['error']:
@ -561,7 +561,7 @@ async def message(self, message):
else:
response = ('> {}\nNews source "{}" has been '
'added to subscription list.'
.format(result['url'], result['name']))
.format(result['link'], result['name']))
# task.clean_tasks_xmpp(self, jid, ['status'])
await task.start_tasks_xmpp(self, jid, ['status'])
# except:

View file

@ -12,6 +12,13 @@ TODO
class XmppRoster:
async def get(self):
await self.get_roster()
contacts = self.client_roster
return contacts
async def add(self, jid):
"""
Add JID to roster.