all repos — sunstroke @ eceadcf2f0e4150b131b14e7c3c9553f1169b87e

sort based on host preference, add startup script
Andronaco Marco marco.andronaco@olivetti.com
Wed, 12 Jul 2023 15:47:56 +0200
commit

eceadcf2f0e4150b131b14e7c3c9553f1169b87e

parent

dbe85ea6d39ca7b2037ecf8c439b7d5d8f97e939

4 files changed, 41 insertions(+), 9 deletions(-)

jump to
M Overpost.py

@@ -69,10 +69,11 @@ feed = feedparser.parse(rss_url)

     return [ parse_entry(entry) for entry in feed.entries ]

 def get_newspaper(prefix="", index=0):
-    links = get_links(RSS_URL)
+    all_links = get_links(RSS_URL)
     try:
-        daily = links[index][1]
+        daily = all_links[index][1]
     except IndexError:
+        print("Empty feed.")
         return {}
     return { k: v for k, v in daily.items() if k.startswith(prefix)}
A Sunstroke.bat

@@ -0,0 +1,2 @@

+@echo off
+.\venv\Scripts\python.exe .\main.py
M main.py

@@ -1,25 +1,54 @@

 from Overpost import get_newspaper
 from MyPyload import Pyload
+from urllib.error import URLError
 from os import getenv
+from datetime import datetime

 NEWSPAPER_PREFIX = getenv("NEWSPAPER_PREFIX") or ""
+HOST_PREFERENCE = [ 'katfile.com', 'rapidgator.net', 'www.easybytez.com' ]

-def scroll_dict(dictionary):
+def scroll_list(array, buffer=1000):
+    array_len = len(array)
     i = 0
-    for key, values in dictionary.items():
-        if i >= len(values):
+    while i < buffer:
+        if i >= array_len:
             i = 0
-        yield key, values[i]
+        yield array[i]
         i += 1

+def get_host(link):
+    return link.split("/")[2]
+
+def filter_links(links, host):
+    for link in links:
+        if get_host(link) == host:
+            return link
+
+def get_sorted_links(dictionary):
+    hosts = scroll_list(HOST_PREFERENCE)
+    return [ filter_links(links, next(hosts)) for _, links in dictionary.items() ]
+
 def download_link(connection, name, link):
     return connection.addPackage(name=name, links=[link])

+def handle_links(name, links):
+    try:
+        con = Pyload()
+        return [ download_link(con, name, link) for link in links ]
+    except URLError:
+        print("Connessione a Pyload rifiutata.")
+
+    print("Link da aggiungere manualmente:\n")
+    for x in links:
+        print(x)
+    print()
+    return []
+
 def main():
     newspapers = get_newspaper(NEWSPAPER_PREFIX, 0) # 0 -> today
-    con = Pyload()
-    pids = [ download_link(con, NEWSPAPER_PREFIX, link) for _, link in scroll_dict(newspapers) ]
-    print(pids)
+    name = NEWSPAPER_PREFIX + datetime.today().strftime("%Y-%m-%d")
+    links = get_sorted_links(newspapers)
+    pids = handle_links(name, links)

 if __name__ == "__main__":
     exit(main())