Don't archive site URLs

master
Aevann1 2022-09-03 20:05:00 +02:00
parent 9b7b3fe805
commit 5fa854afd6
1 changed file with 4 additions and 6 deletions

View File

@ -33,14 +33,10 @@ def archiveorg(url):
except: pass except: pass
def archive_url(url):
	"""Queue *url* (plus known mirror variants) for archiving on archive.org.

	Spawns the work on a gevent greenlet so the caller never blocks.
	Links pointing back at this site itself are skipped entirely.
	Twitter and Instagram links are additionally archived via their
	Nitter and Imginn mirrors, since the originals often block crawlers.
	"""
	# Never waste archive.org requests on our own site's URLs.
	if url.startswith(SITE_FULL): return
	gevent.spawn(archiveorg, url)
	if url.startswith('https://twitter.com/'):
		# Also archive the Nitter mirror of the tweet.
		url = url.replace('https://twitter.com/', 'https://nitter.42l.fr/')
		gevent.spawn(archiveorg, url)
	if url.startswith('https://instagram.com/'):
		# FIX: original read `newposturl.replace(...)` but `newposturl` is
		# undefined in this scope, raising NameError for Instagram links.
		# Use `url`, mirroring the Twitter branch above.
		url = url.replace('https://instagram.com/', 'https://imginn.com/')
		gevent.spawn(archiveorg, url)
@ -97,7 +93,7 @@ def execute_snappy(post, v):
body += "\n\n" body += "\n\n"
if post.url: if post.url and not post.url.startswith(SITE_FULL):
if post.url.startswith('https://old.reddit.com/r/'): if post.url.startswith('https://old.reddit.com/r/'):
rev = post.url.replace('https://old.reddit.com/', '') rev = post.url.replace('https://old.reddit.com/', '')
rev = f"* [unddit.com](https://unddit.com/{rev})\n" rev = f"* [unddit.com](https://unddit.com/{rev})\n"
@ -128,6 +124,8 @@ def execute_snappy(post, v):
for href, title in captured: for href, title in captured:
if href.startswith(SITE_FULL): continue
if "Snapshots:\n\n" not in body: body += "Snapshots:\n\n" if "Snapshots:\n\n" not in body: body += "Snapshots:\n\n"
if f'**[{title}]({href})**:\n\n' not in body: if f'**[{title}]({href})**:\n\n' not in body: