make snappy post unscored.arete.network links

pull/225/head
Aevann 2024-03-02 01:29:51 +02:00
parent 3b1e507c47
commit 47259a86c0
3 changed files with 29 additions and 28 deletions

@@ -51,23 +51,6 @@ def snappy_report(post, reason):
    message = f'@Snappy reported {post.textlink})\n\n> {reason}'
    send_repeatable_notification(post.author_id, message)

def get_archival_urls(href):
    addition = ''
    if href.startswith('https://old.reddit.com/r/'):
        rev = href.replace('https://old.reddit.com/', '')
        addition += f'* [undelete.pullpush.io](https://undelete.pullpush.io/{rev})\n\n'
    elif href.startswith('https://old.reddit.com/user/'):
        rev = href.replace('https://old.reddit.com/user/', '')
        addition += f"* [search-new.pullpush.io](https://search-new.pullpush.io/?author={rev}&type=submission)\n\n"
    elif href.startswith('https://boards.4chan.org/'):
        rev = href.replace('https://boards.4chan.org/', '')
        addition += f'* [archived.moe](https://archived.moe/{rev})\n\n'
    addition += f'* [ghostarchive.org](https://ghostarchive.org/search?term={quote(href)})\n\n'
    addition += f'* [archive.org](https://web.archive.org/{href})\n\n'
    addition += f'* [archive.ph](https://archive.ph/?url={quote(href)}&run=1) (click to archive)\n\n'
    return addition

def execute_snappy(post, v):
    if post.hole and g.db.query(Exile.user_id).filter_by(user_id=SNAPPY_ID, hole=post.hole).one_or_none():
        return
@@ -210,28 +193,40 @@ def execute_snappy(post, v):
        body += "\n\n"

    if post.url and not post.url.startswith('/') and not post.url.startswith(f'{SITE_FULL}/') and not post.url.startswith(SITE_FULL_IMAGES):
        body += f"Snapshots:\n\n{get_archival_urls(post.url)}"
        archive_url(post.url)

    captured = []
    body_for_snappy = post.body_html.replace(' data-src="', ' src="')
    if post.url and not post.url.startswith('/'):
        captured.append((post.url, post.url))
    for i in list(snappy_url_regex.finditer(body_for_snappy)):
    for i in list(snappy_url_regex.finditer(post.body_html.replace(' data-src="', ' src="'))):
        href = i.group(1)
        if href in [x[0] for x in captured]: continue
        title = i.group(2)
        captured.append((href, title))

    if captured:
        body += "**Snapshots:**\n\n"
    for href, title in captured:
        if href == post.url: continue
        if href.startswith(f'{SITE_FULL}/') or href.startswith(SITE_FULL_IMAGES): continue
        if "Snapshots:\n\n" not in body: body += "Snapshots:\n\n"
        if f'**[{title}]({href})**:\n\n' not in body:
            addition = f'**[{title}]({href})**:\n\n'
            addition += get_archival_urls(href)
            if href.startswith('https://old.reddit.com/r/'):
                rev = href.replace('https://old.reddit.com/', '')
                addition += f'* [undelete.pullpush.io](https://undelete.pullpush.io/{rev})\n\n'
            elif href.startswith('https://old.reddit.com/user/'):
                rev = href.replace('https://old.reddit.com/user/', '')
                addition += f"* [search-new.pullpush.io](https://search-new.pullpush.io/?author={rev}&type=submission)\n\n"
            elif href.startswith('https://boards.4chan.org/'):
                rev = href.replace('https://boards.4chan.org/', '')
                addition += f'* [archived.moe](https://archived.moe/{rev})\n\n'
            elif href.startswith('https://scored.co/') and href != 'https://scored.co/':
                rev = href.replace('https://scored.co/', '')
                addition += f'* [unscored.arete.network](https://unscored.arete.network/{rev})\n\n'
            addition += f'* [ghostarchive.org](https://ghostarchive.org/search?term={quote(href)})\n\n'
            addition += f'* [archive.org](https://web.archive.org/{href})\n\n'
            addition += f'* [archive.ph](https://archive.ph/?url={quote(href)}&run=1) (click to archive)\n\n'
            if len(f'{body}{addition}') > COMMENT_BODY_LENGTH_LIMIT: break
            body += addition
        archive_url(href)

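For context, a minimal standalone sketch of the scored.co branch this commit adds to the inlined snapshot logic, plus the generic archive links appended to every URL. It is not part of the commit; the helper name snapshot_markdown and the sample URL are illustrative only.

from urllib.parse import quote

def snapshot_markdown(href):
    # Illustrative sketch only: mirrors the new scored.co -> unscored.arete.network
    # case from the diff above, followed by the generic archive links.
    addition = ''
    if href.startswith('https://scored.co/') and href != 'https://scored.co/':
        rev = href.replace('https://scored.co/', '')
        addition += f'* [unscored.arete.network](https://unscored.arete.network/{rev})\n\n'
    addition += f'* [ghostarchive.org](https://ghostarchive.org/search?term={quote(href)})\n\n'
    addition += f'* [archive.org](https://web.archive.org/{href})\n\n'
    addition += f'* [archive.ph](https://archive.ph/?url={quote(href)}&run=1) (click to archive)\n\n'
    return addition

# Hypothetical link, used only to show the markdown Snappy would post for it.
print(snapshot_markdown('https://scored.co/c/example/p/abc123/some-post'))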
@@ -743,6 +743,13 @@ domain_replacements = {
    'https://lmgtfy.app/?q=': 'https://google.com/search?q=',
    DONATE_LINK: f'{SITE_FULL}/donate',
    'https://boards.4channel.org/': 'https://boards.4chan.org/',
    'https://communities.win/': 'https://scored.co/',
    'https://m.communities.win/': 'https://scored.co/',
    'https://patriots.win/': 'https://scored.co/',
    'https://greatawakening.win/': 'https://scored.co/',
    'https://conspiracies.win/': 'https://scored.co/',
    'https://kotakuinaction2.win/': 'https://scored.co/',
    'https://omegacanada.win/': 'https://scored.co/',
}

def normalize_url(url):

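The table above maps the .win mirror domains onto scored.co. Here is a minimal sketch of how such a prefix-replacement table is typically applied; the body of normalize_url is not shown in this hunk, so this is an assumption rather than the site's actual implementation, and apply_domain_replacements is an illustrative name.

def apply_domain_replacements(url, replacements):
    # Rewrite the first matching URL prefix; leave the URL untouched otherwise.
    for old, new in replacements.items():
        if url.startswith(old):
            return new + url[len(old):]
    return url

replacements = {
    'https://boards.4channel.org/': 'https://boards.4chan.org/',
    'https://patriots.win/': 'https://scored.co/',
}
print(apply_domain_replacements('https://patriots.win/p/17abc/example-post', replacements))
# -> https://scored.co/p/17abc/example-post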
@@ -168,8 +168,7 @@ def vote_post_comment(target_id, new, v, cls, vote_cls):
    elif SITE == 'rdrama.net' and 6 <= datetime.fromtimestamp(target.created_utc).hour <= 10:
        mul = 2
    elif SITE == 'rdrama.net' and cls == Post:
        if target.domain.endswith('.win') \
            or any(i in target.domain for i in ('forum','community','chan','lemmy','mastodon')) \
        if any(i in target.domain for i in ('forum','community','chan','lemmy','mastodon')) \
            or (target.domain in BOOSTED_SITES and not target.url.startswith('/')):
            mul = 2
    elif target.hole in BOOSTED_HOLES:
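Dropping the .endswith('.win') test presumably follows from the replacement table above: once the .win mirrors are rewritten to scored.co before a post's domain is stored, that branch can no longer match. A hedged illustration under that assumption (the rewrite step here is inlined by hand and is not the site's code):

from urllib.parse import urlparse

# Assuming post URLs are normalized with the table above before target.domain
# is set, a .win mirror link arrives at the vote check as a scored.co domain.
url = 'https://patriots.win/p/17abc/example-post'
url = url.replace('https://patriots.win/', 'https://scored.co/', 1)
print(urlparse(url).hostname)  # scored.co, so the old '.win' branch had nothing to match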