
exclude snappy from fishylinks_regex

master
Aevann 2023-01-21 06:18:02 +02:00
parent 9c6c894ebd
commit df803791ef
2 changed files with 3 additions and 3 deletions


@@ -149,7 +149,7 @@ def execute_snappy(post:Submission, v:User):
 			archive_url(href)
 	body = body.strip()[:COMMENT_BODY_LENGTH_LIMIT]
-	body_html = sanitize(body)
+	body_html = sanitize(body, snappy=True)
 	if len(body_html) == 0:
 		return


@@ -238,7 +238,7 @@ chud_images = [f'![](/i/chud/{f})' for f in chud_images]
 chud_images.extend([':#trumpjaktalking:', ':#reposthorse:'])
 @with_sigalrm_timeout(10)
-def sanitize(sanitized, golden=True, limit_pings=0, showmore=True, count_marseys=False, torture=False, sidebar=False):
+def sanitize(sanitized, golden=True, limit_pings=0, showmore=True, count_marseys=False, torture=False, sidebar=False, snappy=False):
 	sanitized = sanitized.strip()
 	sanitized = utm_regex.sub('', sanitized)
@@ -318,7 +318,7 @@ def sanitize(sanitized, golden=True, limit_pings=0, showmore=True, count_marseys
 	for tag in soup.find_all("a"):
 		if not tag.contents or not str(tag.contents[0]).strip():
 			tag.extract()
-		if tag.get("href") and fishylinks_regex.fullmatch(str(tag.string)):
+		if not snappy and tag.get("href") and fishylinks_regex.fullmatch(str(tag.string)):
 			tag.string = tag["href"]