rDrama/files/helpers/sanitize.py

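"""HTML sanitization helpers for user-submitted content.

Defines the tag/attribute/style whitelists and sanitize(), which cleans
rendered post/comment HTML and rewrites images, emoji codes, spoilers and
media links into the markup the frontend expects.
"""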
import bleach
from bs4 import BeautifulSoup
from bleach.linkifier import LinkifyFilter
from functools import partial
from .get import *
from os import path, environ
import re
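# Canonical site domain; DOMAIN is assumed to be set in the environment at startup.
# Links containing this domain are not marked nofollow in the <a> pass below.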
site = environ.get("DOMAIN").strip()
allowed_tags = tags = [
    'b', 'blockquote', 'br', 'code', 'del', 'em',
    'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'hr',
    'i', 'li', 'ol', 'p', 'pre', 'strong', 'sub', 'sup',
    'table', 'tbody', 'th', 'thead', 'td', 'tr', 'ul',
    'marquee', 'a', 'img', 'span',
]
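# Same whitelist as allowed_tags but without 'img'; used when sanitize() is
# called with noimages=True.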
no_images = [
    'b', 'blockquote', 'br', 'code', 'del', 'em',
    'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'hr',
    'i', 'li', 'ol', 'p', 'pre', 'strong', 'sub', 'sup',
    'table', 'tbody', 'th', 'thead', 'td', 'tr', 'ul',
    'marquee', 'a', 'span',
]
allowed_attributes = {'*': ['href', 'style', 'src', 'class', 'title', 'rel', 'data-bs-original-name', 'direction']}
allowed_protocols = ['http', 'https']
allowed_styles = ['color', 'font-weight', 'transform', '-webkit-transform']
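
# sanitize() pipeline: bleach whitelist pass -> BeautifulSoup post-processing of
# <img>/<a> tags -> spoiler and :emoji: substitution -> URL normalization and
# embed rewriting (YouTube, Streamable, mp4, Spotify) -> autolinking of any
# remaining bare links.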
def sanitize(sanitized, noimages=False):
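    """Clean user-submitted HTML and rewrite it into the markup the frontend expects.

    The input is run through bleach with the whitelists defined above, then
    post-processed: images are lazy-loaded and wrapped in modal-opening links,
    outbound links get rel/target attributes, :emoji: codes become <img> tags,
    spoilers and recognised media URLs are rewritten, and bare URLs are autolinked.
    With noimages=True the stricter whitelist (no <img>) is used.
    """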

    # Drop stray BOMs and canonicalise mobile YouTube links.
    sanitized = sanitized.replace("\ufeff", "").replace("m.youtube.com", "youtube.com")

    # Rewrite i.imgur.com links to the "_d" direct-file variant served at full width.
    for i in re.finditer(r'https://i.imgur.com/(([^_]*?)\.(jpg|png|jpeg))', sanitized):
        sanitized = sanitized.replace(i.group(1), i.group(2) + "_d." + i.group(3) + "?maxwidth=9999")
    if noimages:
        sanitized = bleach.Cleaner(tags=no_images,
                                   attributes=allowed_attributes,
                                   protocols=allowed_protocols,
                                   styles=allowed_styles,
                                   filters=[partial(LinkifyFilter,
                                                    skip_tags=["pre"],
                                                    parse_email=False,
                                                    )
                                            ]
                                   ).clean(sanitized)
    else:
        sanitized = bleach.Cleaner(tags=allowed_tags,
                                   attributes=allowed_attributes,
                                   protocols=allowed_protocols,
                                   styles=allowed_styles,
                                   filters=[partial(LinkifyFilter,
                                                    skip_tags=["pre"],
                                                    parse_email=False,
                                                    )
                                            ]
                                   ).clean(sanitized)
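
    # Second pass: parse the cleaned HTML and rewrite <img> tags to lazy-load via
    # data-src, then wrap each one in a link that opens the expand-image modal.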
    soup = BeautifulSoup(sanitized, features="html.parser")

    for tag in soup.find_all("img"):
        if tag.get("src") and "profile-pic-20" not in tag.get("class", ""):
            tag["rel"] = "nofollow noopener noreferrer"
            tag["class"] = "in-comment-image"
            tag["loading"] = "lazy"
            tag["data-src"] = tag["src"]
            tag["src"] = "/assets/images/loading.webp"

            link = soup.new_tag("a")
            link["href"] = tag["data-src"]
            link["rel"] = "nofollow noopener noreferrer"
            link["target"] = "_blank"
            link["onclick"] = f"expandDesktopImage('{tag['data-src']}');"
            link["data-bs-toggle"] = "modal"
            link["data-bs-target"] = "#expandImageModal"

            tag.wrap(link)
    for tag in soup.find_all("a"):
        # guard with .get() so <a> tags without an href don't raise KeyError
        if tag.get("href"):
            tag["target"] = "_blank"
            if site not in tag["href"]: tag["rel"] = "nofollow noopener noreferrer"

            if re.match(r"https?://\S+", str(tag.string)):
                try: tag.string = tag["href"]
                except: tag.string = ""

    sanitized = str(soup)
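
    # <s>...</s> pairs become spoiler spans; session["favorite_emojis"] counts how
    # often each emoji is used (the try/except presumably covers calls made outside
    # a request context, where session is unavailable).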
    start = '<s>'
    end = '</s>'

    try:
        if not session.get("favorite_emojis"): session["favorite_emojis"] = {}
    except:
        pass

    if start in sanitized and end in sanitized and start in sanitized.split(end)[0] and end in sanitized.split(start)[1]:
        sanitized = sanitized.replace(start, '<span class="spoiler">').replace(end, '</span>')
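
    # Blocks that contain nothing but emoji codes get the large "bigemoji"
    # rendering; the marseylong variants also drop the paragraph margin.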
    for i in re.finditer(r"[^a]>\s*(:!?\w+:\s*)+<\/", sanitized):
        old = i.group(0)
        if 'marseylong1' in old or 'marseylong2' in old: new = old.lower().replace(">", " class='mb-0'>")
        else: new = old.lower()

        for m in re.finditer('(?<!"):([^ ]{1,30}?):', new):
            emoji = m.group(1).lower()

            if emoji.startswith("!"):
                emoji = emoji[1:]
                if path.isfile(f'./files/assets/images/emojis/{emoji}.webp'):
                    new = re.sub(f'(?<!"):!{emoji}:', f'<img loading="lazy" data-bs-toggle="tooltip" alt=":!{emoji}:" title=":!{emoji}:" delay="0" class="bigemoji mirrored" src="https://{site}/assets/images/emojis/{emoji}.webp" >', new)

                    if emoji in session["favorite_emojis"]: session["favorite_emojis"][emoji] += 1
                    else: session["favorite_emojis"][emoji] = 1

            elif path.isfile(f'./files/assets/images/emojis/{emoji}.webp'):
                new = re.sub(f'(?<!"):{emoji}:', f'<img loading="lazy" data-bs-toggle="tooltip" alt=":{emoji}:" title=":{emoji}:" delay="0" class="bigemoji" src="https://{site}/assets/images/emojis/{emoji}.webp" >', new)

                if emoji in session["favorite_emojis"]: session["favorite_emojis"][emoji] += 1
                else: session["favorite_emojis"][emoji] = 1

        sanitized = sanitized.replace(old, new)
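
    # Emoji codes appearing inline elsewhere in the text get the smaller "emoji"
    # class instead of "bigemoji".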
    for m in re.finditer('(?<!"):([^ ]{1,30}?):', sanitized):
        emoji = m.group(1).lower()

        if emoji.startswith("!"):
            emoji = emoji[1:]
            if path.isfile(f'./files/assets/images/emojis/{emoji}.webp'):
                sanitized = re.sub(f'(?<!"):!{emoji}:', f'<img loading="lazy" data-bs-toggle="tooltip" alt=":!{emoji}:" title=":!{emoji}:" delay="0" class="emoji mirrored" src="https://{site}/assets/images/emojis/{emoji}.webp">', sanitized)

                if emoji in session["favorite_emojis"]: session["favorite_emojis"][emoji] += 1
                else: session["favorite_emojis"][emoji] = 1

        elif path.isfile(f'./files/assets/images/emojis/{emoji}.webp'):
            sanitized = re.sub(f'(?<!"):{emoji}:', f'<img loading="lazy" data-bs-toggle="tooltip" alt=":{emoji}:" title=":{emoji}:" delay="0" class="emoji" src="https://{site}/assets/images/emojis/{emoji}.webp">', sanitized)

            if emoji in session["favorite_emojis"]: session["favorite_emojis"][emoji] += 1
            else: session["favorite_emojis"][emoji] = 1
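
    # Normalize mobile and shortened URL forms to canonical hosts so the embed
    # rules below can match them.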
    sanitized = (sanitized
                 .replace("https://www.", "https://")
                 .replace("https://youtu.be/", "https://youtube.com/watch?v=")
                 .replace("https://music.youtube.com/watch?v=", "https://youtube.com/watch?v=")
                 .replace("https://open.spotify.com/", "https://open.spotify.com/embed/")
                 .replace("https://streamable.com/", "https://streamable.com/e/")
                 .replace("https://youtube.com/shorts/", "https://youtube.com/watch?v=")
                 .replace("https://mobile.twitter", "https://twitter")
                 .replace("https://m.facebook", "https://facebook")
                 .replace("https://m.wikipedia", "https://wikipedia")
                 .replace("https://m.youtube", "https://youtube"))
for i in re.finditer('" target="_blank">(https://youtube.com/watch\?v\=.*?)</a>', sanitized):
url = i.group(1)
2021-10-06 02:06:07 +00:00
replacing = f'<a href="{url}" rel="nofollow noopener noreferrer" target="_blank">{url}</a>'
2021-10-13 13:10:07 +00:00
htmlsource = f'<iframe class="embedvid" loading="lazy" data-src="{url}" frameborder="0" allow="accelerometer; autoplay; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe>'
2021-08-22 19:51:19 +00:00
sanitized = sanitized.replace(replacing, htmlsource.replace("watch?v=", "embed/"))
for i in re.finditer('<a href="(https://streamable.com/e/.*?)"', sanitized):
2021-07-21 01:12:26 +00:00
url = i.group(1)
2021-10-06 02:06:07 +00:00
replacing = f'<a href="{url}" rel="nofollow noopener noreferrer" target="_blank">{url}</a>'
2021-10-13 13:10:07 +00:00
htmlsource = f'<iframe class="embedvid" loading="lazy" data-src="{url}" frameborder="0" allow="accelerometer; autoplay; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe>'
2021-07-21 01:12:26 +00:00
sanitized = sanitized.replace(replacing, htmlsource)
2021-08-22 19:51:19 +00:00
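
    # Direct .mp4 links in their own paragraph become inline <video> players.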
    for i in re.finditer(r'<p>(https:.*?\.mp4)</p>', sanitized):
        sanitized = sanitized.replace(i.group(0), f'<p><video controls loop preload="metadata" class="embedvid"><source data-src="{i.group(1)}" type="video/mp4"></video>')
    for i in re.finditer('<a href="(https://open.spotify.com/embed/.*?)"', sanitized):
        url = i.group(1)
        replacing = f'<a href="{url}" rel="nofollow noopener noreferrer" target="_blank">{url}</a>'
        htmlsource = f'<iframe data-src="{url}" class="spotify" frameBorder="0" allowtransparency="true" allow="encrypted-media"></iframe>'
        sanitized = sanitized.replace(replacing, htmlsource)
    for rd in ["https://reddit.com/", "https://new.reddit.com/", "https://www.reddit.com/", "https://redd.it/"]:
        sanitized = sanitized.replace(rd, "https://old.reddit.com/")
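
    # Finally, autolink any bare https:// URLs (after a space or at the start of a
    # paragraph) that are still plain text.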
2021-09-02 20:33:48 +00:00
sanitized = re.sub(' (https:\/\/[^ <>]*)', r' <a target="_blank" rel="nofollow noopener noreferrer" href="\1">\1</a>', sanitized)
2021-09-19 11:02:35 +00:00
sanitized = re.sub('<p>(https:\/\/[^ <>]*)', r'<p><a target="_blank" rel="nofollow noopener noreferrer" href="\1">\1</a></p>', sanitized)
2021-09-01 20:58:53 +00:00
return sanitized
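
# Illustrative sketch only (hypothetical input; actual output depends on which
# emoji .webp files exist and on the exact HTML bleach/BeautifulSoup emit):
#   sanitize('<p>hello :marseywave: https://youtu.be/xyz</p>')
# would return the paragraph with the emoji code swapped for an <img> tag and the
# YouTube link rewritten towards an embedded iframe.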