forked from MarseyWorld/MarseyWorld
master
parent
810e80c010
commit
2c1d1aceff
|
@ -1004,7 +1004,7 @@ image_check_regex = re.compile(f'!\[\]\(((?!(https:\/\/([a-z0-9-]+\.)*({hosts})\
|
|||
# Matches a bare video URL (mp4/webm/mov) on an approved host at the start of a <p>,
# capturing the leading text (group 1) and the URL (group 2) for embed substitution.
# Raw f-string: the pattern is full of backslashes that are invalid escapes in a
# plain string literal (SyntaxWarning on Python 3.12+); rf'…' keeps bytes identical.
video_sub_regex = re.compile(rf'(<p>[^<]*)(https:\/\/([a-z0-9-]+\.)*({hosts})\/[\w:~,()\-.#&\/=?@%;+]*?\.(mp4|webm|mov))', flags=re.A)
|
||||
# Matches a bare audio URL (mp3/wav/ogg/aac/m4a/flac) on an approved host at the
# start of a <p>; groups mirror video_sub_regex (leading text, full URL).
# Raw f-string avoids invalid-escape SyntaxWarnings; pattern bytes are unchanged.
audio_sub_regex = re.compile(rf'(<p>[^<]*)(https:\/\/([a-z0-9-]+\.)*({hosts})\/[\w:~,()\-.#&\/=?@%;+]*?\.(mp3|wav|ogg|aac|m4a|flac))', flags=re.A)
|
||||
|
||||
# Captures an i.imgur.com image URL (base in group 1, extension in group 2) unless
# it is immediately followed by a closing </code>/</pre>/</a> tag.
# Raw string: '\/' is an invalid escape in a plain literal (SyntaxWarning on 3.12+);
# the compiled pattern is byte-identical.
imgur_regex = re.compile(r'(https:\/\/i\.imgur\.com\/[a-z0-9]+)\.(jpg|png|jpeg|webp)(?!<\/(code|pre|a)>)', flags=re.I|re.A)
|
||||
# Captures an i.imgur.com image URL: base without extension in group 1, extension
# (jpg/png/jpeg/webp) in group 2. Raw string fixes the invalid '\/' escape
# sequences (SyntaxWarning on Python 3.12+) without changing the pattern bytes.
imgur_regex = re.compile(r'(https:\/\/i\.imgur\.com\/[a-z0-9]+)\.(jpg|png|jpeg|webp)', flags=re.I|re.A)
|
||||
# Captures a media.giphy.com GIF URL with the trailing '.gif' stripped (group 1).
# Raw string fixes invalid '\/' escapes (SyntaxWarning on Python 3.12+);
# pattern bytes are unchanged.
giphy_regex = re.compile(r'(https:\/\/media\.giphy\.com\/media\/[a-z0-9]+\/giphy)\.gif', flags=re.I|re.A)
|
||||
|
||||
# Matches a youtube.com watch URL at the start of a <p>: leading text in group 1,
# full URL in group 2, the 5-20 char video id in group 3.
# Raw string fixes invalid '\/' and '\=' escapes (SyntaxWarning on Python 3.12+);
# the compiled pattern is byte-identical.
youtube_regex = re.compile(r'(<p>[^<]*)(https:\/\/youtube\.com\/watch\?v\=([a-z0-9-_]{5,20})[\w\-.#&/=\?@%+]*)', flags=re.I|re.A)
|
||||
|
@ -1027,4 +1027,6 @@ ascii_only_regex = re.compile("[ -~]+", flags=re.A)
|
|||
|
||||
# Captures the path of a twitter.com profile/status URL (group 1) for rewriting
# to a Nitter mirror. Fixes: the '.' in 'twitter.com' was unescaped and matched
# any character (e.g. 'twitterzcom'); now escaped. Raw string also fixes the
# invalid '\/' escape sequences (SyntaxWarning on Python 3.12+).
twitter_to_nitter_regex = re.compile(r"https:\/\/twitter\.com\/(\w{4,15}(\/status\/\d+[^/]*)?)", flags=re.A)
|
||||
|
||||
# Matches the scheme+host prefix of a Reddit (or Reddit-mirror) subreddit URL at
# the start of the string or after whitespace, up to and including '/r/'.
# Fixes: the dots in 'new.reddit.com' and 'www.reddit.com' after 'reddit' were
# unescaped and matched any character; now escaped. Raw string also fixes the
# invalid '\/' escape sequences (SyntaxWarning on Python 3.12+).
reddit_domain_regex = re.compile(r"(^|\s)https:\/\/(reddit\.com|new\.reddit\.com|www\.reddit\.com|redd\.it|libredd\.it|teddit\.net)\/r\/", flags=re.A)
|
||||
|
||||
def make_name(*args, **kwargs):
	"""Return the current request's base URL, ignoring every argument.

	Accepts arbitrary positional/keyword arguments so it can be plugged in
	wherever a key/name callback with an unknown signature is expected.
	"""
	return request.base_url
|
||||
|
|
|
@ -171,6 +171,8 @@ def sanitize(sanitized, edit=False):
|
|||
|
||||
sanitized = sanitized.strip()
|
||||
|
||||
sanitized = normalize_url(sanitized)
|
||||
|
||||
if '```' not in sanitized and '<pre>' not in sanitized:
|
||||
sanitized = linefeeds_regex.sub(r'\1\n\n\2', sanitized)
|
||||
|
||||
|
@ -203,9 +205,6 @@ def sanitize(sanitized, edit=False):
|
|||
if not (v and v.any_block_exists(u)) or (v and v.admin_level >= 2):
|
||||
sanitized = sanitized.replace(i.group(0), f'''{i.group(1)}<a href="/id/{u.id}"><img loading="lazy" src="/pp/{u.id}">@{u.username}</a>''', 1)
|
||||
|
||||
|
||||
sanitized = normalize_url(sanitized)
|
||||
|
||||
soup = BeautifulSoup(sanitized, 'lxml')
|
||||
|
||||
for tag in soup.find_all("img"):
|
||||
|
@ -384,8 +383,7 @@ def filter_emojis_only(title, edit=False, graceful=False):
|
|||
else: return title
|
||||
|
||||
def normalize_url(url):
|
||||
for x in ["reddit.com", "new.reddit.com", "www.reddit.com", "redd.it", "ibredd.it", "teddit.net"]:
|
||||
url = url.replace(f'https://{x}/r/', "https://old.reddit.com/r/")
|
||||
url = reddit_domain_regex.sub(r'\1https://old.reddit.com/r/', url)
|
||||
|
||||
url = url.replace("https://youtu.be/", "https://youtube.com/watch?v=") \
|
||||
.replace("https://music.youtube.com/watch?v=", "https://youtube.com/watch?v=") \
|
||||
|
|
Loading…
Reference in New Issue