diff --git a/files/classes/comment.py b/files/classes/comment.py
index 6e6cca65e..c8ba86bc2 100644
--- a/files/classes/comment.py
+++ b/files/classes/comment.py
@@ -341,7 +341,7 @@ class Comment(Base):
 		if v.nitter and not '/i/' in body: body = body.replace("www.twitter.com", "nitter.net").replace("twitter.com", "nitter.net")
 
 		if v and v.controversial:
-			for i in re.finditer('(/comments/.*?)"', body):
+			for i in re.finditer('(/comments/.*?)"', body, flags=re.A):
 				url = i.group(1)
 				p = urlparse(url).query
 				p = parse_qs(p)
@@ -402,7 +402,7 @@ class Comment(Base):
 		if v and v.nitter and not '/i/' in body: body = body.replace("www.twitter.com", "nitter.net").replace("twitter.com", "nitter.net")
 
 		if v and v.controversial:
-			for i in re.finditer('(/comments/.*?)"', body):
+			for i in re.finditer('(/comments/.*?)"', body, flags=re.A):
 				url = i.group(1)
 				p = urlparse(url).query
 				p = parse_qs(p)
diff --git a/files/helpers/alerts.py b/files/helpers/alerts.py
index f995b53f3..c1d049432 100644
--- a/files/helpers/alerts.py
+++ b/files/helpers/alerts.py
@@ -68,7 +68,7 @@ def NOTIFY_USERS(text, v):
 		if word in text.lower() and id not in notify_users and v.id != id: notify_users.add(id)
 
 	soup = BeautifulSoup(text, 'lxml')
-	for mention in soup.find_all("a", href=re.compile("^\/id\/([0-9]+)")):
+	for mention in soup.find_all("a", href=re.compile("^\/id\/([0-9]+)", flags=re.A)):
 		id = int(mention["href"].split("/id/")[1])
 		if id != v.id:
 			user = g.db.query(User).filter_by(id=id).one_or_none()
diff --git a/files/helpers/const.py b/files/helpers/const.py
index 9d80bec87..8ec161eb1 100644
--- a/files/helpers/const.py
+++ b/files/helpers/const.py
@@ -668,5 +668,5 @@
 db = db_session()
 marseys_const = [x[0] for x in db.query(Marsey.name).all()] + ['a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z','0','1','2','3','4','5','6','7','8','9','exclamationpoint','period','questionmark']
 db.close()
-if SITE == 'PCM': valid_username_regex = re.compile("^[a-zA-Z0-9_\-А-я]{3,25}$")
-else: valid_username_regex = re.compile("^[a-zA-Z0-9_\-]{3,25}$")
\ No newline at end of file
+if SITE == 'PCM': valid_username_regex = re.compile("^[a-zA-Z0-9_\-А-я]{3,25}$", flags=re.A)
+else: valid_username_regex = re.compile("^[a-zA-Z0-9_\-]{3,25}$", flags=re.A)
\ No newline at end of file
diff --git a/files/helpers/sanitize.py b/files/helpers/sanitize.py
index e55026e40..c8c39ffb1 100644
--- a/files/helpers/sanitize.py
+++ b/files/helpers/sanitize.py
@@ -125,7 +125,7 @@ def sanitize(sanitized, noimages=False, alert=False, comment=False, edit=False):
 
 	sanitized = re.sub('(^|\s|\n|<p>)\/?(s\/(\w|-){3,25})', r'\1<a href="/\2">/\2</a>', sanitized, flags=re.A)
 
-	for i in re.finditer('(^|\s|\n|<p>)@((\w|-){1,25})', sanitized):
+	for i in re.finditer('(^|\s|\n|<p>)@((\w|-){1,25})', sanitized, flags=re.A):
 		u = get_user(i.group(2), graceful=True)
 
 		if u and (not g.v.any_block_exists(u) or g.v.admin_level > 1):
@@ -135,7 +135,7 @@ def sanitize(sanitized, noimages=False, alert=False, comment=False, edit=False):
 
 			sanitized = sanitized.replace(i.group(0), f'''{i.group(1)}@{u.username}''', 1)
 
-	for i in re.finditer('https://i\.imgur\.com/(([^_]*?)\.(jpg|png|jpeg))(?!</code>)', sanitized):
+	for i in re.finditer('https://i\.imgur\.com/(([^_]*?)\.(jpg|png|jpeg))(?!</code>)', sanitized, flags=re.A):
 		sanitized = sanitized.replace(i.group(1), i.group(2) + "_d.webp?maxwidth=9999&fidelity=high")
 
 	if noimages:
@@ -203,7 +203,7 @@ def sanitize(sanitized, noimages=False, alert=False, comment=False, edit=False):
 		old = i.group(0)
 		if 'marseylong1' in old or 'marseylong2' in old or 'marseyllama1' in old or 'marseyllama2' in old: new = old.lower().replace(">", " class='mb-0'>")
 		else: new = old.lower()
-		for i in re.finditer('(?<!"):([^ ]{1,30}?):', new):
+		for i in re.finditer('(?<!"):([^ ]{1,30}?):', new, flags=re.A):
 			emoji = i.group(1).lower()
-			new = re.sub(f'(?<!"):{emoji}:', f'<img loading="lazy" class="emoji" src="/assets/images/emojis/{emoji}.webp">', new, flags=re.I)
+			new = re.sub(f'(?<!"):{emoji}:', f'<img loading="lazy" class="emoji" src="/assets/images/emojis/{emoji}.webp">', new, flags=re.I|re.A)
 			if comment: marseys_used.add(emoji)
 		sanitized = sanitized.replace(old, new)
 
-	emojis = list(re.finditer('(?<!"):([^ ]{1,30}?):', sanitized))
+	emojis = list(re.finditer('(?<!"):([^ ]{1,30}?):', sanitized, flags=re.A))
 	if len(emojis) > 20: edit = True
 
 	captured = []
@@ -247,7 +247,7 @@ def sanitize(sanitized, noimages=False, alert=False, comment=False, edit=False):
 			else: emoji = old
 
 			if path.isfile(f'files/assets/images/emojis/{emoji}.webp'):
-				sanitized = re.sub(f'(?<!"):{emoji}:', f'<img loading="lazy" class="{classes}" src="/assets/images/emojis/{emoji}.webp">', sanitized, flags=re.I)
+				sanitized = re.sub(f'(?<!"):{emoji}:', f'<img loading="lazy" class="{classes}" src="/assets/images/emojis/{emoji}.webp">', sanitized, flags=re.I|re.A)
 				if comment: marseys_used.add(emoji)
 
 		else: classes = 'emoji'
@@ -258,14 +258,14 @@ def sanitize(sanitized, noimages=False, alert=False, comment=False, edit=False):
 			else: emoji = old
 
 			if path.isfile(f'files/assets/images/emojis/{emoji}.webp'):
-				sanitized = re.sub(f'(?<!"):{emoji}:', f'<img loading="lazy" class="{classes}" src="/assets/images/emojis/{emoji}.webp">', sanitized, flags=re.I)
+				sanitized = re.sub(f'(?<!"):{emoji}:', f'<img loading="lazy" class="{classes}" src="/assets/images/emojis/{emoji}.webp">', sanitized, flags=re.I|re.A)
 				if comment: marseys_used.add(emoji)
 
 	sanitized = sanitized.replace("https://youtu.be/", "https://youtube.com/watch?v=").replace("https://music.youtube.com/watch?v=", "https://youtube.com/watch?v=").replace("https://streamable.com/", "https://streamable.com/e/").replace("https://youtube.com/shorts/", "https://youtube.com/watch?v=").replace("https://mobile.twitter", "https://twitter").replace("https://m.facebook", "https://facebook").replace("m.wikipedia.org", "wikipedia.org").replace("https://m.youtube", "https://youtube").replace("https://www.youtube", "https://youtube")
 
 	if "https://youtube.com/watch?v=" in sanitized: sanitized = sanitized.replace("?t=", "&t=")
 
-	for i in re.finditer('" target="_blank">(https://youtube\.com/watch\?v\=(.*?))(?!</code>)', sanitized):
+	for i in re.finditer('" target="_blank">(https://youtube\.com/watch\?v\=(.*?))(?!</code>)', sanitized, flags=re.A):
 		url = i.group(1)
 		yt_id = i.group(2).split('&')[0].split('%')[0]
 		replacing = f'{url}'
@@ -281,17 +281,17 @@ def sanitize(sanitized, noimages=False, alert=False, comment=False, edit=False):
 		sanitized = sanitized.replace(replacing, htmlsource)
 
 	if not noimages:
-		for i in re.finditer('>(https://.*?\.(mp4|webm|mov|MP4|WEBM|MOV))</a>', sanitized):
+		for i in re.finditer('>(https://.*?\.(mp4|webm|mov|MP4|WEBM|MOV))</a>', sanitized, flags=re.A):
 			sanitized = sanitized.replace(f'', f'')
-		for i in re.finditer('<p>(https:.*?\.(mp4|webm|mov|MP4|WEBM|MOV))</p>', sanitized):
+		for i in re.finditer('<p>(https:.*?\.(mp4|webm|mov|MP4|WEBM|MOV))</p>', sanitized, flags=re.A):
 			sanitized = sanitized.replace(i.group(0), f'')
 
 	for rd in ["://reddit.com", "://new.reddit.com", "://www.reddit.com", "://redd.it", "://libredd.it"]: sanitized = sanitized.replace(rd, "://old.reddit.com")
 	sanitized = sanitized.replace("old.reddit.com/gallery", "new.reddit.com/gallery")
 
-	sanitized = re.sub(' (https:\/\/[^ <>]*)', r' <a target="_blank" href="\1">\1</a>', sanitized)
-	sanitized = re.sub('<p>(https:\/\/[^ <>]*)', r'<p><a target="_blank" href="\1">\1</a>', sanitized)
+	sanitized = re.sub(' (https:\/\/[^ <>]*)', r' <a target="_blank" href="\1">\1</a>', sanitized, flags=re.A)
+	sanitized = re.sub('<p>(https:\/\/[^ <>]*)', r'<p><a target="_blank" href="\1">\1</a>', sanitized, flags=re.A)
 
 	if comment:
 		for marsey in g.db.query(Marsey).filter(Marsey.name.in_(marseys_used)).all():
@@ -315,7 +315,7 @@ def filter_emojis_only(title, edit=False, graceful=False):
 
 	title = bleach.clean(title, tags=[])
 
-	emojis = list(re.finditer('(?<!"):([^ ]{1,30}?):', title))
+	emojis = list(re.finditer('(?<!"):([^ ]{1,30}?):', title, flags=re.A))
 	if len(emojis) > 20: edit = True
 
 	captured = []
@@ -335,7 +335,7 @@ def filter_emojis_only(title, edit=False, graceful=False):
 			else: emoji = old
 
 			if path.isfile(f'files/assets/images/emojis/{emoji}.webp'):
-				title = re.sub(f'(?<!"):{emoji}:', f'<img loading="lazy" class="{classes}" src="/assets/images/emojis/{emoji}.webp">', title, flags=re.I)
+				title = re.sub(f'(?<!"):{emoji}:', f'<img loading="lazy" class="{classes}" src="/assets/images/emojis/{emoji}.webp">', title, flags=re.I|re.A)
 
 		else: classes = 'emoji'
@@ -346,9 +346,9 @@ def filter_emojis_only(title, edit=False, graceful=False):
 			else: emoji = old
 
 			if path.isfile(f'files/assets/images/emojis/{emoji}.webp'):
-				title = re.sub(f'(?<!"):{emoji}:', f'<img loading="lazy" class="{classes}" src="/assets/images/emojis/{emoji}.webp">', title, flags=re.I)
+				title = re.sub(f'(?<!"):{emoji}:', f'<img loading="lazy" class="{classes}" src="/assets/images/emojis/{emoji}.webp">', title, flags=re.I|re.A)
 
-	title = re.sub('~~(.*?)~~', r'
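For reference, `re.A` (`re.ASCII`) is the standard-library flag that restricts `\w`, `\W`, `\d`, `\D`, `\s`, `\S`, `\b`, and `\B` to ASCII instead of the full Unicode ranges, which is the behavioral change this patch applies to the patterns above. A minimal, standalone illustration of the difference on a mention-style pattern (the sample strings are hypothetical and not part of the patch):

```python
import re

text = "@пользователь and @user"

# Default behaviour: \w matches Unicode word characters, so the Cyrillic name is captured too.
print(re.findall(r'@(\w{1,25})', text))               # ['пользователь', 'user']

# With re.A, \w is restricted to [a-zA-Z0-9_], so only the ASCII mention matches.
print(re.findall(r'@(\w{1,25})', text, flags=re.A))   # ['user']
```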