From fc85aab39e666fe7a56e21cbd55fcf23e42df954 Mon Sep 17 00:00:00 2001
From: atrc445
Date: Sun, 15 Aug 2021 21:17:19 +0200
Subject: [PATCH] fix controversial sorting setting for comments

---
 files/classes/comment.py  | 16 +++++++++++++++-
 files/helpers/sanitize.py | 15 ++-------------
 2 files changed, 17 insertions(+), 14 deletions(-)

diff --git a/files/classes/comment.py b/files/classes/comment.py
index 0d147612a..0051a6e0a 100644
--- a/files/classes/comment.py
+++ b/files/classes/comment.py
@@ -1,3 +1,5 @@
+import re
+from urllib.parse import urlencode, urlparse, parse_qs
 from flask import *
 from sqlalchemy import *
 from sqlalchemy.orm import relationship, deferred
@@ -247,6 +249,18 @@ class Comment(Base, Age_times, Scores, Stndrd, Fuzzing):
         if not v or v.slurreplacer: body = body.replace(" nigger"," 🏀").replace(" Nigger"," 🏀").replace(" NIGGER"," 🏀").replace(" pedo"," libertarian").replace(" Pedo"," Libertarian ").replace(" PEDO"," LIBERTARIAN ").replace(" tranny"," 🚄").replace(" Tranny"," 🚄").replace(" TRANNY"," 🚄").replace(" fag"," cute twink").replace(" Fag"," Cute twink").replace(" FAG"," CUTE TWINK").replace(" faggot"," cute twink").replace(" Faggot"," Cute twink").replace(" FAGGOT"," CUTE TWINK").replace(" trump"," DDR").replace(" Trump"," DDR").replace(" TRUMP"," DDR").replace(" biden"," DDD").replace(" Biden"," DDD").replace(" BIDEN"," DDD").replace(" steve akins"," penny verity oaken").replace(" Steve Akins"," Penny Verity Oaken").replace(" STEVE AKINS"," PENNY VERITY OAKEN").replace(" RETARD"," RSLUR").replace(" rapist"," male feminist").replace(" Rapist"," Male feminist").replace(" RAPIST"," MALE FEMINIST").replace(" RETARD"," RSLUR").replace(" rapist"," male feminist").replace(" Rapist"," Male feminist").replace(" RAPIST"," MALE FEMINIST").replace(" RETARD"," RSLUR").replace(" rapist"," male feminist").replace(" Rapist"," Male feminist").replace(" RAPIST"," MALE FEMINIST").replace(" kill yourself"," keep yourself safe").replace(" KILL YOURSELF"," KEEP YOURSELF SAFE").replace(" trannie"," 🚄").replace(" Trannie"," 🚄").replace(" TRANNIE"," 🚄").replace(" troon"," 🚄").replace(" Troon"," 🚄").replace(" TROON"," 🚄")
         if v and not v.oldreddit: body = body.replace("old.reddit.com", "reddit.com")
 
+        if v.controversial:
+            for i in re.finditer('(/comments/.*?)"', body):
+                url = i.group(1)
+                p = urlparse(url).query
+                p = parse_qs(p)
+
+                if 'sort' not in p:
+                    p['sort'] = ['controversial']
+
+                url_noquery = url.split('?')[0]
+                body = body.replace(url, f"{url_noquery}?{urlencode(p, True)}")
+
         return body
 
     @property
@@ -309,4 +323,4 @@ class Notification(Base):
 
 
     def __repr__(self):
-        return f"<Notification(id={self.id})>"
\ No newline at end of file
+        return f"<Notification(id={self.id})>"
diff --git a/files/helpers/sanitize.py b/files/helpers/sanitize.py
index 7d95bc3e0..827d24fe5 100644
--- a/files/helpers/sanitize.py
+++ b/files/helpers/sanitize.py
@@ -1,7 +1,7 @@
 import bleach
 from bs4 import BeautifulSoup
 from bleach.linkifier import LinkifyFilter
-from urllib.parse import ParseResult, urlunparse, urlencode, urlparse, parse_qs
+from urllib.parse import ParseResult, urlunparse, urlparse
 from functools import partial
 from .get import *
 from os import path
@@ -204,18 +204,7 @@ def sanitize(text, linkgen=False):
     for rd in ["https://reddit.com/", "https://new.reddit.com/", "https://www.reddit.com/", "https://redd.it/"]:
         sanitized = sanitized.replace(rd, "https://old.reddit.com/")
 
-    for i in re.finditer('(/comments/.*?)"', sanitized):
-        url = i.group(1)
-        p = urlparse(url).query
-        p = parse_qs(p)
-
-        if 'sort' not in p:
-            p['sort'] = ['controversial']
-
-        url_noquery = url.split('?')[0]
-        sanitized = sanitized.replace(url, f"{url_noquery}?{urlencode(p, True)}")
-
     for i in re.finditer('<p>(https://.*)</p>', sanitized):
         sanitized = sanitized.replace(i.group(1), f"{i.group(1)}")
 
-    return sanitized
\ No newline at end of file
+    return sanitized