prevent users from accidentally doxing themselves through tiktok share links - comment edition

master
Aevann 2023-10-06 22:22:11 +03:00
parent 1fd6295e8c
commit f83427bb12
3 changed files with 38 additions and 9 deletions

View File

@ -6,6 +6,7 @@ from functools import partial
from os import path, listdir
from urllib.parse import parse_qs, urlparse, unquote, ParseResult, urlencode, urlunparse
import time
import requests
from sqlalchemy.sql import func
@ -833,6 +834,10 @@ def normalize_url(url):
return url
def normalize_url_gevent(url):
    """Follow redirects on *url* (e.g. reddit /s/ and tiktok /t/ share links)
    and return the normalized final destination URL.

    Intended to run inside a gevent greenlet. Network failures are swallowed
    deliberately: on any request error we fall back to normalizing the
    original URL instead of letting the exception kill the greenlet.
    """
    try:
        # response.url is the final URL after the redirect chain
        url = requests.get(url, headers=HEADERS, timeout=2, proxies=proxies).url
    except requests.RequestException:
        # best-effort: keep the original link rather than crash the worker
        pass
    return normalize_url(url)
def validate_css(css):
if '@import' in css:
return False, "CSS @import statements are not allowed!"

View File

@ -431,6 +431,8 @@ def comment(v):
for sort in COMMENT_SORTS.keys():
cache.delete(f'post_{c.parent_post}_{sort}')
gevent.spawn(postprocess_comment, c.body, c.body_html, c.id)
if v.client: return c.json
return {"comment": render_template("comments.html", v=v, comments=[c])}
@ -740,8 +742,11 @@ def edit_comment(cid, v):
g.db.add(n)
push_notif({x}, f'New mention of you by @{c.author_name}', c.body, c)
g.db.flush()
gevent.spawn(postprocess_comment, c.body, c.body_html, c.id)
g.db.flush()
return {
"body": c.body,
"comment": c.realbody(v),
@ -765,3 +770,26 @@ def commenters(v, pid, time):
users = sorted(users, key=lambda x: x[1])
return render_template('commenters.html', v=v, users=users)
def postprocess_comment(comment_body, comment_body_html, cid):
    """Rewrite privacy-leaking share links (reddit /s/, tiktok /t/) in an
    already-saved comment, replacing them with their resolved, normalized
    destination URLs in both the raw body and the rendered HTML.

    Runs in a gevent greenlet after the request has finished, so it creates
    its own app context and DB session rather than reusing the request's.
    """
    with app.app_context():
        li = list(reddit_s_url_regex.finditer(comment_body)) + list(tiktok_t_url_regex.finditer(comment_body))
        if not li:
            # nothing to rewrite — skip the DB round-trip entirely
            return
        for i in li:
            old = i.group(0)
            new = normalize_url_gevent(old)
            comment_body = comment_body.replace(old, new)
            comment_body_html = comment_body_html.replace(old, new)
        g.db = db_session()
        try:
            c = g.db.query(Comment).filter_by(id=cid).options(load_only(Comment.id)).one_or_none()
            if c is None:
                # comment vanished (e.g. deleted) between spawn and execution
                return
            c.body = comment_body
            c.body_html = comment_body_html
            g.db.add(c)
            g.db.commit()
        finally:
            # always release the session, even if the commit raised
            g.db.close()
        stdout.flush()

View File

@ -287,21 +287,16 @@ def expand_url(post_url, fragment_url):
return f"{post_url}/{fragment_url}"
def cancer_url_cleaner(url):
    """Follow redirects on a tracking/share link and return the normalized
    final URL.

    Best-effort: if the HTTP request fails the original URL is returned
    unchanged rather than raising.
    """
    try:
        url = requests.get(url, headers=HEADERS, timeout=2, proxies=proxies).url
    except requests.RequestException:
        # narrow catch: only network/HTTP errors — a bare except here would
        # also swallow KeyboardInterrupt/SystemExit
        return url
    return normalize_url(url)
def postprocess_post(post_url, post_body, post_body_html, pid, generate_thumb, edit):
with app.app_context():
if post_url and (reddit_s_url_regex.fullmatch(post_url) or tiktok_t_url_regex.fullmatch(post_url)):
post_url = cancer_url_cleaner(post_url)
post_url = normalize_url_gevent(post_url)
if post_body:
li = list(reddit_s_url_regex.finditer(post_body)) + list(tiktok_t_url_regex.finditer(post_body))
for i in li:
old = i.group(0)
new = cancer_url_cleaner(old)
new = normalize_url_gevent(old)
post_body = post_body.replace(old, new)
post_body_html = post_body_html.replace(old, new)
@ -399,7 +394,8 @@ def postprocess_post(post_url, post_body, post_body_html, pid, generate_thumb, e
g.db.commit()
g.db.close()
stdout.flush()
stdout.flush()
@app.post("/is_repost")