remotes/1693045480750635534/spooky-22
Aevann1 2021-10-20 16:37:46 +02:00
parent e705cba226
commit 8d00745dc3
18 changed files with 56 additions and 331 deletions

View File

@ -9,10 +9,9 @@ from sqlalchemy.orm import relationship, deferred, lazyload
from files.__main__ import Base
from files.classes.votes import CommentVote
from files.helpers.const import AUTOPOLLER_ACCOUNT
from files.helpers.const import AUTOPOLLER_ACCOUNT, censor_slurs
from files.helpers.lazy import lazy
from .flags import CommentFlag
from ..helpers.word_censor import censor_slurs
site = environ.get("DOMAIN").strip()
@ -53,8 +52,10 @@ class Comment(Base):
senttouser = relationship("User", primaryjoin="User.id==Comment.sentto", viewonly=True)
parent_comment = relationship("Comment", remote_side=[id], viewonly=True)
child_comments = relationship("Comment", remote_side=[parent_comment_id], viewonly=True)
awards = relationship("AwardRelationship", viewonly=True)
#awards = relationship("AwardRelationship", viewonly=True)
awards = None
def __init__(self, *args, **kwargs):
if "created_utc" not in kwargs:
@ -294,7 +295,9 @@ class Comment(Base):
return data
def realbody(self, v):
if self.post and self.post.club and not (v and v.paid_dues): return "<p>COUNTRY CLUB ONLY</p>"
if self.post and self.post.club and not (v and v.paid_dues):
if v: return f"<p>{v.username} dox/p>"
return "<p>COUNTRY CLUB ONLY</p>"
body = self.body_html
@ -320,7 +323,9 @@ class Comment(Base):
return body
def plainbody(self, v):
if self.post and self.post.club and not (v and v.paid_dues): return "<p>COUNTRY CLUB ONLY</p>"
if self.post and self.post.club and not (v and v.paid_dues):
if v: return f"<p>{v.username} dox/p>"
return "<p>COUNTRY CLUB ONLY</p>"
body = self.body

View File

@ -9,10 +9,9 @@ from sqlalchemy import *
from sqlalchemy.orm import relationship, deferred
from files.__main__ import Base
from files.helpers.const import AUTOPOLLER_ACCOUNT
from files.helpers.const import AUTOPOLLER_ACCOUNT, censor_slurs
from files.helpers.lazy import lazy
from .flags import Flag
from ..helpers.word_censor import censor_slurs
site = environ.get("DOMAIN").strip()
site_name = environ.get("SITE_NAME").strip()
@ -340,9 +339,11 @@ class Submission(Base):
else: return ""
def realbody(self, v):
if self.club and not (v and v.paid_dues): return "COUNTRY CLUB ONLY"
body = self.body_html
if self.club and not (v and v.paid_dues):
if v: return f"<p>{v.username} dox/p>"
return "<p>COUNTRY CLUB ONLY</p>"
body = self.body_html
body = censor_slurs(body, v)
if v and not v.oldreddit: body = body.replace("old.reddit.com", "reddit.com")
@ -350,9 +351,11 @@ class Submission(Base):
return body
def plainbody(self, v):
if self.club and not (v and v.paid_dues): return "COUNTRY CLUB ONLY"
body = self.body
if self.club and not (v and v.paid_dues):
if v: return f"<p>{v.username} dox/p>"
return "<p>COUNTRY CLUB ONLY</p>"
body = self.body
body = censor_slurs(body, v)
if v and not v.oldreddit: body = body.replace("old.reddit.com", "reddit.com")

View File

@ -1,39 +1,8 @@
from os import environ
import re
site = environ.get("DOMAIN", '').strip()
#####################
# Formatting rules: #
#####################
# If all letters are lowercase then it will match the lowercase form, the forms with the first letter of the first word (or of every word) uppercased, and the all-uppercase form
# "dancing israelis" will match:
# - "dancing israelis"
# - "Dancing israelis"
# - "Dancing Israelis"
# - "DANCING ISRAELIS"
#
# If some letters are Uppercase, the same, but with the additional option of the original casing, and respecting already existing uppercase
# "NoNewNormal" will match:
# - "NoNewNormal"
# - "nonewnormal"
# - "Nonewnormal"
# - "NONEWNORMAL"
#
# Now on the replacement side, the replacement will have the same capitalization as the slur if the replacement is lowercase
# "kill yourself" -> "keep yourself safe"
# "Kill yourself" -> "Keep yourself safe"
# "Kill Yourself" -> "Keep Yourself Safe"
# "KILL YOURSELF" -> "KEEP YOURSELF SAFE"
#
# If the replacement side has some capitalization, then that capitalization will always be maintained
# for the pair: <"pajeet": "sexy Indian dude"> it will replace:
# "pajeet" -> "sexy Indian dude"
# "Pajeet" -> "Sexy Indian dude"
# "PAJEET" -> "SEXY INDIAN DUDE"
#
# There is a super special case that if the replacer starts with "http" then it never changes capitalization
#
# TL;DR: Just read the above once, or don't, and try to guess!
SLURS = {
"faggot": "cute twink",
"fag": "cute twink",
@ -146,4 +115,16 @@ else:
AUTOPOLLER_ACCOUNT = 6
PUSHER_INSTANCE_ID = '02ddcc80-b8db-42be-9022-44c546b4dce6'
PUSHER_KEY = environ.get("PUSHER_KEY", "").strip()
PUSHER_KEY = environ.get("PUSHER_KEY", "").strip()
single_words = "|".join([slur.lower() for slur in SLURS.keys()])

# Match a whole slur only when preceded by whitespace or '>' and followed by
# whitespace, '<', ',' or '.' so substrings inside other words stay untouched.
SLUR_REGEX = re.compile(rf"(?i)(?<=\s|>)({single_words})(?=[\s<,.])")

# BUG FIX: SLURS.items() is a dict view and has no .get(); build a real dict
# so sub_matcher's lookups work.
REPLACE_MAP = dict(SLURS)


def sub_matcher(match: re.Match) -> str:
    """Return the replacement text for a matched slur.

    The regex is case-insensitive while REPLACE_MAP keys are lowercase, so
    normalise the match before the lookup. Fall back to the original text:
    returning None here would make re.sub raise TypeError.
    """
    found = match.group(0)
    return REPLACE_MAP.get(found.lower(), found)


def censor_slurs(body: str, logged_user) -> str:
    """Censor slurs in *body* for logged-out viewers, and for logged-in
    viewers who have the slur-replacer preference enabled."""
    if not logged_user or logged_user.slurreplacer:
        body = SLUR_REGEX.sub(sub_matcher, body)
    return body

View File

@ -1,77 +0,0 @@
from collections import ChainMap
import re
from re import Match
from typing import Dict, Pattern
from files.helpers.const import SLURS
def first_upper(phrase: str) -> str:
    """Return *phrase* with only its first character uppercased.

    Unlike str.capitalize(), the remaining characters keep their original
    case. Fixed: an empty string is returned unchanged instead of raising
    IndexError on phrase[0].
    """
    return phrase[:1].upper() + phrase[1:]
def first_all_upper(phrase: str) -> str:
    """Uppercase the first character of every space-separated word.

    The rest of each word keeps its original case (unlike str.title(),
    which lowercases the remainder). Fixed: empty strings and empty words
    (e.g. from doubled spaces) no longer raise IndexError.
    """
    return " ".join(word[:1].upper() + word[1:] for word in phrase.split(" "))
def get_permutations_slur(slur: str, replacer: str = "_") -> Dict[str, str]:
    """Map every casing permutation of *slur* to the matching casing of *replacer*.

    Pairs are inserted in a deliberate order so that when keys collide (an
    already-capitalized slur), the 'correctest' variant written last wins:
      - ALL UPPER        -> ALL UPPER replacer
      - First All Upper  -> First All Upper replacer
      - all lower        -> replacer as given
      - original casing  -> replacer as given
      - First upper      -> First upper replacer
    Replacers starting with "http" (links, e.g. the :marseymerchant: case)
    always keep their exact casing.

    Fixed: empty words (e.g. produced by doubled spaces in the slur) no
    longer raise IndexError during capitalization.
    """
    def _cap_first(text: str) -> str:
        # Uppercase only the first character; safe on "".
        return text[:1].upper() + text[1:]

    def _cap_words(text: str) -> str:
        return " ".join(_cap_first(word) for word in text.split(" "))

    stripped = slur.strip()
    keep_casing = replacer.startswith("http")

    def _styled(transform):
        # Links never change case; everything else follows the slur's case.
        return replacer if keep_casing else transform(replacer)

    return {
        stripped.upper(): _styled(str.upper),
        _cap_words(stripped): _styled(_cap_words),
        stripped.lower(): replacer,
        stripped: replacer,
        _cap_first(stripped): _styled(_cap_first),
    }
def create_slur_regex() -> Pattern[str]:
    """Compile the case-insensitive pattern that finds configured slurs.

    A slur only matches as a whole word: preceded by whitespace or '>',
    followed by whitespace, '<', ',' or '.'.
    """
    alternatives = "|".join(word.lower() for word in SLURS)
    return re.compile(rf"(?i)(?<=\s|>)({alternatives})(?=[\s<,.])")
def create_replace_map() -> Dict[str, str]:
    """Flatten the per-slur permutation dicts into one lookup table.

    On key collisions between different slurs the first slur in SLURS wins,
    matching the original ChainMap-based merge.
    """
    merged: Dict[str, str] = {}
    for slur, replacer in SLURS.items():
        for variant, substitute in get_permutations_slur(slur, replacer).items():
            merged.setdefault(variant, substitute)
    return merged
# Built once at import time from SLURS; tests reassign these module
# attributes directly after patching SLURS.
SLUR_REGEX = create_slur_regex()
REPLACE_MAP = create_replace_map()
def sub_matcher(match: Match) -> str:
    """Return the replacement string for a regex match.

    Tries the exact casing first, then the all-lowercase form; if neither
    yields a truthy replacement, the original matched text is kept.
    """
    found = match.group(0)
    replacement = REPLACE_MAP.get(found) or REPLACE_MAP.get(found.lower())
    return replacement or found
def censor_slurs(body: str, logged_user) -> str:
    """Censor slurs in *body* unless the viewer is logged in with the
    slur-replacer preference turned off.

    Substitution is best-effort: any failure is printed and the body is
    returned unmodified rather than breaking page rendering.
    """
    wants_censoring = not logged_user or logged_user.slurreplacer
    if wants_censoring:
        try:
            body = SLUR_REGEX.sub(sub_matcher, body)
        except Exception as e:
            print(e)
    return body

View File

@ -342,16 +342,13 @@ def award_comment(cid, v):
if request.referrer and len(request.referrer) > 1: return redirect(request.referrer)
else: return redirect("/")
@app.get("/admin/user_award")
@auth_required
@app.get("/admin/awards")
@admin_level_required(6)
def admin_userawards_get(v):
if v.admin_level < 6:
abort(403)
return render_template("admin/awards.html", awards=list(AWARDS.values()), v=v)
return render_template("admin/user_award.html", awards=list(AWARDS.values()), v=v)
@app.post("/admin/user_award")
@app.post("/admin/awards")
@limiter.limit("1/second")
@auth_required
@validate_formkey
@ -399,4 +396,4 @@ def admin_userawards_post(v):
g.db.commit()
return render_template("admin/user_award.html", awards=list(AWARDS.values()), v=v)
return render_template("admin/awards.html", awards=list(AWARDS.values()), v=v)

View File

@ -37,6 +37,13 @@ def post_pid_comment_cid(cid, pid=None, anything=None, v=None):
comment = get_comment(cid, v=v)
if v and request.values.get("read"):
notif = g.db.query(Notification).options(lazyload('*')).filter_by(comment_id=cid, user_id=v.id, read=False).first()
if notif:
notif.read = True
g.db.add(notif)
g.db.commit()
if comment.post and comment.post.club and not (v and v.paid_dues): abort(403)
if not comment.parent_submission and not (v and (comment.author.id == v.id or comment.sentto == v.id)) and not (v and v.admin_level == 6) : abort(403)
@ -525,7 +532,7 @@ def api_comment(v):
'notification': {
'title': f'New reply by @{v.username}',
'body': c.body,
'deep_link': f'http://{site}{c.permalink}?context=10#context',
'deep_link': f'http://{site}{c.permalink}?context=10&read=true#context',
},
},
},

View File

@ -145,7 +145,7 @@ def post_id(pid, anything=None, v=None):
elif sort == "controversial":
comments = comments.order_by(-1 * Comment.upvotes * Comment.downvotes * Comment.downvotes)
elif sort == "top":
comments = comments.order_by(Comment.downvotes - Comment.upvotes)
comments = comments.order_by(Comment.upvotes + Comment.downvotes)
elif sort == "bottom":
comments = comments.order_by(Comment.upvotes - Comment.downvotes)
@ -170,7 +170,7 @@ def post_id(pid, anything=None, v=None):
elif sort == "controversial":
comments = comments.order_by(-1 * Comment.upvotes * Comment.downvotes * Comment.downvotes)
elif sort == "top":
comments = comments.order_by(Comment.downvotes - Comment.upvotes)
comments = comments.order_by(Comment.upvotes + Comment.downvotes)
elif sort == "bottom":
comments = comments.order_by(Comment.upvotes - Comment.downvotes)

View File

@ -33,7 +33,7 @@
<h4>Grant</h4>
<ul>
<li><a href="/admin/user_award">Give User Award</a></li>
<li><a href="/admin/awards">Give User Award</a></li>
<li><a href="/admin/badge_grant">Badges</a></li>
</ul>

View File

@ -35,7 +35,7 @@
<pre></pre>
<h5>User Award Grant</h5>
<form action="/admin/user_award", method="post">
<form action="/admin/awards", method="post">
<input type="hidden" name="formkey" value="{{v.formkey}}">

View File

@ -10,7 +10,7 @@
<div class="modal desktop-expanded-image-modal" id="expandImageModal" tabindex="-1" role="dialog" aria-labelledby="expandImageModalTitle" aria-hidden="true">
<div class="modal-dialog modal-xl modal-dialog-centered mx-auto" role="document" style="width: fit-content;">
<div class="modal-content bg-transparent shadow-none m-5 m-md-0">
<div class="modal-content bg-transparent shadow-none m-4 m-md-0">
<div class="modal-body text-center p-0">
<div class="d-inline-block position-relative">

View File

@ -310,7 +310,7 @@
{% if not p.is_image and p.thumb_url and not p.embed_url %}
<div class="card-header bg-transparent border-0 d-none d-md-flex flex-row flex-nowrap pl-3 p-0">
<a rel="nofollow noopener noreferrer" href="{{p.realurl(v)}}" style="height: fit-content;" {% if not v or v.newtabexternal %}target="_blank"{% endif %}><img loading="lazy" src="/assets/images/loading.gif" data-src="{{p.thumb_url}}" class="post-img d-none d-md-block" alt="Unable to load image"></a>
<a rel="nofollow noopener noreferrer" href="{{p.realurl(v)}}" style="height: fit-content;" {% if not v or v.newtabexternal %}target="_blank"{% endif %}><img loading="lazy" src="{{p.thumb_url}}" class="post-img d-none d-md-block" alt="Unable to load image"></a>
</div>
{% endif %}

View File

@ -140,19 +140,19 @@
<img loading="lazy" src="/assets/images/emojis/marseyglow.webp" class="post-img">
{% elif not p.url %}
<a {% if v and v.newtab %}target="_blank"{% endif %} {% if v %}href="{{p.permalink}}"{% else %}href="/logged_out{{p.permalink}}"{% endif %}>
<img loading="lazy" src="/assets/images/loading.gif" data-src="{{p.thumb_url}}" class="post-img">
<img loading="lazy" src="{{p.thumb_url}}" class="post-img">
</a>
{% elif p.is_image %}
<a href="javascript:void(0)" data-bs-toggle="modal" data-bs-target="#expandImageModal" data-bs-url="{{p.realurl(v)}}" onclick="expandDesktopImage('{{ p.realurl(v) }}')">
<img loading="lazy" src="/assets/images/loading.gif" data-src="{{p.thumb_url}}" class="post-img">
<img loading="lazy" src="{{p.thumb_url}}" class="post-img">
</a>
{% elif (p.url and p.url.lower().endswith('.mp4')) or (p.embed_url and "youtu" in p.domain) or (p.url and "streamable.com/e/" in p.url) %}
<a href="javascript:void(0)" onclick="document.getElementById('video-{{p.id}}').classList.toggle('d-none')">
<img loading="lazy" src="/assets/images/loading.gif" data-src="{{p.thumb_url}}" class="post-img">
<img loading="lazy" src="{{p.thumb_url}}" class="post-img">
</a>
{% else %}
<a {% if not v or v.newtabexternal %}target="_blank"{% endif %} rel="nofollow noopener noreferrer" href="{{p.realurl(v)}}">
<img loading="lazy" src="/assets/images/loading.gif" data-src="{{p.thumb_url}}" class="post-img">
<img loading="lazy" src="{{p.thumb_url}}" class="post-img">
</a>
{% endif %}
</div>

View File

@ -21,7 +21,6 @@ requests
SQLAlchemy
psycopg2-binary
pusher_push_notifications
pytest
youtube-dl
yattag
webptools

4
setup
View File

@ -3,10 +3,6 @@ sudo apt update
sudo apt -y upgrade
sudo apt -y install postgresql postgresql-contrib
sudo apt -y install redis-server
cp redis.conf /etc/redis/redis.conf
sudo systemctl restart redis.service
cp pg_hba.conf /etc/postgresql/12/main/pg_hba.conf
sudo service postgresql restart
sudo psql -U postgres -f schema.sql postgres
sudo psql -U postgres -f seed-db.sql postgres
sudo apt -y install python3-pip

View File

View File

View File

@ -1,186 +0,0 @@
import re
from unittest.mock import patch
from assertpy import assert_that
from files.helpers import word_censor
from files.helpers.word_censor import create_replace_map, censor_slurs, sub_matcher, \
get_permutations_slur, first_upper, first_all_upper, create_slur_regex
def test_first_upper():
    """Only the first character is uppercased; existing case is preserved."""
    assert_that(first_upper("USS liberty")).is_equal_to("USS liberty")
    assert_that(first_upper("uss liberty")).is_equal_to("Uss liberty")
    assert_that(first_upper("uss Liberty")).is_equal_to("Uss Liberty")
def test_first_all_upper():
    """Each word's first character is uppercased; the rest is untouched."""
    assert_that(first_all_upper("USS liberty")).is_equal_to("USS Liberty")
    assert_that(first_all_upper("uss liberty")).is_equal_to("Uss Liberty")
    assert_that(first_all_upper("uss Liberty")).is_equal_to("Uss Liberty")
def test_get_permutations_slur():
    """Multi-word slur: all four casing permutations map to the replacer
    with matching casing."""
    expected = {
        "USS liberty incident": "Tragic accident aboard the USS Liberty",
        "uss liberty incident": "tragic accident aboard the USS Liberty",
        "USS Liberty Incident": "Tragic Accident Aboard The USS Liberty",
        "USS LIBERTY INCIDENT": "TRAGIC ACCIDENT ABOARD THE USS LIBERTY",
    }

    result = get_permutations_slur("USS liberty incident", "tragic accident aboard the USS Liberty")

    assert_that(result).is_equal_to(expected)
# NOTE(review): "wiht" is a typo for "with" in the test name.
def test_get_permutations_slur_wiht_link_replacer():
    """Link replacers (starting with "http") keep their exact casing for
    every slur permutation."""
    expected = {
        "kike": "https://sciencedirect.com/science/article/abs/pii/S016028960600033X",
        "Kike": "https://sciencedirect.com/science/article/abs/pii/S016028960600033X",
        "KIKE": "https://sciencedirect.com/science/article/abs/pii/S016028960600033X",
    }

    result = get_permutations_slur("kike", "https://sciencedirect.com/science/article/abs/pii/S016028960600033X")

    assert_that(result).is_equal_to(expected)
@patch("files.helpers.word_censor.SLURS", {
    "kill yourself": "keep yourself safe",
    "faggot": "cute twink",
    "nig": "🏀",
    "retard": "r-slur",
})
def test_create_slur_regex():
    """The alternation is built from lowercase SLURS keys wrapped in the
    word-boundary look-arounds."""
    expected = r"(?i)(?<=\s|>)(kill yourself|faggot|nig|retard)(?=[\s<,.])"

    assert_that(create_slur_regex()).is_equal_to(re.compile(expected))
@patch("files.helpers.word_censor.SLURS", {
    "tranny": "🚂🚃🚃",
    "kill yourself": "keep yourself safe",
    "faggot": "cute twink",
    "NoNewNormal": "NoNewNormal",
    "nig": "🏀",
})
def test_create_replace_map():
    """The flattened map contains every casing permutation; emoji and
    already-capitalized replacers follow the permutation rules."""
    expected = {
        "tranny": "🚂🚃🚃",
        "Tranny": "🚂🚃🚃",
        "TRANNY": "🚂🚃🚃",
        "kill yourself": "keep yourself safe",
        "Kill yourself": "Keep yourself safe",
        "Kill Yourself": "Keep Yourself Safe",
        "KILL YOURSELF": "KEEP YOURSELF SAFE",
        "faggot": "cute twink",
        "Faggot": "Cute twink",
        "FAGGOT": "CUTE TWINK",
        "NoNewNormal": "NoNewNormal",
        "nonewnormal": "NoNewNormal",
        "NONEWNORMAL": "NONEWNORMAL",
        "nig": "🏀",
        "Nig": "🏀",
        "NIG": "🏀",
    }

    result = create_replace_map()

    assert_that(result).is_equal_to(expected)
@patch("files.helpers.word_censor.REPLACE_MAP", {'retard': 'r-slur', 'Faggot': 'Cute twink', 'NIG': '🏀'})
def test_sub_matcher():
    """sub_matcher tries the exact casing, then the lowercase form."""
    regex = re.compile(r"(?i)(?<=\s|>)(kill yourself|retard|nig|faggot)(?=[\s<,.])")
    match = regex.search("<p>retard</p>")
    assert_that(sub_matcher(match)).is_equal_to("r-slur")

    # Mixed casing falls back to the lowercase lookup.
    match = regex.search("<p>ReTaRd</p>")
    assert_that(sub_matcher(match)).is_equal_to("r-slur")

    match = regex.search("<p>NIG</p>")
    assert_that(sub_matcher(match)).is_equal_to("🏀")

    match = regex.search("<p>Faggot </p>")
    assert_that(sub_matcher(match)).is_equal_to("Cute twink")
@patch("files.helpers.word_censor.SLURS", {
    'retard': 'r-slur',
    'manlet': 'little king',
    'nig': '🏀',
    'i hate Carp': 'i love Carp',
    'kike': 'https://sciencedirect.com/science/article/abs/pii/S016028960600033X'
})
def test_censor_slurs():
    """End-to-end censoring: word boundaries, casing rules, multi-word
    slurs and link replacers."""
    # Rebuild the module-level singletons from the patched SLURS.
    word_censor.REPLACE_MAP = create_replace_map()
    word_censor.SLUR_REGEX = create_slur_regex()

    assert_that(censor_slurs("<p>retard</p>", None)).is_equal_to("<p>r-slur</p>")
    assert_that(censor_slurs('... ReTaRd ...', None)).is_equal_to('... r-slur ...')
    assert_that(censor_slurs("<p>Manlet get out!</p>", None)).is_equal_to("<p>Little king get out!</p>")
    assert_that(censor_slurs("... retard. other", None)).is_equal_to("... r-slur. other")
    assert_that(censor_slurs("... retard, other", None)).is_equal_to("... r-slur, other")

    # Known limitations of the boundary assertions — deliberately unchanged:
    assert_that(censor_slurs("<p>preretard</p>", None)).is_equal_to("<p>preretard</p>")
    assert_that(censor_slurs("that is Retarded like", None)).is_equal_to("that is Retarded like")
    assert_that(censor_slurs("that is SUPERRETARD like", None)).is_equal_to("that is SUPERRETARD like")
    assert_that(censor_slurs('... "retard" ...', None)).is_equal_to('... "retard" ...')
    assert_that(censor_slurs('... xretardx ...', None)).is_equal_to('... xretardx ...')

    assert_that(censor_slurs("LLM is a manlet hehe", None)).is_equal_to("LLM is a little king hehe")
    assert_that(censor_slurs("LLM is :marseycapitalistmanlet: hehe", None)) \
        .is_equal_to("LLM is :marseycapitalistmanlet: hehe")

    assert_that(censor_slurs('... Nig ...', None)).is_equal_to('... 🏀 ...')
    assert_that(censor_slurs('<p>NIG</p>', None)).is_equal_to('<p>🏀</p>')
    assert_that(censor_slurs('... nigeria ...', None)).is_equal_to('... nigeria ...')

    # Multi-word slur with partially-capitalized replacer.
    assert_that(censor_slurs('... i hate Carp ...', None)).is_equal_to('... i love Carp ...')
    assert_that(censor_slurs('... i hate carp ...', None)).is_equal_to('... i love Carp ...')
    assert_that(censor_slurs('... I hate Carp ...', None)).is_equal_to('... I love Carp ...')
    assert_that(censor_slurs('... I Hate Carp ...', None)).is_equal_to('... I Love Carp ...')
    assert_that(censor_slurs('... I HATE CARP ...', None)).is_equal_to('... I LOVE CARP ...')
    assert_that(censor_slurs('... I Hate carp ...', None)).is_equal_to('... i love Carp ...')
    assert_that(censor_slurs('... i Hate Carp ...', None)).is_equal_to('... i love Carp ...')
    assert_that(censor_slurs('... i Hate carp ...', None)).is_equal_to('... i love Carp ...')
    assert_that(censor_slurs('... i Hate carp ...', None)).is_equal_to('... i love Carp ...')

    assert_that(censor_slurs('... i hate a carp ...', None)).is_equal_to('... i hate a carp ...')

    assert_that(censor_slurs("<p>retard Manlet NIG</p>", None)).is_equal_to("<p>r-slur Little king 🏀</p>")

    # Link replacers keep their exact casing regardless of the slur's case.
    assert_that(censor_slurs('... kike ...', None)) \
        .is_equal_to('... https://sciencedirect.com/science/article/abs/pii/S016028960600033X ...')
    assert_that(censor_slurs('... Kike ...', None)) \
        .is_equal_to('... https://sciencedirect.com/science/article/abs/pii/S016028960600033X ...')
    assert_that(censor_slurs('... KIKE ...', None)) \
        .is_equal_to('... https://sciencedirect.com/science/article/abs/pii/S016028960600033X ...')
@patch("files.helpers.word_censor.SLURS", {'retard': 'r-slur', 'manlet': 'little king', 'nig': '🏀'})
def test_censor_slurs_does_not_error_out_on_exception():
    """A broken (None) replacement must not raise; the offending word is
    left as-is while the other slurs are still censored."""
    word_censor.REPLACE_MAP = create_replace_map()
    word_censor.SLUR_REGEX = create_slur_regex()
    # Sabotage two entries to force sub_matcher down its fallback path.
    word_censor.REPLACE_MAP["manlet"] = None
    word_censor.REPLACE_MAP["Manlet"] = None

    assert_that(censor_slurs(">retard Manlet NIG<", None)).is_equal_to(">r-slur Manlet 🏀<")
@patch("files.helpers.word_censor.SLURS", {'retard': 'r-slur', 'manlet': 'little king'})
def test_censor_slurs_does_not_censor_on_flag_disabled():
    """Logged-in users with slurreplacer disabled see the original text;
    with it enabled the text is censored."""
    word_censor.REPLACE_MAP = create_replace_map()
    word_censor.SLUR_REGEX = create_slur_regex()

    # Minimal stand-in for the real User model; only .slurreplacer is read.
    class User:
        def __init__(self, slurreplacer):
            self.slurreplacer = slurreplacer

    logger_user = User(slurreplacer=False)
    assert_that(censor_slurs("<p>retard</p>", logger_user)).is_equal_to("<p>retard</p>")

    logger_user = User(slurreplacer=True)
    assert_that(censor_slurs("<p>retard</p>", logger_user)).is_equal_to("<p>r-slur</p>")