2022-07-05 22:11:45 +00:00
import functools
2022-11-15 09:19:08 +00:00
import random
import re
import signal
from functools import partial
from os import path
2022-12-04 21:46:27 +00:00
from typing import Any
2022-11-15 09:19:08 +00:00
from urllib . parse import parse_qs , urlparse
2022-05-04 23:09:46 +00:00
import bleach
2022-05-25 00:27:41 +00:00
from bleach . css_sanitizer import CSSSanitizer
2022-07-15 13:27:45 +00:00
from bleach . linkifier import LinkifyFilter
2022-11-15 09:19:08 +00:00
from bs4 import BeautifulSoup
2022-05-04 23:09:46 +00:00
from mistletoe import markdown
2022-11-15 09:19:08 +00:00
from files . classes . domains import BannedDomain
2022-12-11 23:44:34 +00:00
from files . helpers . config . const import *
2022-11-15 09:19:08 +00:00
from files . helpers . const_stateful import *
from files . helpers . regex import *
from . get import *
2022-05-04 23:09:46 +00:00
2022-11-02 07:08:02 +00:00
# TLD whitelist consumed by build_url_re() below: bare domains in user text are
# only auto-linkified when their suffix appears in this tuple.
TLDS = ( # Original gTLDs and ccTLDs
	'ac', 'ad', 'ae', 'aero', 'af', 'ag', 'ai', 'al', 'am', 'an', 'ao', 'aq', 'ar', 'arpa', 'as', 'asia', 'at',
	'au', 'aw', 'ax', 'az', 'ba', 'bb', 'bd', 'be', 'bf', 'bg', 'bh', 'bi', 'biz', 'bj', 'bm', 'bn', 'bo', 'br',
	'bs', 'bt', 'bv', 'bw', 'by', 'bz', 'ca', 'cafe', 'cat', 'cc', 'cd', 'cf', 'cg', 'ch', 'ci', 'ck', 'cl',
	'cm', 'cn', 'co', 'com', 'coop', 'cr', 'cu', 'cv', 'cx', 'cy', 'cz', 'de', 'dj', 'dk', 'dm', 'do', 'dz', 'ec',
	'edu', 'ee', 'eg', 'er', 'es', 'et', 'eu', 'fi', 'fj', 'fk', 'fm', 'fo', 'fr', 'ga', 'gb', 'gd', 'ge', 'gf',
	'gg', 'gh', 'gi', 'gl', 'gm', 'gn', 'gov', 'gp', 'gq', 'gr', 'gs', 'gt', 'gu', 'gw', 'gy', 'hk', 'hm', 'hn',
	'hr', 'ht', 'hu', 'id', 'ie', 'il', 'im', 'in', 'info', 'int', 'io', 'iq', 'ir', 'is', 'it', 'je', 'jm', 'jo',
	'jobs', 'jp', 'ke', 'kg', 'kh', 'ki', 'km', 'kn', 'kp', 'kr', 'kw', 'ky', 'kz', 'la', 'lb', 'lc', 'li', 'lk',
	'lr', 'ls', 'lt', 'lu', 'lv', 'ly', 'ma', 'mc', 'md', 'me', 'mg', 'mh', 'mil', 'mk', 'ml', 'mm', 'mn', 'mo',
	'mobi', 'mp', 'mq', 'mr', 'ms', 'mt', 'mu', 'museum', 'mv', 'mw', 'mx', 'my', 'mz', 'na', 'name',
	'nc', 'ne', 'net', 'nf', 'ng', 'ni', 'nl', 'no', 'np', 'nr', 'nu', 'nz', 'om', 'org', 'pa', 'pe', 'pf', 'pg',
	'ph', 'pk', 'pl', 'pm', 'pn', 'post', 'pr', 'pro', 'ps', 'pt', 'pw', 'py', 'qa', 're', 'ro', 'rs', 'ru', 'rw',
	'sa', 'sb', 'sc', 'sd', 'se', 'sg', 'sh', 'si', 'sj', 'sk', 'sl', 'sm', 'sn', 'so', 'social', 'sr', 'ss', 'st',
	'su', 'sv', 'sx', 'sy', 'sz', 'tc', 'td', 'tel', 'tf', 'tg', 'th', 'tj', 'tk', 'tl', 'tm', 'tn', 'to', 'tp',
	'tr', 'travel', 'tt', 'tv', 'tw', 'tz', 'ua', 'ug', 'uk', 'us', 'uy', 'uz', 'va', 'vc', 've', 'vg', 'vi', 'vn',
	'vu', 'wf', 'ws', 'xn', 'xxx', 'ye', 'yt', 'yu', 'za', 'zm', 'zw',
	# New gTLDs
	'app', 'cleaning', 'club', 'dev', 'farm', 'florist', 'fun', 'gay', 'lgbt', 'life', 'lol',
	'moe', 'mom', 'monster', 'new', 'news', 'online', 'pics', 'press', 'pub', 'site',
	'vip', 'win', 'world', 'wtf', 'xyz', 'video', 'host', 'art', 'media'
)
2022-05-04 23:09:46 +00:00
2022-05-24 19:16:55 +00:00
# HTML tags that survive bleach cleaning of user-generated markup
# (standard formatting tags plus site-specific ones like <spoiler> and <g>).
allowed_tags = (
	'b', 'blockquote', 'br', 'code', 'del', 'em',
	'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'hr', 'i',
	'li', 'ol', 'p', 'pre', 'strong', 'sub', 'sup',
	'table', 'tbody', 'th', 'thead', 'td', 'tr', 'ul',
	'marquee', 'a', 'span', 'ruby', 'rp', 'rt', 'spoiler',
	'img', 'lite-youtube', 'video', 'audio', 'g',
)
2022-05-04 23:09:46 +00:00
2022-06-30 22:18:05 +00:00
allowed_styles = [ ' color ' , ' background-color ' , ' font-weight ' , ' text-align ' , ' filter ' , ]
2022-05-25 00:27:41 +00:00
2022-05-04 23:09:46 +00:00
def allowed_attributes(tag, name, value):
	"""bleach attribute filter for full user-generated HTML.

	Called by bleach.Cleaner for every attribute on every allowed tag.
	Returns True to keep the attribute, False to strip it. style= is always
	kept (its contents are separately constrained by CSSSanitizer and
	allowed_styles); everything else is whitelisted per tag.
	"""
	if name == 'style': return True

	if tag == 'marquee':
		if name in {'direction', 'behavior', 'scrollamount'}: return True
		if name in {'height', 'width'}:
			# Only accept plain pixel sizes up to 250px.
			try: value = int(value.replace('px', ''))
			# int() raises ValueError on junk; non-str values raise
			# AttributeError/TypeError before conversion.
			except (ValueError, AttributeError, TypeError): return False
			if 0 < value <= 250: return True

	if tag == 'a':
		# Backslashes can smuggle in a foreign host ("/\evil.com") and xn--
		# punycode can spoof lookalike domains, so both are rejected.
		if name == 'href' and '\\' not in value and 'xn--' not in value:
			return True
		if name == 'rel' and value == 'nofollow noopener': return True
		if name == 'target' and value == '_blank': return True

	if tag == 'img':
		if name in {'src', 'data-src'}: return is_safe_url(value)
		if name == 'loading' and value == 'lazy': return True
		if name == 'data-bs-toggle' and value == 'tooltip': return True
		# Valueless marker attributes used by the emoji renderer.
		if name in {'g', 'b', 'glow'} and not value: return True
		if name in {'alt', 'title'}: return True

	if tag == 'lite-youtube':
		if name == 'params' and value.startswith('autoplay=1&modestbranding=1'): return True
		if name == 'videoid': return True

	if tag == 'video':
		if name == 'controls' and value == '': return True
		if name == 'preload' and value == 'none': return True
		if name == 'src': return is_safe_url(value)

	if tag == 'audio':
		if name == 'src': return is_safe_url(value)
		if name == 'controls' and value == '': return True
		if name == 'preload' and value == 'none': return True

	if tag == 'p':
		if name == 'class' and value in {'mb-0', 'resizable'}: return True

	if tag == 'span':
		if name == 'data-bs-toggle' and value == 'tooltip': return True
		if name == 'title': return True
		if name == 'alt': return True

	if tag == 'table':
		if name == 'class' and value == 'table': return True

	return False
2022-05-04 23:09:46 +00:00
2022-11-02 07:08:02 +00:00
def build_url_re(tlds, protocols):
	"""Builds the url regex used by linkifier

	Adapted from bleach.linkifier.build_url_re so that the TLD whitelist
	above (rather than bleach's default list) decides what gets linkified.

	If you want a different set of tlds or allowed protocols, pass those in
	and stomp on the existing ``url_re``::

		from bleach import linkifier

		my_url_re = linkifier.build_url_re(my_tlds_list, my_protocols)

		linker = LinkifyFilter(url_re=my_url_re)
	"""
	# {0} -> alternation of protocols, {1} -> alternation of TLDs; the
	# doubled braces ({{ }}) are literal braces surviving str.format().
	return re.compile(
		r"""\(*  # Match any opening parentheses.
	\b(?<![@.])(?:(?:{0}):/{{0,3}}(?:(?:\w+:)?\w+@)?)?  # http://
	([\w-]+\.)+(?:{1})(?:\:[0-9]+)?(?!\.\w)\b   # xx.yy.tld(:##)?
	(?:[/?][^#\s\{{\}}\|\\\^\[\]`<>"]*)?
		# /path/zz (excluding "unsafe" chars from RFC 1738,
		# except for ~, which happens in practice)
	(?:\#[^#\s\|\\\^\[\]`<>"]*)?
		# #hash (excluding "unsafe" chars from RFC 1738,
		# except for ~, which happens in practice)
	""".format(
			"|".join(sorted(protocols)), "|".join(sorted(tlds))
		),
		re.IGNORECASE | re.VERBOSE | re.UNICODE,
	)
2022-07-15 13:27:45 +00:00
2022-11-02 07:08:02 +00:00
url_re = build_url_re ( tlds = TLDS , protocols = [ ' http ' , ' https ' ] )
2022-05-04 23:09:46 +00:00
def callback(attrs, new=False):
	"""bleach linkifier callback that vets auto-generated <a> attributes.

	Returns None for a malformed tag (no href), renders suspicious hrefs as
	plain text, and forces external links to open in a new tab with
	rel="nofollow noopener".
	"""
	href_key = (None, "href")
	if href_key not in attrs:
		return  # Incorrect <a> tag

	href = attrs[href_key]

	# A backslash right after "/" makes most browsers ditch the site hostname,
	# allowing host injection that bypasses the check, e.g.
	# <a href="/\google.com">cool</a>. Non-ASCII hrefs are rejected too.
	if "\\" in href or not ascii_only_regex.fullmatch(href):
		attrs["_text"] = href  # Laugh at this user
		del attrs[href_key]  # Make unclickable and reset harmful payload
		return attrs

	internal = href.startswith('/') or href.startswith(f'{SITE_FULL}/')
	if not internal:
		attrs[(None, "target")] = "_blank"
		attrs[(None, "rel")] = "nofollow noopener"

	return attrs
2022-09-16 16:30:34 +00:00
def render_emoji(html, regexp, golden, marseys_used, b=False):
	"""Replace :emoji: codes matched by `regexp` in `html` with <img> markup.

	golden: when True (and at most 20 emojis), marsey emojis get a tiny
	random chance of the ' g ' / ' glow ' cosmetic attributes.
	marseys_used: set mutated in place with the names of rendered emojis.
	b: when True, adds the ' b ' (big) marker attribute to every emoji.
	Returns the updated html string.
	"""
	emojis = list(regexp.finditer(html))
	captured = set()
	for i in emojis:
		# Each distinct emoji token is processed once; re.sub below replaces
		# all of its occurrences.
		if i.group(0) in captured: continue
		captured.add(i.group(0))
		emoji = i.group(1).lower()
		attrs = ''
		if b: attrs += ' b'
		if golden and len(emojis) <= 20 and ('marsey' in emoji or emoji in marseys_const2):
			# Rare cosmetic rolls: ~0.25% golden, else ~0.125% glow.
			if random.random() < 0.0025: attrs += ' g'
			elif random.random() < 0.00125: attrs += ' glow'
		old = emoji
		# '!' (mirror) and '#' (big) modifier characters are handled via CSS
		# classes elsewhere; strip them to get the base emoji filename.
		emoji = emoji.replace('!', '').replace('#', '')
		if emoji == 'marseyrandom': emoji = random.choice(marseys_const2)

		emoji_partial_pat = '<img loading="lazy" alt=":{0}:" src="{1}"{2}>'
		emoji_partial = '<img loading="lazy" data-bs-toggle="tooltip" alt=":{0}:" title=":{0}:" src="{1}"{2}>'
		emoji_html = None

		if emoji.endswith('pat') and emoji != 'marseyunpettablepat':
			# ":xpat:" renders x with a patting-hand overlay.
			if path.isfile(f"files/assets/images/emojis/{emoji.replace('pat','')}.webp"):
				emoji_html = f'<span data-bs-toggle="tooltip" alt=":{old}:" title=":{old}:"><img src="/i/hand.webp">{emoji_partial_pat.format(old, f"/e/{emoji[:-3]}.webp", attrs)}</span>'
			elif emoji.startswith('@'):
				# ":@usernamepat:" pats a user's profile picture.
				if u := get_user(emoji[1:-3], graceful=True):
					emoji_html = f'<span data-bs-toggle="tooltip" alt=":{old}:" title=":{old}:"><img src="/i/hand.webp">{emoji_partial_pat.format(old, f"/pp/{u.id}", attrs)}</span>'
		elif path.isfile(f'files/assets/images/emojis/{emoji}.webp'):
			emoji_html = emoji_partial.format(old, f'/e/{emoji}.webp', attrs)

		if emoji_html:
			marseys_used.add(emoji)
			# Skip occurrences inside attribute values (preceded by a quote)
			# and inside <code>/<pre>/<a> elements.
			html = re.sub(f'(?<!"){i.group(0)}(?![^<]*<\/(code|pre|a)>)', emoji_html, html)
	return html
2022-07-05 22:11:45 +00:00
def with_sigalrm_timeout(timeout: int):
	'''Use SIGALRM to raise an exception if the function executes for longer than timeout seconds'''

	# while trying to test this using time.sleep I discovered that gunicorn does in fact do some
	# async so if we timeout on that (or on a db op) then the process is crashed without returning
	# a proper 500 error. Oh well.
	def sig_handler(signum, frame):
		print("Timeout!", flush=True)
		raise Exception("Timeout")

	def inner(func):
		@functools.wraps(func)
		def wrapped(*args, **kwargs):
			# Save whatever handler was installed before so nesting these
			# decorators (or other SIGALRM users) doesn't leave our handler
			# installed after the call finishes.
			old_handler = signal.signal(signal.SIGALRM, sig_handler)
			signal.alarm(timeout)
			try:
				return func(*args, **kwargs)
			finally:
				# Always cancel the pending alarm and restore the previous
				# handler, even when func raises.
				signal.alarm(0)
				signal.signal(signal.SIGALRM, old_handler)
		return wrapped
	return inner
2022-11-07 00:40:51 +00:00
def sanitize_raw_title(sanitized: Optional[str]) -> str:
	"""Strip invisible characters and line breaks from a raw post title and
	clamp it to POST_TITLE_LENGTH_LIMIT. None/empty input yields ""."""
	if not sanitized:
		return ""
	# Delete LTR mark, zero-width space, BOM, CR and LF in one pass.
	cleaned = sanitized.translate(str.maketrans('', '', '\u200e\u200b\ufeff\r\n'))
	return cleaned.strip()[:POST_TITLE_LENGTH_LIMIT]
2022-10-05 08:04:32 +00:00
2022-11-07 00:40:51 +00:00
def sanitize_raw_body(sanitized: Optional[str], is_post: bool) -> str:
	"""Pre-clean a raw post or comment body: drop HTML comments and invisible
	characters, normalize CRLF to LF, and clamp to the relevant length limit.
	None/empty input yields ""."""
	if not sanitized:
		return ""
	without_comments = html_comment_regex.sub('', sanitized)
	normalized = (without_comments
		.replace('\u200e', '')
		.replace('\u200b', '')
		.replace("\ufeff", "")
		.replace("\r\n", "\n"))
	limit = POST_BODY_LENGTH_LIMIT if is_post else COMMENT_BODY_LENGTH_LIMIT
	return normalized.strip()[:limit]
2022-10-05 08:16:56 +00:00
2022-10-05 08:04:32 +00:00
2022-11-07 00:40:51 +00:00
def sanitize_settings_text(sanitized: Optional[str], max_length: Optional[int] = None) -> str:
	"""Clean a single-line settings field: remove invisible characters and
	line breaks, trim whitespace, and optionally clamp to max_length.
	None/empty input yields ""."""
	if not sanitized:
		return ""
	# Delete LTR mark, zero-width space, BOM, CR and LF in a single pass.
	cleaned = sanitized.translate(str.maketrans('', '', '\u200e\u200b\ufeff\r\n')).strip()
	return cleaned[:max_length] if max_length else cleaned
2022-12-15 19:31:30 +00:00
@with_sigalrm_timeout(10)
def sanitize(sanitized, golden=True, limit_pings=0, showmore=True, count_marseys=False, torture=False, sidebar=False):
	"""Render untrusted user markdown into sanitized HTML.

	Pipeline: regex pre-processing -> markdown() -> user-ping expansion ->
	BeautifulSoup image/link fixups -> emoji rendering -> youtube/video/audio
	embeds -> bleach cleaning (tag/attr/style whitelists + linkifier) ->
	banned-domain check -> show-more folding. Runs under a 10s SIGALRM
	timeout.

	golden: allow rare golden/glow emoji cosmetics (disabled above 20 emojis).
	limit_pings: abort(406) when more than this many users are @-mentioned
		(0 disables the check). Aborts 403 when a banned domain is linked.
	showmore: fold content after 3500 chars behind a SHOW MORE button.
	count_marseys: increment DB usage counters for official marseys used.
	torture: rewrite the text via torture_ap and append a random image.
	sidebar: keep newlines (used for sidebar rendering).
	Returns the sanitized HTML string.
	"""
	sanitized = sanitized.strip()

	# Strip tracking query parameters.
	sanitized = utm_regex.sub('', sanitized)
	sanitized = utm_regex2.sub('', sanitized)

	if torture:
		# NOTE(review): torture_ap rewrites the text for the current user;
		# defined elsewhere — behavior not visible here.
		sanitized = torture_ap(sanitized, g.v.username)
		to_add = random.choice((':#trumpjaktalking:', ':#reposthorse:', '![](/i/supportjews.webp)', '![](/i/gluck.webp)', '![](/i/ack.webp)'))
		sanitized += f'\n{to_add}'

	sanitized = normalize_url(sanitized)

	# Outside code blocks, force paragraph breaks between consecutive lines.
	if '```' not in sanitized and '<pre>' not in sanitized:
		sanitized = linefeeds_regex.sub(r'\1\n\n\2', sanitized)

	# 4chan-style ">greentext" quoting.
	sanitized = greentext_regex.sub(r'\1<g>\>\2</g>', sanitized)

	# Bare image URLs become markdown images.
	sanitized = image_regex.sub(r'\1![](\2)\5', sanitized)

	sanitized = image_check_regex.sub(r'\1', sanitized)

	sanitized = link_fix_regex.sub(r'\1https://\2', sanitized)

	if FEATURES['MARKUP_COMMANDS']:
		sanitized = command_regex.sub(command_regex_matcher, sanitized)

	sanitized = markdown(sanitized)

	sanitized = strikethrough_regex.sub(r'\1<del>\2</del>', sanitized)

	# replacing zero width characters, overlines, fake colons
	sanitized = sanitized.replace('\u200e', '').replace('\u200b', '').replace("\ufeff", "").replace("\u033f", "").replace("\u0589", ":")

	# /r/sub and /u/user style reddit links, then local /h/ style links.
	sanitized = reddit_regex.sub(r'\1<a href="https://old.reddit.com/\2" rel="nofollow noopener" target="_blank">/\2</a>', sanitized)
	sanitized = sub_regex.sub(r'\1<a href="/\2">/\2</a>', sanitized)

	v = getattr(g, 'v', None)

	# Expand @mentions into profile links.
	names = set(m.group(2) for m in mention_regex.finditer(sanitized))
	# NOTE(review): if limit_pings is set and v is None this raises on
	# v.admin_level — callers presumably always have a logged-in user here.
	if limit_pings and len(names) > limit_pings and not v.admin_level >= PERMS['POST_COMMENT_INFINITE_PINGS']: abort(406)
	users_list = get_users(names, graceful=True)
	users_dict = {}
	for u in users_list:
		users_dict[u.username.lower()] = u
		if u.original_username:
			users_dict[u.original_username.lower()] = u
	def replacer(m):
		u = users_dict.get(m.group(2).lower())
		if not u:
			return m.group(0)
		return f'{m.group(1)}<a href="/id/{u.id}"><img loading="lazy" src="/pp/{u.id}">@{u.username}</a>'
	sanitized = mention_regex.sub(replacer, sanitized)

	soup = BeautifulSoup(sanitized, 'lxml')
	for tag in soup.find_all("img"):
		if tag.get("src") and not tag["src"].startswith('/pp/'):
			if not is_safe_url(tag["src"]):
				# Unapproved image host: degrade to a plain external link.
				a = soup.new_tag("a", href=tag["src"], rel="nofollow noopener", target="_blank")
				a.string = tag["src"]
				tag.replace_with(a)
				continue
			# Lazy-load via a placeholder; the real URL moves to data-src.
			tag["loading"] = "lazy"
			tag["data-src"] = tag["src"]
			tag["src"] = "/i/l.webp"
			tag['alt'] = f'![]({tag["data-src"]})'

			# Wrap bare images in a link to the full-size image.
			if tag.parent.name != 'a':
				a = soup.new_tag("a", href=tag["data-src"])
				if not is_site_url(a["href"]):
					a["rel"] = "nofollow noopener"
					a["target"] = "_blank"
				tag = tag.replace_with(a)
				a.append(tag)

	for tag in soup.find_all("a"):
		# Drop empty anchors; show the real href for links whose text looks
		# like a (possibly different) URL.
		if not tag.contents or not str(tag.contents[0]).strip():
			tag.extract()
		if tag.get("href") and fishylinks_regex.fullmatch(str(tag.string)):
			tag.string = tag["href"]
	sanitized = str(soup)

	sanitized = spoiler_regex.sub(r'<spoiler>\1</spoiler>', sanitized)

	# Render emojis; more than 20 disables golden cosmetics.
	marseys_used = set()
	emojis = list(emoji_regex.finditer(sanitized))
	if len(emojis) > 20: golden = False
	captured = []
	for i in emojis:
		if i.group(0) in captured: continue
		captured.append(i.group(0))
		old = i.group(0)
		# Tall emojis keep the paragraph flush via the mb-0 class.
		if 'marseylong1' in old or 'marseylong2' in old or 'marseyllama1' in old or 'marseyllama2' in old: new = old.lower().replace(">", " class='mb-0'>")
		else: new = old.lower()
		new = render_emoji(new, emoji_regex2, golden, marseys_used, True)
		sanitized = sanitized.replace(old, new)
	emojis = list(emoji_regex2.finditer(sanitized))
	if len(emojis) > 20: golden = False

	sanitized = render_emoji(sanitized, emoji_regex2, golden, marseys_used)

	# Undo markdown's ampersand escaping before building embed params.
	sanitized = sanitized.replace('&amp;', '&')

	# Replace youtube links with lite-youtube embeds, carrying a t=/start= offset.
	captured = []
	for i in youtube_regex.finditer(sanitized):
		if i.group(0) in captured: continue
		captured.append(i.group(0))
		params = parse_qs(urlparse(i.group(2)).query, keep_blank_values=True)
		t = params.get('t', params.get('start', [0]))[0]
		if isinstance(t, str): t = t.replace('s', '')
		htmlsource = f'{i.group(1)}<lite-youtube videoid="{i.group(3)}" params="autoplay=1&modestbranding=1'
		if t:
			# Non-numeric offsets are silently dropped.
			try: htmlsource += f'&start={int(t)}'
			except: pass
		htmlsource += '"></lite-youtube>'
		sanitized = sanitized.replace(i.group(0), htmlsource)

	sanitized = video_sub_regex.sub(r'\1<p class="resizable"><video controls preload="none" src="\2"></video></p>', sanitized)
	sanitized = audio_sub_regex.sub(r'\1<audio controls preload="none" src="\2"></audio>', sanitized)

	if count_marseys:
		# Bump usage counters for official (no submitter) marseys.
		for marsey in g.db.query(Marsey).filter(Marsey.submitter_id == None, Marsey.name.in_(marseys_used)).all():
			marsey.count += 1
			g.db.add(marsey)

	sanitized = sanitized.replace('<p></p>', '')
	sanitized = sanitized.replace('<html><body>', '').replace('</body></html>', '')

	# Final whitelist pass: tags, per-tag attributes, inline CSS, and
	# auto-linkification of bare URLs (restricted to TLDS via url_re).
	css_sanitizer = CSSSanitizer(allowed_css_properties=allowed_styles)
	sanitized = bleach.Cleaner(tags=allowed_tags,
								attributes=allowed_attributes,
								protocols=['http', 'https'],
								css_sanitizer=css_sanitizer,
								filters=[partial(LinkifyFilter, skip_tags=["pre"],
									parse_email=False, callbacks=[callback], url_re=url_re)]
								).clean(sanitized)

	# Collect domain+path of every link for the banned-domain check.
	soup = BeautifulSoup(sanitized, 'lxml')
	links = soup.find_all("a")
	domain_list = set()
	for link in links:
		href = link.get("href")
		if not href: continue
		url = urlparse(href)
		d = tldextract.extract(href).registered_domain + url.path
		domain_list.add(d.lower())

	banned_domains = g.db.query(BannedDomain).all()
	for x in banned_domains:
		for y in domain_list:
			if y.startswith(x.domain):
				abort(403, f'Remove the banned link "{x.domain}" and try again!\nReason for link ban: "{x.reason}"')

	# Outside <pre>, raw newlines are purely cosmetic at this point (markdown
	# already produced <p>/<br>), except in the sidebar.
	if '<pre>' not in sanitized and not sidebar:
		sanitized = sanitized.replace('\n', '')

	if showmore and len(sanitized) > 3500:
		sanitized = showmore_regex.sub(r'\1<p><button class="showmore" onclick="showmore()">SHOW MORE</button></p><d class="d-none">\2</d>', sanitized, count=1)

	return sanitized.strip()
2022-05-04 23:09:46 +00:00
def allowed_attributes_emojis(tag, name, value):
	"""bleach attribute filter for emoji-only contexts (titles, poll options).

	Much narrower than allowed_attributes: only the <img>/<span> attributes
	the emoji renderer emits are kept; everything else is stripped.
	"""
	if tag == 'img':
		if name == 'src':
			# Site-relative paths only; backslashes can smuggle in a foreign host.
			return value.startswith('/') and '\\' not in value
		if name == 'loading':
			return value == 'lazy'
		if name == 'data-bs-toggle':
			return value == 'tooltip'
		if name in ('g', 'glow'):
			return not value  # valueless marker attributes only
		return name in ('alt', 'title')
	if tag == 'span':
		if name == 'data-bs-toggle':
			return value == 'tooltip'
		return name in ('title', 'alt')
	return False
2022-07-05 22:11:45 +00:00
@with_sigalrm_timeout(1)
def filter_emojis_only(title, golden=True, count_marseys=False, graceful=False, torture=False):
	"""Sanitize a short text field (title, poll option) allowing only emojis.

	HTML-escapes everything first, renders :emoji: codes into <img> markup,
	then bleach-cleans with the narrow emoji whitelist. Aborts 400 when the
	rendered HTML exceeds POST_TITLE_HTML_LENGTH_LIMIT, unless graceful=True
	(in which case the over-long result is returned as-is). Runs under a 1s
	SIGALRM timeout.
	"""
	title = title.strip()
	if torture:
		# NOTE(review): torture_ap rewrites the text for the current user;
		# defined elsewhere.
		title = torture_ap(title, g.v.username)

	# Strip invisible/abusive characters and escape every HTML special
	# character BEFORE emoji <img> markup is injected, so user text can never
	# contribute markup of its own.
	title = title.replace('\u200e', '').replace('\u200b', '').replace("\ufeff", "").replace("𒐪", "").replace("\n", "").replace("\r", "").replace("\t", "").replace("&", "&amp;").replace('<', '&lt;').replace('>', '&gt;').replace('"', '&quot;').replace("'", "&#039;").replace("﷽", "").strip()

	marseys_used = set()
	title = render_emoji(title, emoji_regex3, golden, marseys_used)

	if count_marseys:
		# Bump usage counters for official (no submitter) marseys.
		for marsey in g.db.query(Marsey).filter(Marsey.submitter_id == None, Marsey.name.in_(marseys_used)).all():
			marsey.count += 1
			g.db.add(marsey)

	title = strikethrough_regex.sub(r'\1<del>\2</del>', title)

	title = bleach.clean(title, tags=['img', 'del', 'span'], attributes=allowed_attributes_emojis, protocols=['http', 'https']).replace('\n', '').strip()

	if len(title) > POST_TITLE_HTML_LENGTH_LIMIT and not graceful: abort(400)
	else: return title
2022-05-25 08:43:16 +00:00
2022-06-10 20:02:15 +00:00
def normalize_url(url):
	"""Canonicalize link formats inside user text: mobile/mirror hosts are
	mapped to their canonical domains and media URLs to embeddable forms.
	The rewrite order is significant (e.g. www.youtube.com must collapse
	before the /shorts/ rewrite; the streamable /e/e/ entry repairs the
	double-prefix the previous entry can create)."""
	url = reddit_domain_regex.sub(r'\1https://old.reddit.com/\3/', url)

	rewrites = (
		("https://youtu.be/", "https://youtube.com/watch?v="),
		("https://music.youtube.com/watch?v=", "https://youtube.com/watch?v="),
		("https://www.youtube.com", "https://youtube.com"),
		("https://youtube.com/shorts/", "https://youtube.com/watch?v="),
		("https://youtube.com/v/", "https://youtube.com/watch?v="),
		("https://mobile.twitter.com", "https://twitter.com"),
		("https://m.facebook.com", "https://facebook.com"),
		("https://m.wikipedia.org", "https://wikipedia.org"),
		("https://m.youtube.com", "https://youtube.com"),
		("https://www.twitter.com", "https://twitter.com"),
		("https://www.instagram.com", "https://instagram.com"),
		("https://www.tiktok.com", "https://tiktok.com"),
		("https://www.streamable.com", "https://streamable.com"),
		("https://streamable.com/", "https://streamable.com/e/"),
		("https://streamable.com/e/e/", "https://streamable.com/e/"),
		("https://search.marsey.cat/#", "https://camas.unddit.com/#"),
		("https://imgur.com/", "https://i.imgur.com/"),
		("https://nitter.net/", "https://twitter.com/"),
		("https://nitter.42l.fr/", "https://twitter.com/"),
		("https://nitter.lacontrevoie.fr/", "https://twitter.com/"),
		("/giphy.gif", "/200w.webp"),
		("/giphy.webp", "/200w.webp"),
	)
	for needle, replacement in rewrites:
		url = url.replace(needle, replacement)

	url = imgur_regex.sub(r'\1_d.webp?maxwidth=9999&fidelity=grand', url)
	url = giphy_regex.sub(r'\1.webp', url)

	return url
2022-08-05 17:09:41 +00:00
def validate_css(css):
	"""Validate user-supplied CSS.

	Rejects @import statements outright, and every url(...) reference must
	point at an approved embed host. Returns (ok, error_message) where
	error_message is "" on success.
	"""
	if '@import' in css:
		return False, "@import statements are not allowed!"

	for match in css_url_regex.finditer(css):
		referenced_url = match.group(1)
		if is_safe_url(referenced_url):
			continue
		domain = tldextract.extract(referenced_url).registered_domain
		return False, f"The domain '{domain}' is not allowed, please use one of these domains\n\n{approved_embed_hosts}."

	return True, ""
2022-12-04 21:46:27 +00:00
def sanitize_poll_options(v: User, body: str, allow_bets: bool) -> tuple[str, List[Any], List[Any], List[Any]]:
	"""Extract poll markup from a post body.

	Pulls bet ($$...$$, admins with POST_BETS only), poll and choice options
	out of `body` (each option text run through filter_emojis_only), removing
	the matched markup from the body. At most POLL_MAX_OPTIONS options of
	each kind are honored when that limit is set.

	Returns (body_without_markup, bets, options, choices).
	"""
	def _extract(body: str, pattern: re.Pattern) -> tuple[str, List[str]]:
		# Fixed: the parameter used to be named `re`, shadowing the re module
		# (its own `re.Pattern` annotation referred to the module it hid).
		opts = []
		matches = list(pattern.finditer(body))
		if POLL_MAX_OPTIONS:
			matches = matches[:POLL_MAX_OPTIONS]
		for m in matches:
			opts.append(filter_emojis_only(m.group(1)))
			body = body.replace(m.group(0), "")
		return (body, opts)

	bets = []
	if allow_bets and v and v.admin_level >= PERMS['POST_BETS']:
		body, bets = _extract(body, bet_regex)
	body, options = _extract(body, poll_regex)
	body, choices = _extract(body, choice_regex)
	return (body, bets, options, choices)