2022-06-02 14:33:41 +00:00
|
|
|
use crate::{error::LemmyError, IpAddr};
|
2020-09-14 15:29:50 +00:00
|
|
|
use actix_web::dev::ConnectionInfo;
|
2020-12-17 19:01:33 +00:00
|
|
|
use chrono::{DateTime, FixedOffset, NaiveDateTime};
|
2020-09-14 15:29:50 +00:00
|
|
|
use itertools::Itertools;
|
2021-11-22 18:58:31 +00:00
|
|
|
use once_cell::sync::Lazy;
|
2020-09-14 15:29:50 +00:00
|
|
|
use rand::{distributions::Alphanumeric, thread_rng, Rng};
|
2021-09-22 15:57:09 +00:00
|
|
|
use regex::Regex;
|
2021-06-18 18:38:34 +00:00
|
|
|
use url::Url;
|
2020-09-14 15:29:50 +00:00
|
|
|
|
2021-11-22 18:58:31 +00:00
|
|
|
// Matches fediverse-style mentions of the form `@name@domain`, exposing the
// `name` (word characters and dots) and `domain` capture groups used by
// `scrape_text_for_mentions` below.
static MENTIONS_REGEX: Lazy<Regex> = Lazy::new(|| {
  Regex::new(r"@(?P<name>[\w.]+)@(?P<domain>[a-zA-Z0-9._:-]+)").expect("compile regex")
});
|
|
|
|
// Actor (user/community) names: at least 3 ASCII letters, digits, or
// underscores, anchored so the whole string must match.
static VALID_ACTOR_NAME_REGEX: Lazy<Regex> =
  Lazy::new(|| Regex::new(r"^[a-zA-Z0-9_]{3,}$").expect("compile regex"));
|
|
|
|
// Post titles must contain a run of at least 3 consecutive non-whitespace
// characters somewhere. Embedded newlines are rejected separately by
// `has_newline` in `is_valid_post_title`.
static VALID_POST_TITLE_REGEX: Lazy<Regex> =
  Lazy::new(|| Regex::new(r".*\S{3,}.*").expect("compile regex"));
|
2021-11-22 18:58:31 +00:00
|
|
|
// Matrix user IDs of the shape `@localpart:server.tld` — localpart allows
// letters, digits, `.`, `_`, `=`, `-`; the server part must end in a
// dot-separated alphabetic TLD of length >= 2.
static VALID_MATRIX_ID_REGEX: Lazy<Regex> = Lazy::new(|| {
  Regex::new(r"^@[A-Za-z0-9._=-]+:[A-Za-z0-9.-]+\.[A-Za-z]{2,}$").expect("compile regex")
});
|
|
|
|
// taken from https://en.wikipedia.org/wiki/UTM_parameters
|
|
|
|
static CLEAN_URL_PARAMS_REGEX: Lazy<Regex> = Lazy::new(|| {
|
|
|
|
Regex::new(r"^utm_source|utm_medium|utm_campaign|utm_term|utm_content|gclid|gclsrc|dclid|fbclid$")
|
|
|
|
.expect("compile regex")
|
|
|
|
});
|
2020-09-14 15:29:50 +00:00
|
|
|
|
|
|
|
pub fn naive_from_unix(time: i64) -> NaiveDateTime {
|
|
|
|
NaiveDateTime::from_timestamp(time, 0)
|
|
|
|
}
|
|
|
|
|
|
|
|
pub fn convert_datetime(datetime: NaiveDateTime) -> DateTime<FixedOffset> {
|
2020-12-17 19:01:33 +00:00
|
|
|
DateTime::<FixedOffset>::from_utc(datetime, FixedOffset::east(0))
|
2020-09-14 15:29:50 +00:00
|
|
|
}
|
|
|
|
|
2021-10-28 20:47:25 +00:00
|
|
|
pub fn remove_slurs(test: &str, slur_regex: &Option<Regex>) -> String {
|
|
|
|
if let Some(slur_regex) = slur_regex {
|
|
|
|
slur_regex.replace_all(test, "*removed*").to_string()
|
|
|
|
} else {
|
|
|
|
test.to_string()
|
|
|
|
}
|
2020-09-14 15:29:50 +00:00
|
|
|
}
|
|
|
|
|
2021-10-28 20:47:25 +00:00
|
|
|
pub(crate) fn slur_check<'a>(
|
|
|
|
test: &'a str,
|
|
|
|
slur_regex: &'a Option<Regex>,
|
|
|
|
) -> Result<(), Vec<&'a str>> {
|
|
|
|
if let Some(slur_regex) = slur_regex {
|
|
|
|
let mut matches: Vec<&str> = slur_regex.find_iter(test).map(|mat| mat.as_str()).collect();
|
|
|
|
|
|
|
|
// Unique
|
|
|
|
matches.sort_unstable();
|
|
|
|
matches.dedup();
|
|
|
|
|
|
|
|
if matches.is_empty() {
|
|
|
|
Ok(())
|
|
|
|
} else {
|
|
|
|
Err(matches)
|
|
|
|
}
|
2020-09-14 15:29:50 +00:00
|
|
|
} else {
|
2021-10-28 20:47:25 +00:00
|
|
|
Ok(())
|
2020-09-14 15:29:50 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-12-06 14:54:47 +00:00
|
|
|
pub fn check_slurs(text: &str, slur_regex: &Option<Regex>) -> Result<(), LemmyError> {
|
2021-10-28 20:47:25 +00:00
|
|
|
if let Err(slurs) = slur_check(text, slur_regex) {
|
2022-03-16 20:11:49 +00:00
|
|
|
Err(LemmyError::from_error_message(
|
|
|
|
anyhow::anyhow!("{}", slurs_vec_to_str(slurs)),
|
|
|
|
"slurs",
|
|
|
|
))
|
2021-10-28 20:47:25 +00:00
|
|
|
} else {
|
|
|
|
Ok(())
|
|
|
|
}
|
2020-09-14 15:29:50 +00:00
|
|
|
}
|
|
|
|
|
2021-12-06 14:54:47 +00:00
|
|
|
pub fn check_slurs_opt(
|
|
|
|
text: &Option<String>,
|
|
|
|
slur_regex: &Option<Regex>,
|
|
|
|
) -> Result<(), LemmyError> {
|
2020-09-14 15:29:50 +00:00
|
|
|
match text {
|
2021-09-22 15:57:09 +00:00
|
|
|
Some(t) => check_slurs(t, slur_regex),
|
2020-09-14 15:29:50 +00:00
|
|
|
None => Ok(()),
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Formats the list of matched slurs into the user-facing error detail,
/// e.g. `"No slurs - foo, bar"`.
pub(crate) fn slurs_vec_to_str(slurs: Vec<&str>) -> String {
  format!("No slurs - {}", slurs.join(", "))
}
|
|
|
|
|
|
|
|
pub fn generate_random_string() -> String {
|
2020-12-21 14:34:59 +00:00
|
|
|
thread_rng()
|
|
|
|
.sample_iter(&Alphanumeric)
|
|
|
|
.map(char::from)
|
|
|
|
.take(30)
|
|
|
|
.collect()
|
2020-09-14 15:29:50 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
pub fn markdown_to_html(text: &str) -> String {
|
|
|
|
comrak::markdown_to_html(text, &comrak::ComrakOptions::default())
|
|
|
|
}
|
|
|
|
|
|
|
|
// TODO nothing is done with community / group webfingers yet, so just ignore those for now
/// A parsed `@name@domain` mention extracted from user text.
#[derive(Clone, PartialEq, Eq, Hash)]
pub struct MentionData {
  pub name: String,
  pub domain: String,
}

impl MentionData {
  /// True when the mention's domain is this instance's hostname.
  pub fn is_local(&self, hostname: &str) -> bool {
    self.domain == hostname
  }

  /// Rebuilds the canonical `@name@domain` form.
  pub fn full_name(&self) -> String {
    format!("@{}@{}", self.name, self.domain)
  }
}
|
|
|
|
|
|
|
|
pub fn scrape_text_for_mentions(text: &str) -> Vec<MentionData> {
|
|
|
|
let mut out: Vec<MentionData> = Vec::new();
|
|
|
|
for caps in MENTIONS_REGEX.captures_iter(text) {
|
|
|
|
out.push(MentionData {
|
|
|
|
name: caps["name"].to_string(),
|
|
|
|
domain: caps["domain"].to_string(),
|
|
|
|
});
|
|
|
|
}
|
|
|
|
out.into_iter().unique().collect()
|
|
|
|
}
|
|
|
|
|
2021-11-23 15:52:58 +00:00
|
|
|
/// True when `name` contains a line-feed character anywhere.
fn has_newline(name: &str) -> bool {
  name.find('\n').is_some()
}
|
|
|
|
|
2021-09-22 15:57:09 +00:00
|
|
|
pub fn is_valid_actor_name(name: &str, actor_name_max_length: usize) -> bool {
|
2021-11-23 15:52:58 +00:00
|
|
|
name.chars().count() <= actor_name_max_length
|
|
|
|
&& VALID_ACTOR_NAME_REGEX.is_match(name)
|
|
|
|
&& !has_newline(name)
|
2020-09-14 15:29:50 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Can't do a regex here, reverse lookarounds not supported
/// Validates a display name: must not begin with `@` or a zero-width space,
/// must be 3..=`actor_name_max_length` characters, and contain no newline.
pub fn is_valid_display_name(name: &str, actor_name_max_length: usize) -> bool {
  if name.starts_with('@') || name.starts_with('\u{200b}') {
    return false;
  }
  let char_count = name.chars().count();
  (3..=actor_name_max_length).contains(&char_count) && !name.contains('\n')
}
|
|
|
|
|
2021-04-07 11:38:00 +00:00
|
|
|
pub fn is_valid_matrix_id(matrix_id: &str) -> bool {
|
2021-11-23 15:52:58 +00:00
|
|
|
VALID_MATRIX_ID_REGEX.is_match(matrix_id) && !has_newline(matrix_id)
|
2021-04-07 11:38:00 +00:00
|
|
|
}
|
|
|
|
|
2020-09-14 15:29:50 +00:00
|
|
|
pub fn is_valid_post_title(title: &str) -> bool {
|
2021-11-23 15:52:58 +00:00
|
|
|
VALID_POST_TITLE_REGEX.is_match(title) && !has_newline(title)
|
2020-09-14 15:29:50 +00:00
|
|
|
}
|
|
|
|
|
2021-03-18 20:25:21 +00:00
|
|
|
pub fn get_ip(conn_info: &ConnectionInfo) -> IpAddr {
|
|
|
|
IpAddr(
|
|
|
|
conn_info
|
|
|
|
.realip_remote_addr()
|
|
|
|
.unwrap_or("127.0.0.1:12345")
|
|
|
|
.split(':')
|
|
|
|
.next()
|
|
|
|
.unwrap_or("127.0.0.1")
|
|
|
|
.to_string(),
|
|
|
|
)
|
2020-09-14 15:29:50 +00:00
|
|
|
}
|
2021-06-18 18:38:34 +00:00
|
|
|
|
|
|
|
pub fn clean_url_params(mut url: Url) -> Url {
|
2021-08-21 13:36:33 +00:00
|
|
|
if url.query().is_some() {
|
|
|
|
let new_query = url
|
|
|
|
.query_pairs()
|
|
|
|
.filter(|q| !CLEAN_URL_PARAMS_REGEX.is_match(&q.0))
|
|
|
|
.map(|q| format!("{}={}", q.0, q.1))
|
|
|
|
.join("&");
|
|
|
|
url.set_query(Some(&new_query));
|
|
|
|
}
|
2021-06-18 18:38:34 +00:00
|
|
|
url
|
|
|
|
}
|
|
|
|
|
2022-01-20 14:04:54 +00:00
|
|
|
/// Trims optional user text, normalizing whitespace-only or empty strings
/// to `None`.
pub fn clean_optional_text(text: &Option<String>) -> Option<String> {
  text
    .as_deref()
    .map(str::trim)
    .filter(|trimmed| !trimmed.is_empty())
    .map(str::to_owned)
}
|
|
|
|
|
2021-06-18 18:38:34 +00:00
|
|
|
#[cfg(test)]
mod tests {
  use crate::utils::{clean_url_params, is_valid_post_title};
  use url::Url;

  // Tracking params (utm_*) must be stripped while legitimate params
  // (username, id) survive; query-less URLs pass through untouched.
  #[test]
  fn test_clean_url_params() {
    let url = Url::parse("https://example.com/path/123?utm_content=buffercf3b2&utm_medium=social&username=randomuser&id=123").unwrap();
    let cleaned = clean_url_params(url);
    let expected = Url::parse("https://example.com/path/123?username=randomuser&id=123").unwrap();
    assert_eq!(expected.to_string(), cleaned.to_string());

    // No query string: URL must come back unchanged.
    let url = Url::parse("https://example.com/path/123").unwrap();
    let cleaned = clean_url_params(url.clone());
    assert_eq!(url.to_string(), cleaned.to_string());
  }

  // Title rule: at least 3 consecutive non-whitespace chars, no newlines.
  #[test]
  fn regex_checks() {
    assert!(!is_valid_post_title("hi"));
    assert!(is_valid_post_title("him"));
    assert!(!is_valid_post_title("n\n\n\n\nanother"));
    assert!(!is_valid_post_title("hello there!\n this is a test."));
    assert!(is_valid_post_title("hello there! this is a test."));
  }
}
|