Add test for parsing a federated Lemmy comment

pleroma-federation2
Felix Ableitner 2021-10-20 16:38:31 +02:00
parent 5504efa4a6
commit 60b472e3f0
3 changed files with 98 additions and 0 deletions

View File

@ -0,0 +1,36 @@
{
"@context": [
"https://www.w3.org/ns/activitystreams",
{
"matrixUserId": {
"type": "sc:Text",
"id": "as:alsoKnownAs"
},
"sensitive": "as:sensitive",
"stickied": "as:stickied",
"moderators": "as:moderators",
"sc": "http://schema.org#",
"pt": "https://join-lemmy.org#",
"comments_enabled": {
"type": "sc:Boolean",
"id": "pt:commentsEnabled"
}
},
"https://w3id.org/security/v1"
],
"type": "Note",
"id": "https://lemmy.ml/comment/38741",
"attributedTo": "https://lemmy.ml/u/nutomic",
"to": "https://www.w3.org/ns/activitystreams#Public",
"content": "While I very much get and respect the general sentiment, I think from the perspective of a Central European non-English person in a country with a significant number of, also non-English speaking Nazis, the current approach of filtering slurs based on an English regex is fatally flawed. You can happily use Lemmy to create a hostile far right community where everyone is easily able to use whatever hurtful slurs they want as long as they are not the few specifically blocked English ones. \n\nOn the other hand you create a situation where people feel the need to question the choice of software of their community because they read about censorship or whatever to be used in Lemmy and might stay away and move to other software even though the would maybe never be affected by the slur-filter as the number is not so large and the overlap with other languages not very big.\n\nSo I would argue that this specific implementation of a slur-filter just doesn't achieve what it aims to achieve and should be fundamentally rethought, maybe as configurable per instance.",
"mediaType": "text/html",
"source": {
"content": "While I very much get and respect the general sentiment, I think from the perspective of a Central European non-English person in a country with a significant number of, also non-English speaking Nazis, the current approach of filtering slurs based on an English regex is fatally flawed. You can happily use Lemmy to create a hostile far right community where everyone is easily able to use whatever hurtful slurs they want as long as they are not the few specifically blocked English ones. \n\nOn the other hand you create a situation where people feel the need to question the choice of software of their community because they read about censorship or whatever to be used in Lemmy and might stay away and move to other software even though the would maybe never be affected by the slur-filter as the number is not so large and the overlap with other languages not very big.\n\nSo I would argue that this specific implementation of a slur-filter just doesn't achieve what it aims to achieve and should be fundamentally rethought, maybe as configurable per instance.",
"mediaType": "text/markdown"
},
"inReplyTo": [
"https://lemmy.ml/post/55143"
],
"published": "2021-03-01T13:42:43.966208+00:00",
"updated": "2021-03-01T13:43:03.955787+00:00"
}

View File

@ -80,8 +80,10 @@ impl Note {
context: &LemmyContext, context: &LemmyContext,
request_counter: &mut i32, request_counter: &mut i32,
) -> Result<(ApubPost, Option<CommentId>), LemmyError> { ) -> Result<(ApubPost, Option<CommentId>), LemmyError> {
dbg!(10);
match &self.in_reply_to { match &self.in_reply_to {
CommentInReplyToMigration::Old(in_reply_to) => { CommentInReplyToMigration::Old(in_reply_to) => {
dbg!(11);
// This post, or the parent comment might not yet exist on this server yet, fetch them. // This post, or the parent comment might not yet exist on this server yet, fetch them.
let post_id = in_reply_to.get(0).context(location_info!())?; let post_id = in_reply_to.get(0).context(location_info!())?;
let post_id = ObjectId::new(post_id.clone()); let post_id = ObjectId::new(post_id.clone());
@ -89,6 +91,7 @@ impl Note {
// The 2nd item, if it exists, is the parent comment apub_id // The 2nd item, if it exists, is the parent comment apub_id
// Nested comments will automatically get fetched recursively // Nested comments will automatically get fetched recursively
dbg!(12);
let parent_id: Option<CommentId> = match in_reply_to.get(1) { let parent_id: Option<CommentId> = match in_reply_to.get(1) {
Some(comment_id) => { Some(comment_id) => {
let comment_id = ObjectId::<ApubComment>::new(comment_id.clone()); let comment_id = ObjectId::<ApubComment>::new(comment_id.clone());
@ -98,13 +101,16 @@ impl Note {
} }
None => None, None => None,
}; };
dbg!(13);
Ok((post, parent_id)) Ok((post, parent_id))
} }
CommentInReplyToMigration::New(in_reply_to) => { CommentInReplyToMigration::New(in_reply_to) => {
dbg!(14);
let parent = Box::pin(in_reply_to.dereference(context, request_counter).await?); let parent = Box::pin(in_reply_to.dereference(context, request_counter).await?);
match parent.deref() { match parent.deref() {
PostOrComment::Post(p) => { PostOrComment::Post(p) => {
dbg!(15);
// Workaround because I cant figure out how to get the post out of the box (and we dont // Workaround because I cant figure out how to get the post out of the box (and we dont
// want to stackoverflow in a deep comment hierarchy). // want to stackoverflow in a deep comment hierarchy).
let post_id = p.id; let post_id = p.id;
@ -112,6 +118,7 @@ impl Note {
Ok((post.into(), None)) Ok((post.into(), None))
} }
PostOrComment::Comment(c) => { PostOrComment::Comment(c) => {
dbg!(16);
let post_id = c.post_id; let post_id = c.post_id;
let post = blocking(context.pool(), move |conn| Post::read(conn, post_id)).await??; let post = blocking(context.pool(), move |conn| Post::read(conn, post_id)).await??;
Ok((post.into(), Some(c.id))) Ok((post.into(), Some(c.id)))
@ -262,12 +269,15 @@ impl FromApub for ApubComment {
expected_domain: &Url, expected_domain: &Url,
request_counter: &mut i32, request_counter: &mut i32,
) -> Result<ApubComment, LemmyError> { ) -> Result<ApubComment, LemmyError> {
dbg!(1);
let ap_id = Some(note.id(expected_domain)?.clone().into()); let ap_id = Some(note.id(expected_domain)?.clone().into());
let creator = note let creator = note
.attributed_to .attributed_to
.dereference(context, request_counter) .dereference(context, request_counter)
.await?; .await?;
dbg!(2);
let (post, parent_comment_id) = note.get_parents(context, request_counter).await?; let (post, parent_comment_id) = note.get_parents(context, request_counter).await?;
dbg!(2.5);
if post.locked { if post.locked {
return Err(anyhow!("Post is locked").into()); return Err(anyhow!("Post is locked").into());
} }
@ -275,6 +285,7 @@ impl FromApub for ApubComment {
let content = &note.source.content; let content = &note.source.content;
let content_slurs_removed = remove_slurs(content, &context.settings().slur_regex()); let content_slurs_removed = remove_slurs(content, &context.settings().slur_regex());
dbg!(3);
let form = CommentForm { let form = CommentForm {
creator_id: creator.id, creator_id: creator.id,
post_id: post.id, post_id: post.id,
@ -289,6 +300,51 @@ impl FromApub for ApubComment {
local: Some(false), local: Some(false),
}; };
let comment = blocking(context.pool(), move |conn| Comment::upsert(conn, &form)).await??; let comment = blocking(context.pool(), move |conn| Comment::upsert(conn, &form)).await??;
dbg!(4);
Ok(comment.into()) Ok(comment.into())
} }
} }
#[cfg(test)]
mod tests {
  use super::*;
  use crate::objects::{
    community::ApubCommunity,
    tests::{file_to_json_object, init_context},
  };
  use serial_test::serial;

  /// The comment fixture references a person, a community and a post; parse those
  /// fixtures first so the objects exist locally before the comment itself is read.
  async fn prepare_comment_test(url: &Url, context: &LemmyContext) {
    // Shadow `json` for each fixture — the order matters: person, then community, then post.
    let json = file_to_json_object("assets/lemmy-person.json");
    ApubPerson::from_apub(&json, context, url, &mut 0)
      .await
      .unwrap();
    let json = file_to_json_object("assets/lemmy-community.json");
    ApubCommunity::from_apub(&json, context, url, &mut 0)
      .await
      .unwrap();
    let json = file_to_json_object("assets/lemmy-post.json");
    ApubPost::from_apub(&json, context, url, &mut 0)
      .await
      .unwrap();
  }

  /// Parses a real Lemmy comment (captured as a JSON fixture) and checks the
  /// resulting local comment's ap_id, content length, locality and that no
  /// extra federation requests were issued.
  #[actix_rt::test]
  #[serial]
  async fn test_fetch_lemmy_comment() {
    let context = init_context();
    let url = Url::parse("https://lemmy.ml/comment/38741").unwrap();
    prepare_comment_test(&url, &context).await;

    let comment_json = file_to_json_object("assets/lemmy-comment.json");
    let mut counter = 0;
    let comment = ApubComment::from_apub(&comment_json, &context, &url, &mut counter)
      .await
      .unwrap();

    assert_eq!(comment.ap_id.clone().into_inner(), url);
    assert_eq!(comment.content.len(), 1063);
    assert!(!comment.local);
    // All referenced objects were prepared locally, so nothing should be fetched.
    assert_eq!(counter, 0);
  }
}

View File

@ -10,5 +10,11 @@ sed -i 's/https:\/\/lemmy.ml\/c\/announcements\/outbox/https:\\/\\/lemmy.ml\\/c\
sed -i 's/https:\/\/lemmy.ml\/c\/announcements\/moderators/https:\\/\\/lemmy.ml\\/c\\/announcements\\/not_moderators/g' crates/apub/assets/lemmy-community.json sed -i 's/https:\/\/lemmy.ml\/c\/announcements\/moderators/https:\\/\\/lemmy.ml\\/c\\/announcements\\/not_moderators/g' crates/apub/assets/lemmy-community.json
curl -H "Accept: application/activity+json" https://lemmy.ml/post/55143 | jq \ curl -H "Accept: application/activity+json" https://lemmy.ml/post/55143 | jq \
> crates/apub/assets/lemmy-post.json > crates/apub/assets/lemmy-post.json
curl -H "Accept: application/activity+json" https://lemmy.ml/comment/38741 | jq \
> crates/apub/assets/lemmy-comment.json
# Rewrite attributed_to so the comment fixture points at the same user fetched above
sed -i 's/https:\/\/lemmy.ml\/u\/my_test/https:\/\/lemmy.ml\/u\/nutomic/g' crates/apub/assets/lemmy-comment.json
curl -H "Accept: application/activity+json" https://queer.hacktivis.me/users/lanodan | jq \ curl -H "Accept: application/activity+json" https://queer.hacktivis.me/users/lanodan | jq \
> crates/apub/assets/pleroma-person.json > crates/apub/assets/pleroma-person.json