mirror of https://github.com/LemmyNet/lemmy.git
Merge 3fb1725301 into 9120207314, commit 46b3bd6418
@@ -3113,6 +3113,7 @@ name = "lemmy_federate"
 version = "0.19.5"
 dependencies = [
  "activitypub_federation",
+ "actix-web",
  "anyhow",
  "chrono",
  "diesel",
@@ -3131,6 +3132,7 @@ dependencies = [
  "tokio",
  "tokio-util",
  "tracing",
+ "url",
 ]

 [[package]]
@@ -3219,7 +3221,6 @@ dependencies = [
  "lettre",
  "markdown-it",
  "once_cell",
- "openssl",
  "pretty_assertions",
  "regex",
  "reqwest 0.11.27",
@@ -11,8 +11,6 @@ fi
 export RUST_BACKTRACE=1
 export RUST_LOG="warn,lemmy_server=$LEMMY_LOG_LEVEL,lemmy_federate=$LEMMY_LOG_LEVEL,lemmy_api=$LEMMY_LOG_LEVEL,lemmy_api_common=$LEMMY_LOG_LEVEL,lemmy_api_crud=$LEMMY_LOG_LEVEL,lemmy_apub=$LEMMY_LOG_LEVEL,lemmy_db_schema=$LEMMY_LOG_LEVEL,lemmy_db_views=$LEMMY_LOG_LEVEL,lemmy_db_views_actor=$LEMMY_LOG_LEVEL,lemmy_db_views_moderator=$LEMMY_LOG_LEVEL,lemmy_routes=$LEMMY_LOG_LEVEL,lemmy_utils=$LEMMY_LOG_LEVEL,lemmy_websocket=$LEMMY_LOG_LEVEL"

-export LEMMY_TEST_FAST_FEDERATION=1 # by default, the persistent federation queue has delays in the scale of 30s-5min
-
 # pictrs setup
 if [ ! -f "api_tests/pict-rs" ]; then
   curl "https://git.asonix.dog/asonix/pict-rs/releases/download/v0.5.16/pict-rs-linux-amd64" -o api_tests/pict-rs
@@ -75,6 +75,7 @@ impl LemmyContext {
       .app_data(context)
       // Dont allow any network fetches
       .http_fetch_limit(0)
+      .debug(true)
       .build()
       .await
       .expect("build federation config");
@@ -885,7 +885,7 @@ diesel::table! {
         send_community_followers_of -> Nullable<Int4>,
         send_all_instances -> Bool,
         actor_type -> ActorTypeEnum,
-        actor_apub_id -> Nullable<Text>,
+        actor_apub_id -> Text,
     }
 }
@@ -65,7 +65,7 @@ pub struct SentActivity {
   pub send_community_followers_of: Option<CommunityId>,
   pub send_all_instances: bool,
   pub actor_type: ActorType,
-  pub actor_apub_id: Option<DbUrl>,
+  pub actor_apub_id: DbUrl,
 }

 #[cfg_attr(feature = "full", derive(Insertable))]
@@ -35,4 +35,5 @@ pub struct InstanceForm {
   pub software: Option<String>,
   pub version: Option<String>,
   pub updated: Option<DateTime<Utc>>,
+  pub published: Option<DateTime<Utc>>,
 }
@@ -37,3 +37,5 @@ tokio-util = "0.7.11"

 [dev-dependencies]
 serial_test = { workspace = true }
+url.workspace = true
+actix-web.workspace = true
@@ -0,0 +1,152 @@
+use chrono::{DateTime, TimeDelta, TimeZone, Utc};
+use lemmy_api_common::context::LemmyContext;
+use lemmy_db_schema::{
+  newtypes::CommunityId,
+  source::{activity::SentActivity, instance::Instance, site::Site},
+};
+use lemmy_db_views_actor::structs::CommunityFollowerView;
+use lemmy_utils::error::LemmyResult;
+use once_cell::sync::Lazy;
+use reqwest::Url;
+use std::collections::{HashMap, HashSet};
+
+/// interval with which new additions to community_followers are queried.
+///
+/// The first time some user on an instance follows a specific remote community (or, more precisely:
+/// the first time a (followed_community_id, follower_inbox_url) tuple appears), this delay limits
+/// the maximum time until the follow actually results in activities from that community id being
+/// sent to that inbox url. This delay currently needs to not be too small because the DB load is
+/// currently fairly high because of the current structure of storing inboxes for every person, not
+/// having a separate list of shared_inboxes, and the architecture of having every instance queue be
+/// fully separate. (see https://github.com/LemmyNet/lemmy/issues/3958)
+static FOLLOW_ADDITIONS_RECHECK_DELAY: Lazy<TimeDelta> = Lazy::new(|| {
+  if cfg!(debug_assertions) {
+    TimeDelta::try_seconds(1).expect("TimeDelta out of bounds")
+  } else {
+    TimeDelta::try_minutes(2).expect("TimeDelta out of bounds")
+  }
+});
+
+/// The same as FOLLOW_ADDITIONS_RECHECK_DELAY, but triggering when the last person on an instance
+/// unfollows a specific remote community. This is expected to happen pretty rarely and updating it
+/// in a timely manner is not too important.
+static FOLLOW_REMOVALS_RECHECK_DELAY: Lazy<chrono::TimeDelta> =
+  Lazy::new(|| chrono::TimeDelta::try_hours(1).expect("TimeDelta out of bounds"));
+
+pub(crate) struct CommunityInboxCollector {
+  target: Instance,
+  // load site lazily because if an instance is first seen due to being on allowlist,
+  // the corresponding row in `site` may not exist yet since that is only added once
+  // `fetch_instance_actor_for_object` is called.
+  // (this should be unlikely to be relevant outside of the federation tests)
+  // TODO: use lazy
+  site_loaded: bool,
+  site: Option<Site>,
+  followed_communities: HashMap<CommunityId, HashSet<Url>>,
+  last_communities_fetch_full: DateTime<Utc>,
+  last_communities_fetch_incr: DateTime<Utc>,
+}
+
+impl CommunityInboxCollector {
+  pub fn new(target: Instance) -> Self {
+    Self {
+      target,
+      site_loaded: false,
+      site: None,
+      followed_communities: HashMap::new(),
+      last_communities_fetch_full: Utc.timestamp_nanos(0),
+      last_communities_fetch_incr: Utc.timestamp_nanos(0),
+    }
+  }
+
+  /// get inbox urls of sending the given activity to the given instance
+  /// most often this will return 0 values (if instance doesn't care about the activity)
+  /// or 1 value (the shared inbox)
+  /// > 1 values only happens for non-lemmy software
+  pub async fn get_inbox_urls(
+    &mut self,
+    activity: &SentActivity,
+    context: &LemmyContext,
+  ) -> LemmyResult<HashSet<Url>> {
+    let mut inbox_urls: HashSet<Url> = HashSet::new();
+
+    if activity.send_all_instances {
+      if !self.site_loaded {
+        self.site = Site::read_from_instance_id(&mut context.pool(), self.target.id).await?;
+        self.site_loaded = true;
+      }
+      if let Some(site) = &self.site {
+        // Nutomic: Most non-lemmy software wont have a site row. That means it cant handle these
+        // activities. So handling it like this is fine.
+        inbox_urls.insert(site.inbox_url.inner().clone());
+      }
+    }
+    if let Some(t) = &activity.send_community_followers_of {
+      if let Some(urls) = self.followed_communities.get(t) {
+        inbox_urls.extend(urls.iter().cloned());
+      }
+    }
+    inbox_urls.extend(
+      activity
+        .send_inboxes
+        .iter()
+        .filter_map(std::option::Option::as_ref)
+        .filter(|&u| (u.domain() == Some(&self.target.domain)))
+        .map(|u| u.inner().clone()),
+    );
+
+    // TODO: also needs to send to user followers
+
+    Ok(inbox_urls)
+  }
+
+  pub async fn update_communities(&mut self, context: &LemmyContext) -> LemmyResult<()> {
+    // update to time before fetch to ensure overlap. subtract 10s to ensure overlap even if
+    // published date is not exact
+    let updated_fetch =
+      Utc::now() - chrono::TimeDelta::try_seconds(10).expect("TimeDelta out of bounds");
+
+    let full_fetch = Utc::now() - self.last_communities_fetch_full;
+    if full_fetch > *FOLLOW_REMOVALS_RECHECK_DELAY {
+      // process removals every hour
+      self.followed_communities = self
+        .get_communities(Utc.timestamp_nanos(0), context)
+        .await?;
+      self.last_communities_fetch_full = updated_fetch;
+      self.last_communities_fetch_incr = self.last_communities_fetch_full;
+    }
+    let incr_fetch = Utc::now() - self.last_communities_fetch_incr;
+    if incr_fetch > *FOLLOW_ADDITIONS_RECHECK_DELAY {
+      // process additions every minute
+      let added = self
+        .get_communities(self.last_communities_fetch_incr, context)
+        .await?;
+      self.followed_communities.extend(added);
+      self.last_communities_fetch_incr = updated_fetch;
+    }
+    Ok(())
+  }
+
+  /// get a list of local communities with the remote inboxes on the given instance that cares about
+  /// them
+  async fn get_communities(
+    &mut self,
+    last_fetch: DateTime<Utc>,
+    context: &LemmyContext,
+  ) -> LemmyResult<HashMap<CommunityId, HashSet<Url>>> {
+    let followed = CommunityFollowerView::get_instance_followed_community_inboxes(
+      &mut context.pool(),
+      self.target.id,
+      last_fetch,
+    )
+    .await?;
+    Ok(
+      followed
+        .into_iter()
+        .fold(HashMap::new(), |mut map, (c, u)| {
+          map.entry(c).or_default().insert(u.into());
+          map
+        }),
+    )
+  }
+}
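For orientation, the worker hunks further down in this diff consume the new module roughly as follows. This is a condensed, hypothetical helper written against the imports at the top of inboxes.rs above, not code from the patch itself; the real InstanceWorker keeps the collector as a struct field.

async fn collect_inbox_urls(
  instance: Instance,
  activity: &SentActivity,
  context: &LemmyContext,
) -> LemmyResult<HashSet<Url>> {
  // one collector per target instance, as created in InstanceWorker::init_and_loop
  let mut inboxes = CommunityInboxCollector::new(instance);
  // refresh the followed-community map (full refresh hourly, additions every 1 s / 2 min)
  inboxes.update_communities(context).await?;
  // usually returns zero URLs (instance not interested) or one URL (the shared inbox)
  inboxes.get_inbox_urls(activity, context).await
}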
@@ -14,6 +14,7 @@ use tokio_util::sync::CancellationToken;
 use tracing::info;
 use util::FederationQueueStateWithDomain;

+mod inboxes;
 mod stats;
 mod util;
 mod worker;
@@ -26,28 +26,16 @@ use std::{fmt::Debug, future::Future, pin::Pin, sync::Arc, time::Duration};
 use tokio::{task::JoinHandle, time::sleep};
 use tokio_util::sync::CancellationToken;

-/// Decrease the delays of the federation queue.
-/// Should only be used for federation tests since it significantly increases CPU and DB load of the
-/// federation queue.
-pub(crate) static LEMMY_TEST_FAST_FEDERATION: Lazy<bool> = Lazy::new(|| {
-  std::env::var("LEMMY_TEST_FAST_FEDERATION")
-    .map(|s| !s.is_empty())
-    .unwrap_or(false)
-});
-
 /// Recheck for new federation work every n seconds.
 ///
 /// When the queue is processed faster than new activities are added and it reaches the current time
 /// with an empty batch, this is the delay the queue waits before it checks if new activities have
 /// been added to the sent_activities table. This delay is only applied if no federated activity
 /// happens during sending activities of the last batch.
-pub(crate) static WORK_FINISHED_RECHECK_DELAY: Lazy<Duration> = Lazy::new(|| {
-  if *LEMMY_TEST_FAST_FEDERATION {
-    Duration::from_millis(100)
-  } else {
-    Duration::from_secs(30)
-  }
-});
+#[cfg(debug_assertions)]
+pub(crate) static WORK_FINISHED_RECHECK_DELAY: Duration = Duration::from_millis(100);
+#[cfg(not(debug_assertions))]
+pub(crate) static WORK_FINISHED_RECHECK_DELAY: Duration = Duration::from_secs(30);

 /// A task that will be run in an infinite loop, unless it is cancelled.
 /// If the task exits without being cancelled, an error will be logged and the task will be
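Note the pattern swap in the hunk above: the LEMMY_TEST_FAST_FEDERATION env var is dropped in favour of the build profile. The delay is now chosen at compile time with #[cfg(debug_assertions)], while the new inboxes.rs checks the same flag at runtime with cfg!(debug_assertions) inside a Lazy initializer. A minimal standalone sketch of both forms, with values taken from this diff and a made-up helper name:

use std::time::Duration;

// compile-time selection: only one of the two statics exists in the binary
#[cfg(debug_assertions)]
pub static WORK_FINISHED_RECHECK_DELAY: Duration = Duration::from_millis(100);
#[cfg(not(debug_assertions))]
pub static WORK_FINISHED_RECHECK_DELAY: Duration = Duration::from_secs(30);

// runtime check on the same flag, usable inside a Lazy initializer
pub fn follow_additions_recheck_secs() -> i64 {
  if cfg!(debug_assertions) {
    1
  } else {
    120
  }
}

fn main() {
  println!("{:?} / {}s", WORK_FINISHED_RECHECK_DELAY, follow_additions_recheck_secs());
}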
@@ -192,6 +180,7 @@ pub(crate) async fn get_latest_activity_id(pool: &mut DbPool<'_>) -> Result<Acti
 
 /// the domain name is needed for logging, pass it to the stats printer so it doesn't need to look
 /// up the domain itself
+#[derive(Debug)]
 pub(crate) struct FederationQueueStateWithDomain {
   pub domain: String,
   pub state: FederationQueueState,
@@ -1,10 +1,12 @@
-use crate::util::{
-  get_activity_cached,
-  get_actor_cached,
-  get_latest_activity_id,
-  FederationQueueStateWithDomain,
-  LEMMY_TEST_FAST_FEDERATION,
-  WORK_FINISHED_RECHECK_DELAY,
+use crate::{
+  inboxes::CommunityInboxCollector,
+  util::{
+    get_activity_cached,
+    get_actor_cached,
+    get_latest_activity_id,
+    FederationQueueStateWithDomain,
+    WORK_FINISHED_RECHECK_DELAY,
+  },
 };
 use activitypub_federation::{
   activity_sending::SendActivityTask,
@@ -16,20 +18,16 @@ use chrono::{DateTime, Days, TimeZone, Utc};
 use lemmy_api_common::{context::LemmyContext, federate_retry_sleep_duration};
 use lemmy_apub::{activity_lists::SharedInboxActivities, FEDERATION_CONTEXT};
 use lemmy_db_schema::{
-  newtypes::{ActivityId, CommunityId, InstanceId},
+  newtypes::ActivityId,
   source::{
     activity::SentActivity,
     federation_queue_state::FederationQueueState,
     instance::{Instance, InstanceForm},
-    site::Site,
   },
   utils::naive_now,
 };
-use lemmy_db_views_actor::structs::CommunityFollowerView;
-use once_cell::sync::Lazy;
-use reqwest::Url;
+use lemmy_utils::error::LemmyResult;
 use std::{
-  collections::{HashMap, HashSet},
   ops::{Add, Deref},
   time::Duration,
 };
@@ -43,42 +41,17 @@ use tracing::{debug, info, trace, warn};
 static CHECK_SAVE_STATE_EVERY_IT: i64 = 100;
 /// Save state to db after this time has passed since the last state (so if the server crashes or is
 /// SIGKILLed, less than X seconds of activities are resent)
-static SAVE_STATE_EVERY_TIME: Duration = Duration::from_secs(60);
-/// interval with which new additions to community_followers are queried.
-///
-/// The first time some user on an instance follows a specific remote community (or, more precisely:
-/// the first time a (followed_community_id, follower_inbox_url) tuple appears), this delay limits
-/// the maximum time until the follow actually results in activities from that community id being
-/// sent to that inbox url. This delay currently needs to not be too small because the DB load is
-/// currently fairly high because of the current structure of storing inboxes for every person, not
-/// having a separate list of shared_inboxes, and the architecture of having every instance queue be
-/// fully separate. (see https://github.com/LemmyNet/lemmy/issues/3958)
-static FOLLOW_ADDITIONS_RECHECK_DELAY: Lazy<chrono::TimeDelta> = Lazy::new(|| {
-  if *LEMMY_TEST_FAST_FEDERATION {
-    chrono::TimeDelta::try_seconds(1).expect("TimeDelta out of bounds")
-  } else {
-    chrono::TimeDelta::try_minutes(2).expect("TimeDelta out of bounds")
-  }
-});
-/// The same as FOLLOW_ADDITIONS_RECHECK_DELAY, but triggering when the last person on an instance
-/// unfollows a specific remote community. This is expected to happen pretty rarely and updating it
-/// in a timely manner is not too important.
-static FOLLOW_REMOVALS_RECHECK_DELAY: Lazy<chrono::TimeDelta> =
-  Lazy::new(|| chrono::TimeDelta::try_hours(1).expect("TimeDelta out of bounds"));
+#[cfg(debug_assertions)]
+static SAVE_STATE_EVERY_TIME: chrono::Duration = chrono::Duration::seconds(1);
+#[cfg(not(debug_assertions))]
+static SAVE_STATE_EVERY_TIME: chrono::Duration = chrono::Duration::seconds(60);
 pub(crate) struct InstanceWorker {
-  instance: Instance,
-  // load site lazily because if an instance is first seen due to being on allowlist,
-  // the corresponding row in `site` may not exist yet since that is only added once
-  // `fetch_instance_actor_for_object` is called.
-  // (this should be unlikely to be relevant outside of the federation tests)
-  site_loaded: bool,
-  site: Option<Site>,
-  followed_communities: HashMap<CommunityId, HashSet<Url>>,
+  target: Instance,
+  inboxes: CommunityInboxCollector,
   stop: CancellationToken,
   context: Data<LemmyContext>,
   stats_sender: UnboundedSender<FederationQueueStateWithDomain>,
-  last_full_communities_fetch: DateTime<Utc>,
-  last_incremental_communities_fetch: DateTime<Utc>,
   state: FederationQueueState,
   last_state_insert: DateTime<Utc>,
 }
@@ -89,19 +62,16 @@ impl InstanceWorker {
     context: Data<LemmyContext>,
     stop: CancellationToken,
     stats_sender: UnboundedSender<FederationQueueStateWithDomain>,
-  ) -> Result<(), anyhow::Error> {
+  ) -> LemmyResult<()> {
     let mut pool = context.pool();
     let state = FederationQueueState::load(&mut pool, instance.id).await?;
+    let inboxes = CommunityInboxCollector::new(instance.clone());
     let mut worker = InstanceWorker {
-      instance,
-      site_loaded: false,
-      site: None,
-      followed_communities: HashMap::new(),
+      target: instance,
+      inboxes,
       stop,
       context,
       stats_sender,
-      last_full_communities_fetch: Utc.timestamp_nanos(0),
-      last_incremental_communities_fetch: Utc.timestamp_nanos(0),
      state,
      last_state_insert: Utc.timestamp_nanos(0),
    };
@@ -110,21 +80,19 @@ impl InstanceWorker {
   /// loop fetch new activities from db and send them to the inboxes of the given instances
   /// this worker only returns if (a) there is an internal error or (b) the cancellation token is
   /// cancelled (graceful exit)
-  pub(crate) async fn loop_until_stopped(&mut self) -> Result<(), anyhow::Error> {
-    debug!("Starting federation worker for {}", self.instance.domain);
-    let save_state_every = chrono::Duration::from_std(SAVE_STATE_EVERY_TIME).expect("not negative");
+  pub(crate) async fn loop_until_stopped(&mut self) -> LemmyResult<()> {
+    debug!("Starting federation worker for {}", self.target.domain);
+    self.inboxes.update_communities(&self.context).await?;

-    self.update_communities().await?;
     self.initial_fail_sleep().await?;
     while !self.stop.is_cancelled() {
       self.loop_batch().await?;
       if self.stop.is_cancelled() {
         break;
       }
-      if (Utc::now() - self.last_state_insert) > save_state_every {
+      if (Utc::now() - self.last_state_insert) > SAVE_STATE_EVERY_TIME {
         self.save_and_send_state().await?;
       }
-      self.update_communities().await?;
+      self.inboxes.update_communities(&self.context).await?;
     }
     // final update of state in db
     self.save_and_send_state().await?;
@@ -177,7 +145,7 @@ impl InstanceWorker {
       }
       // no more work to be done, wait before rechecking
       tokio::select! {
-        () = sleep(*WORK_FINISHED_RECHECK_DELAY) => {},
+        () = sleep(WORK_FINISHED_RECHECK_DELAY) => {},
         () = self.stop.cancelled() => {}
       }
       return Ok(());
@@ -193,7 +161,7 @@ impl InstanceWorker {
       .await
       .context("failed reading activity from db")?
       else {
-        debug!("{}: {:?} does not exist", self.instance.domain, id);
+        debug!("{}: {:?} does not exist", self.target.domain, id);
         self.state.last_successful_id = Some(id);
         continue;
       };
@@ -220,23 +188,22 @@ impl InstanceWorker {
     &mut self,
     activity: &SentActivity,
     object: &SharedInboxActivities,
-  ) -> Result<()> {
-    let inbox_urls = self
-      .get_inbox_urls(activity)
-      .await
-      .context("failed figuring out inbox urls")?;
+  ) -> LemmyResult<()> {
+    println!("send retry loop {:?}", activity.id);
+    let inbox_urls = self.inboxes.get_inbox_urls(activity, &self.context).await?;
     if inbox_urls.is_empty() {
-      trace!("{}: {:?} no inboxes", self.instance.domain, activity.id);
+      trace!("{}: {:?} no inboxes", self.target.domain, activity.id);
       self.state.last_successful_id = Some(activity.id);
       self.state.last_successful_published_time = Some(activity.published);
       return Ok(());
     }
-    let Some(actor_apub_id) = &activity.actor_apub_id else {
-      return Ok(()); // activity was inserted before persistent queue was activated
-    };
-    let actor = get_actor_cached(&mut self.context.pool(), activity.actor_type, actor_apub_id)
-      .await
-      .context("failed getting actor instance (was it marked deleted / removed?)")?;
+    let actor = get_actor_cached(
+      &mut self.context.pool(),
+      activity.actor_type,
+      &activity.actor_apub_id,
+    )
+    .await
+    .context("failed getting actor instance (was it marked deleted / removed?)")?;

     let object = WithContext::new(object.clone(), FEDERATION_CONTEXT.deref().clone());
     let inbox_urls = inbox_urls.into_iter().collect();
@@ -251,7 +218,7 @@ impl InstanceWorker {
         let retry_delay: Duration = federate_retry_sleep_duration(self.state.fail_count);
         info!(
           "{}: retrying {:?} attempt {} with delay {retry_delay:.2?}. ({e})",
-          self.instance.domain, activity.id, self.state.fail_count
+          self.target.domain, activity.id, self.state.fail_count
         );
         self.save_and_send_state().await?;
         tokio::select! {
@@ -264,105 +231,230 @@ impl InstanceWorker {
       }

       // Activity send successful, mark instance as alive if it hasn't been updated in a while.
-      let updated = self.instance.updated.unwrap_or(self.instance.published);
+      let updated = self.target.updated.unwrap_or(self.target.published);
+      dbg!(&updated);
       if updated.add(Days::new(1)) < Utc::now() {
-        self.instance.updated = Some(Utc::now());
+        self.target.updated = Some(Utc::now());

         let form = InstanceForm::builder()
-          .domain(self.instance.domain.clone())
+          .domain(self.target.domain.clone())
           .updated(Some(naive_now()))
           .build();
-        Instance::update(&mut self.context.pool(), self.instance.id, form).await?;
+        Instance::update(&mut self.context.pool(), self.target.id, form).await?;
       }
     }
     Ok(())
   }

-  /// get inbox urls of sending the given activity to the given instance
-  /// most often this will return 0 values (if instance doesn't care about the activity)
-  /// or 1 value (the shared inbox)
-  /// > 1 values only happens for non-lemmy software
-  async fn get_inbox_urls(&mut self, activity: &SentActivity) -> Result<HashSet<Url>> {
-    let mut inbox_urls: HashSet<Url> = HashSet::new();
-
-    if activity.send_all_instances {
-      if !self.site_loaded {
-        self.site = Site::read_from_instance_id(&mut self.context.pool(), self.instance.id).await?;
-        self.site_loaded = true;
-      }
-      if let Some(site) = &self.site {
-        // Nutomic: Most non-lemmy software wont have a site row. That means it cant handle these
-        // activities. So handling it like this is fine.
-        inbox_urls.insert(site.inbox_url.inner().clone());
-      }
-    }
-    if let Some(t) = &activity.send_community_followers_of {
-      if let Some(urls) = self.followed_communities.get(t) {
-        inbox_urls.extend(urls.iter().cloned());
-      }
-    }
-    inbox_urls.extend(
-      activity
-        .send_inboxes
-        .iter()
-        .filter_map(std::option::Option::as_ref)
-        .filter(|&u| (u.domain() == Some(&self.instance.domain)))
-        .map(|u| u.inner().clone()),
-    );
-    Ok(inbox_urls)
-  }
-
-  async fn update_communities(&mut self) -> Result<()> {
-    if (Utc::now() - self.last_full_communities_fetch) > *FOLLOW_REMOVALS_RECHECK_DELAY {
-      // process removals every hour
-      (self.followed_communities, self.last_full_communities_fetch) = self
-        .get_communities(self.instance.id, Utc.timestamp_nanos(0))
-        .await?;
-      self.last_incremental_communities_fetch = self.last_full_communities_fetch;
-    }
-    if (Utc::now() - self.last_incremental_communities_fetch) > *FOLLOW_ADDITIONS_RECHECK_DELAY {
-      // process additions every minute
-      let (news, time) = self
-        .get_communities(self.instance.id, self.last_incremental_communities_fetch)
-        .await?;
-      self.followed_communities.extend(news);
-      self.last_incremental_communities_fetch = time;
-    }
-    Ok(())
-  }
-
-  /// get a list of local communities with the remote inboxes on the given instance that cares about
-  /// them
-  async fn get_communities(
-    &mut self,
-    instance_id: InstanceId,
-    last_fetch: DateTime<Utc>,
-  ) -> Result<(HashMap<CommunityId, HashSet<Url>>, DateTime<Utc>)> {
-    let new_last_fetch =
-      Utc::now() - chrono::TimeDelta::try_seconds(10).expect("TimeDelta out of bounds"); // update to time before fetch to ensure overlap. subtract 10s to ensure overlap even if
-    // published date is not exact
-    Ok((
-      CommunityFollowerView::get_instance_followed_community_inboxes(
-        &mut self.context.pool(),
-        instance_id,
-        last_fetch,
-      )
-      .await?
-      .into_iter()
-      .fold(HashMap::new(), |mut map, (c, u)| {
-        map.entry(c).or_default().insert(u.into());
-        map
-      }),
-      new_last_fetch,
-    ))
-  }
   async fn save_and_send_state(&mut self) -> Result<()> {
     self.last_state_insert = Utc::now();
     FederationQueueState::upsert(&mut self.context.pool(), &self.state).await?;
     self.stats_sender.send(FederationQueueStateWithDomain {
       state: self.state.clone(),
-      domain: self.instance.domain.clone(),
+      domain: self.target.domain.clone(),
     })?;
     Ok(())
   }
 }

+#[cfg(test)]
+#[allow(clippy::unwrap_used)]
+#[allow(clippy::indexing_slicing)]
+mod test {
+
+  use super::*;
+  use activitypub_federation::http_signatures::generate_actor_keypair;
+  use actix_web::{rt::System, web, App, HttpResponse, HttpServer};
+  use lemmy_api_common::utils::{generate_inbox_url, generate_shared_inbox_url};
+  use lemmy_db_schema::{
+    newtypes::DbUrl,
+    source::{
+      activity::{ActorType, SentActivityForm},
+      person::{Person, PersonInsertForm},
+    },
+    traits::Crud,
+  };
+  use reqwest::StatusCode;
+  use serde_json::Value;
+  use serial_test::serial;
+  use std::{fs::File, io::BufReader};
+  use tokio::{
+    select,
+    spawn,
+    sync::mpsc::{error::TryRecvError, unbounded_channel, UnboundedReceiver},
+  };
+  use url::Url;
+
+  struct Data {
+    context: activitypub_federation::config::Data<LemmyContext>,
+    instance: Instance,
+    person: Person,
+    stats_receiver: UnboundedReceiver<FederationQueueStateWithDomain>,
+    inbox_receiver: UnboundedReceiver<String>,
+    cancel: CancellationToken,
+  }
+
+  impl Data {
+    async fn init() -> LemmyResult<Self> {
+      let context = LemmyContext::init_test_context().await;
+      let instance = Instance::read_or_create(&mut context.pool(), "localhost".to_string()).await?;
+
+      let actor_keypair = generate_actor_keypair()?;
+      let actor_id: DbUrl = Url::parse("http://local.com/u/alice")?.into();
+      let person_form = PersonInsertForm::builder()
+        .name("alice".to_string())
+        .actor_id(Some(actor_id.clone()))
+        .private_key(Some(actor_keypair.private_key))
+        .public_key(actor_keypair.public_key)
+        .inbox_url(Some(generate_inbox_url(&actor_id)?))
+        .shared_inbox_url(Some(generate_shared_inbox_url(context.settings())?))
+        .instance_id(instance.id)
+        .build();
+      let person = Person::create(&mut context.pool(), &person_form).await?;
+
+      let cancel = CancellationToken::new();
+      let (stats_sender, stats_receiver) = unbounded_channel();
+      let (inbox_sender, inbox_receiver) = unbounded_channel();
+
+      // listen for received activities in background
+      let cancel_ = cancel.clone();
+      std::thread::spawn(move || System::new().block_on(listen_activities(inbox_sender, cancel_)));
+
+      spawn(InstanceWorker::init_and_loop(
+        instance.clone(),
+        context.reset_request_count(),
+        cancel.clone(),
+        stats_sender,
+      ));
+      // wait for startup
+      sleep(WORK_FINISHED_RECHECK_DELAY).await;
+
+      Ok(Self {
+        context,
+        instance,
+        person,
+        stats_receiver,
+        inbox_receiver,
+        cancel,
+      })
+    }
+
+    async fn cleanup(&self) -> LemmyResult<()> {
+      self.cancel.cancel();
+      sleep(WORK_FINISHED_RECHECK_DELAY).await;
+      Instance::delete_all(&mut self.context.pool()).await?;
+      Person::delete(&mut self.context.pool(), self.person.id).await?;
+      Ok(())
+    }
+  }
+
+  #[tokio::test]
+  #[serial]
+  async fn test_stats() -> LemmyResult<()> {
+    let mut data = Data::init().await?;
+
+    // first receive at startup
+    let rcv = data.stats_receiver.recv().await.unwrap();
+    assert_eq!(data.instance.id, rcv.state.instance_id);
+    assert_eq!(Some(ActivityId(0)), rcv.state.last_successful_id);
+
+    let sent = send_activity(data.person.actor_id.clone(), &data.context).await?;
+
+    // receive for successfully sent activity
+    let inbox_rcv = data.inbox_receiver.recv().await.unwrap();
+    let parsed_activity = serde_json::from_str::<WithContext<Value>>(&inbox_rcv)?;
+    assert_eq!(&sent.data, parsed_activity.inner());
+
+    let rcv = data.stats_receiver.recv().await.unwrap();
+    assert_eq!(data.instance.id, rcv.state.instance_id);
+    assert_eq!(Some(sent.id), rcv.state.last_successful_id);
+
+    data.cleanup().await?;
+
+    // it also sends state on shutdown
+    let rcv = data.stats_receiver.try_recv();
+    assert!(rcv.is_ok());
+
+    // nothing further received
+    let rcv = data.stats_receiver.try_recv();
+    assert_eq!(Some(TryRecvError::Disconnected), rcv.err());
+    let inbox_rcv = data.inbox_receiver.try_recv();
+    assert_eq!(Some(TryRecvError::Empty), inbox_rcv.err());
+
+    Ok(())
+  }
+
+  #[tokio::test]
+  #[serial]
+  async fn test_update_instance() -> LemmyResult<()> {
+    let mut data = Data::init().await?;
+
+    let published = DateTime::from_timestamp_nanos(0);
+    let form = InstanceForm::builder()
+      .domain(data.instance.domain.clone())
+      .published(Some(published))
+      .updated(None)
+      .build();
+    Instance::update(&mut data.context.pool(), data.instance.id, form).await?;
+
+    send_activity(data.person.actor_id.clone(), &data.context).await?;
+    data.inbox_receiver.recv().await.unwrap();
+
+    let instance =
+      Instance::read_or_create(&mut data.context.pool(), data.instance.domain.clone()).await?;
+
+    assert!(instance.updated.is_some());
+
+    data.cleanup().await?;
+
+    Ok(())
+  }
+
+  async fn listen_activities(
+    inbox_sender: UnboundedSender<String>,
+    cancel: CancellationToken,
+  ) -> LemmyResult<()> {
+    let run = HttpServer::new(move || {
+      App::new()
+        .app_data(actix_web::web::Data::new(inbox_sender.clone()))
+        .route(
+          "/inbox",
+          web::post().to(
+            |inbox_sender: actix_web::web::Data<UnboundedSender<String>>, body: String| async move {
+              inbox_sender.send(body.clone()).unwrap();
+              HttpResponse::new(StatusCode::OK)
+            },
+          ),
+        )
+    })
+    .bind(("127.0.0.1", 8085))?
+    .run();
+    select! {
+      _ = run => {},
+      _ = cancel.cancelled() => {
+      }
+    }
+    Ok(())
+  }
+
+  async fn send_activity(actor_id: DbUrl, context: &LemmyContext) -> LemmyResult<SentActivity> {
+    // create outgoing activity
+    let file = File::open("../apub/assets/lemmy/activities/voting/like_note.json")?;
+    let reader = BufReader::new(file);
+    let form = SentActivityForm {
+      ap_id: Url::parse("http://local.com/activity/1")?.into(),
+      data: serde_json::from_reader(reader)?,
+      sensitive: false,
+      send_inboxes: vec![Some(Url::parse("http://localhost:8085/inbox")?.into())],
+      send_all_instances: false,
+      send_community_followers_of: None,
+      actor_type: ActorType::Person,
+      actor_apub_id: actor_id,
+    };
+    let sent = SentActivity::create(&mut context.pool(), form).await?;
+
+    sleep(WORK_FINISHED_RECHECK_DELAY * 2).await;
+
+    Ok(sent)
+  }
+}
@@ -44,7 +44,6 @@ full = [
   "dep:enum-map",
   "dep:futures",
   "dep:tokio",
-  "dep:openssl",
   "dep:html2text",
   "dep:lettre",
   "dep:uuid",
@@ -74,7 +73,6 @@ uuid = { workspace = true, features = ["serde", "v4"], optional = true }
 rosetta-i18n = { workspace = true, optional = true }
 tokio = { workspace = true, optional = true }
 urlencoding = { workspace = true, optional = true }
-openssl = { version = "0.10.64", optional = true }
 html2text = { version = "0.12.5", optional = true }
 deser-hjson = { version = "2.2.4", optional = true }
 smart-default = { version = "0.7.1", optional = true }
@@ -1,26 +0,0 @@
-use openssl::{pkey::PKey, rsa::Rsa};
-use std::io::{Error, ErrorKind};
-
-pub struct Keypair {
-  pub private_key: String,
-  pub public_key: String,
-}
-
-/// Generate the asymmetric keypair for ActivityPub HTTP signatures.
-pub fn generate_actor_keypair() -> Result<Keypair, Error> {
-  let rsa = Rsa::generate(2048)?;
-  let pkey = PKey::from_rsa(rsa)?;
-  let public_key = pkey.public_key_to_pem()?;
-  let private_key = pkey.private_key_to_pem_pkcs8()?;
-  let key_to_string = |key| match String::from_utf8(key) {
-    Ok(s) => Ok(s),
-    Err(e) => Err(Error::new(
-      ErrorKind::Other,
-      format!("Failed converting key to string: {e}"),
-    )),
-  };
-  Ok(Keypair {
-    private_key: key_to_string(private_key)?,
-    public_key: key_to_string(public_key)?,
-  })
-}
@@ -2,7 +2,6 @@ use cfg_if::cfg_if;

 cfg_if! {
   if #[cfg(feature = "full")] {
-    pub mod apub;
     pub mod cache_header;
     pub mod email;
     pub mod rate_limit;
@@ -0,0 +1,3 @@
+ALTER TABLE sent_activity
+    ALTER COLUMN actor_apub_id DROP NOT NULL;
@@ -0,0 +1,3 @@
+ALTER TABLE sent_activity
+    ALTER COLUMN actor_apub_id SET NOT NULL;