dullbananas 2024-05-18 12:19:42 +05:30 committed by GitHub
commit 48238735b5
26 changed files with 3286 additions and 112 deletions

View File

@@ -133,18 +133,6 @@ steps:
- diff config/defaults.hjson config/defaults_current.hjson
when: *slow_check_paths
check_diesel_schema:
image: *rust_image
environment:
CARGO_HOME: .cargo_home
DATABASE_URL: postgres://lemmy:password@database:5432/lemmy
commands:
- <<: *install_diesel_cli
- diesel migration run
- diesel print-schema --config-file=diesel.toml > tmp.schema
- diff tmp.schema crates/db_schema/src/schema.rs
when: *slow_check_paths
check_db_perf_tool:
image: *rust_image
environment:
@@ -175,6 +163,19 @@ steps:
- mv target/debug/lemmy_server target/lemmy_server
when: *slow_check_paths
check_diesel_schema:
image: *rust_image
environment:
LEMMY_DATABASE_URL: postgres://lemmy:password@database:5432/lemmy
RUST_BACKTRACE: "1"
CARGO_HOME: .cargo_home
commands:
- target/lemmy_server migration run
- <<: *install_diesel_cli
- diesel print-schema --config-file=diesel.toml > tmp.schema
- diff tmp.schema crates/db_schema/src/schema.rs
when: *slow_check_paths
cargo_test:
image: *rust_image
environment:
@@ -186,44 +187,6 @@ steps:
- cargo test --workspace --no-fail-fast
when: *slow_check_paths
check_diesel_migration:
# TODO: use willsquire/diesel-cli image when shared libraries become optional in lemmy_server
image: *rust_image
environment:
LEMMY_DATABASE_URL: postgres://lemmy:password@database:5432/lemmy
RUST_BACKTRACE: "1"
CARGO_HOME: .cargo_home
DATABASE_URL: postgres://lemmy:password@database:5432/lemmy
PGUSER: lemmy
PGPASSWORD: password
PGHOST: database
PGDATABASE: lemmy
commands:
# Install diesel_cli
- <<: *install_diesel_cli
# Run all migrations
- diesel migration run
# Dump schema to before.sqldump (PostgreSQL apt repo is used to prevent pg_dump version mismatch error)
- apt update && apt install -y lsb-release
- sh -c 'echo "deb https://apt.postgresql.org/pub/repos/apt $(lsb_release -cs)-pgdg main" > /etc/apt/sources.list.d/pgdg.list'
- wget --quiet -O - https://www.postgresql.org/media/keys/ACCC4CF8.asc | apt-key add -
- apt update && apt install -y postgresql-client-16
- psql -c "DROP SCHEMA IF EXISTS r CASCADE;"
- pg_dump --no-owner --no-privileges --no-table-access-method --schema-only --no-sync -f before.sqldump
# Make sure that the newest migration is revertible without the `r` schema
- diesel migration redo
# Run schema setup twice, which fails on the 2nd time if `DROP SCHEMA IF EXISTS r CASCADE` drops the wrong things
- alias lemmy_schema_setup="target/lemmy_server --disable-scheduled-tasks --disable-http-server --disable-activity-sending"
- lemmy_schema_setup
- lemmy_schema_setup
# Make sure that the newest migration is revertible with the `r` schema
- diesel migration redo
# Check for changes in the schema, which would be caused by an incorrect migration
- psql -c "DROP SCHEMA IF EXISTS r CASCADE;"
- pg_dump --no-owner --no-privileges --no-table-access-method --schema-only --no-sync -f after.sqldump
- diff before.sqldump after.sqldump
when: *slow_check_paths
run_federation_tests:
image: node:20-bookworm-slim
environment:

View File

@@ -11,11 +11,6 @@ extern crate diesel_derive_newtype;
#[macro_use]
extern crate diesel_derive_enum;
// this is used in tests
#[cfg(feature = "full")]
#[macro_use]
extern crate diesel_migrations;
#[cfg(feature = "full")]
#[macro_use]
extern crate async_trait;
@@ -45,7 +40,7 @@ pub mod traits;
pub mod utils;
#[cfg(feature = "full")]
mod schema_setup;
pub mod schema_setup;
use serde::{Deserialize, Serialize};
use strum_macros::{Display, EnumString};

View File

@@ -800,6 +800,13 @@ diesel::table! {
}
}
diesel::table! {
previously_run_sql (id) {
id -> Bool,
content -> Text,
}
}
diesel::table! {
private_message (id) {
id -> Int4,
@@ -1092,6 +1099,7 @@ diesel::allow_tables_to_appear_in_same_query!(
post_read,
post_report,
post_saved,
previously_run_sql,
private_message,
private_message_report,
received_activity,

View File

@@ -1,64 +1,252 @@
use anyhow::Context;
use diesel::{connection::SimpleConnection, Connection, PgConnection};
use diesel_migrations::{EmbeddedMigrations, MigrationHarness};
use lemmy_utils::error::LemmyError;
use crate::schema::previously_run_sql;
use anyhow::{anyhow, Context};
use diesel::{
connection::SimpleConnection,
migration::{Migration, MigrationSource, MigrationVersion},
pg::Pg,
select,
update,
Connection,
ExpressionMethods,
NullableExpressionMethods,
PgConnection,
QueryDsl,
RunQueryDsl,
};
use diesel_migrations::MigrationHarness;
use lemmy_utils::error::{LemmyError, LemmyResult};
use std::time::Instant;
use tracing::info;
const MIGRATIONS: EmbeddedMigrations = embed_migrations!();
// In production, include migrations in the binary
#[cfg(not(debug_assertions))]
fn get_migration_source() -> diesel_migrations::EmbeddedMigrations {
// Using `const` here is required by the borrow checker
const MIGRATIONS: diesel_migrations::EmbeddedMigrations = diesel_migrations::embed_migrations!();
MIGRATIONS
}
// Avoid recompiling when migrations are changed
#[cfg(debug_assertions)]
fn get_migration_source() -> diesel_migrations::FileBasedMigrations {
diesel_migrations::FileBasedMigrations::find_migrations_directory()
.expect("failed to find migrations dir")
}
/// This SQL code sets up the `r` schema, which contains things that can be safely dropped and replaced
/// instead of being changed using migrations. It may not create or modify things outside of the `r` schema
/// (indicated by `r.` before the name), unless a comment says otherwise.
///
/// Currently, this code is only run after the server starts and there's at least 1 pending migration
/// to run. This means every time you change something here, you must also create a migration (a blank
/// up.sql file works fine). This behavior will be removed when we implement a better way to avoid
/// useless schema updates and locks.
///
/// If you add something that depends on something (such as a table) created in a new migration, then down.sql
/// must use `CASCADE` when dropping it. This doesn't need to be fixed in old migrations because the
/// "replaceable-schema" migration runs `DROP SCHEMA IF EXISTS r CASCADE` in down.sql.
const REPLACEABLE_SCHEMA: &[&str] = &[
"DROP SCHEMA IF EXISTS r CASCADE;",
"CREATE SCHEMA r;",
include_str!("../replaceable_schema/utils.sql"),
include_str!("../replaceable_schema/triggers.sql"),
];
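To make the convention concrete, here is a minimal hypothetical sketch of a replaceable object; the name and body are illustrative only, not from this PR (the real definitions live in crates/db_schema/replaceable_schema/utils.sql and triggers.sql):

-- Created inside `r`, so `DROP SCHEMA r CASCADE` removes it without a migration
CREATE FUNCTION r.example_rank (score numeric)
    RETURNS numeric
    LANGUAGE sql
    IMMUTABLE
    AS 'SELECT score + 1';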
pub fn run(db_url: &str) -> Result<(), LemmyError> {
// Migrations don't support async connections
struct MigrationHarnessWrapper<'a> {
conn: &'a mut PgConnection,
}
impl<'a> MigrationHarness<Pg> for MigrationHarnessWrapper<'a> {
fn run_migration(
&mut self,
migration: &dyn Migration<Pg>,
) -> diesel::migration::Result<MigrationVersion<'static>> {
let start_time = Instant::now();
let result = self.conn.run_migration(migration);
let duration = start_time.elapsed().as_millis();
let name = migration.name();
info!("{duration}ms run {name}");
result
}
fn revert_migration(
&mut self,
migration: &dyn Migration<Pg>,
) -> diesel::migration::Result<MigrationVersion<'static>> {
let start_time = Instant::now();
let result = self.conn.revert_migration(migration);
let duration = start_time.elapsed().as_millis();
let name = migration.name();
info!("{duration}ms revert {name}");
result
}
fn applied_migrations(&mut self) -> diesel::migration::Result<Vec<MigrationVersion<'static>>> {
self.conn.applied_migrations()
}
}
// TODO: remove when diesel either adds MigrationSource impl for references or changes functions to take reference
#[derive(Clone, Copy)]
struct MigrationSourceRef<T>(
// If this was `&T`, then the derive macros would add `Clone` and `Copy` bounds for `T`
T,
);
impl<'a, T: MigrationSource<Pg>> MigrationSource<Pg> for MigrationSourceRef<&'a T> {
fn migrations(&self) -> diesel::migration::Result<Vec<Box<dyn Migration<Pg>>>> {
self.0.migrations()
}
}
#[derive(Default)]
pub struct Options {
enable_forbid_diesel_cli_trigger: bool,
revert: bool,
revert_amount: Option<u64>,
redo_after_revert: bool,
}
impl Options {
#[cfg(test)]
fn enable_forbid_diesel_cli_trigger(mut self) -> Self {
self.enable_forbid_diesel_cli_trigger = true;
self
}
pub fn revert(mut self, amount: Option<u64>) -> Self {
self.revert = true;
self.revert_amount = amount;
self
}
pub fn redo(mut self, amount: Option<u64>) -> Self {
self.redo_after_revert = true;
self.revert(amount)
}
}
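A brief usage sketch for these builder methods (the call sites below are hypothetical; `run` and `Options` are the public items shown in this file):

use lemmy_db_schema::schema_setup;
use lemmy_utils::error::LemmyResult;

fn redo_newest_migration(db_url: &str) -> LemmyResult<()> {
  // Equivalent of `diesel migration redo`: revert the newest migration, then re-run it
  schema_setup::run(db_url, schema_setup::Options::default().redo(Some(1)))
}

fn revert_everything(db_url: &str) -> LemmyResult<()> {
  // Revert every applied migration without re-running anything
  schema_setup::run(db_url, schema_setup::Options::default().revert(None))
}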
pub fn run(db_url: &str, options: Options) -> LemmyResult<()> {
// Migrations don't support async connections, and this function doesn't need to be async
let mut conn = PgConnection::establish(db_url).with_context(|| "Error connecting to database")?;
// Run all pending migrations except for the newest one, then run the newest one in the same transaction
// as `REPLACEABLE_SCHEMA`. This code will become less hacky when the conditional setup of things in
// `REPLACEABLE_SCHEMA` is done without using the number of pending migrations.
info!("Running Database migrations (This may take a long time)...");
let migrations = conn
.pending_migrations(MIGRATIONS)
.map_err(|e| anyhow::anyhow!("Couldn't determine pending migrations: {e}"))?;
for migration in migrations.iter().rev().skip(1).rev() {
conn
.run_migration(migration)
.map_err(|e| anyhow::anyhow!("Couldn't run DB Migrations: {e}"))?;
}
conn.transaction::<_, LemmyError, _>(|conn| {
if let Some(migration) = migrations.last() {
// Migration is run with a savepoint since there's already a transaction
conn
.run_migration(migration)
.map_err(|e| anyhow::anyhow!("Couldn't run DB Migrations: {e}"))?;
} else if !cfg!(debug_assertions) {
// In production, skip running `REPLACEABLE_SCHEMA` to avoid locking things in the schema. In
// CI, always run it because `diesel migration` commands would otherwise prevent it.
let new_sql = REPLACEABLE_SCHEMA.join("\n");
let migration_source = get_migration_source();
let migration_source_ref = MigrationSourceRef(&migration_source);
// If possible, skip locking the migrations table and recreating the "r" schema, so
// lemmy_server processes in a horizontally scaled setup can start without causing locks
if !(options.revert
|| conn
.has_pending_migration(migration_source_ref)
.map_err(|e| anyhow!("Couldn't check pending migrations: {e}"))?)
{
// The condition above implies that the migration that creates the previously_run_sql table was already run
let sql_unchanged: bool = select(
previously_run_sql::table
.select(previously_run_sql::content)
.single_value()
.assume_not_null()
.eq(&new_sql),
)
.get_result(&mut conn)?;
if sql_unchanged {
return Ok(());
}
conn
.batch_execute(&REPLACEABLE_SCHEMA.join("\n"))
.context("Couldn't run SQL files in crates/db_schema/replaceable_schema")?;
}
conn.transaction::<_, LemmyError, _>(|conn| {
let mut wrapper = MigrationHarnessWrapper { conn };
// * Prevent other lemmy_server processes from running this transaction simultaneously by repurposing
// the table created by `MigrationHarness::pending_migrations` as a lock target (this doesn't block
// normal use of the table)
// * Drop `r` schema, so migrations don't need to be made to work both with and without things in
// it existing
// * Disable the trigger that prevents the Diesel CLI from running migrations
info!("Waiting for lock...");
let enable_migrations = if options.enable_forbid_diesel_cli_trigger {
""
} else {
"SET LOCAL lemmy.enable_migrations TO 'on';"
};
wrapper.conn.batch_execute(&format!("LOCK __diesel_schema_migrations IN SHARE UPDATE EXCLUSIVE MODE;DROP SCHEMA IF EXISTS r CASCADE;{enable_migrations}"))?;
info!("Running Database migrations (This may take a long time)...");
(|| {
if options.revert {
if let Some(amount) = options.revert_amount {
for _ in 0..amount {
wrapper.revert_last_migration(migration_source_ref)?;
}
if options.redo_after_revert {
for _ in 0..amount {
wrapper.run_next_migration(migration_source_ref)?;
}
}
} else {
wrapper.revert_all_migrations(migration_source_ref)?;
if options.redo_after_revert {
wrapper.run_pending_migrations(migration_source_ref)?;
}
}
} else {
wrapper.run_pending_migrations(migration_source_ref)?;
}
diesel::migration::Result::Ok(())
})().map_err(|e| anyhow!("Couldn't run DB Migrations: {e}"))?;
// Run replaceable_schema if newest migration was applied
if !(options.revert && !options.redo_after_revert) {
wrapper.conn
.batch_execute(&new_sql)
.context("Couldn't run SQL files in crates/db_schema/replaceable_schema")?;
let num_rows_updated = update(previously_run_sql::table)
.set(previously_run_sql::content.eq(new_sql))
.execute(wrapper.conn)?;
debug_assert_eq!(num_rows_updated, 1);
}
Ok(())
})?;
info!("Database migrations complete.");
Ok(())
}
#[cfg(test)]
mod tests {
use super::*;
use lemmy_utils::{error::LemmyResult, settings::SETTINGS};
use serial_test::serial;
#[test]
#[serial]
fn test_schema_setup() -> LemmyResult<()> {
let url = SETTINGS.get_database_url();
let mut conn = PgConnection::establish(&url)?;
// Start with consistent state by dropping everything
conn.batch_execute("DROP OWNED BY CURRENT_USER;")?;
// Run and revert all migrations, ensuring there's no mistakes in any down.sql file
run(&url, Options::default())?;
run(&url, Options::default().revert(None))?;
// TODO also don't drop r, and maybe just directly call the migrationharness method here
assert!(matches!(
run(&url, Options::default().enable_forbid_diesel_cli_trigger()),
Err(e) if e.to_string().contains("lemmy_server")
));
// Previous run shouldn't stop this one from working
run(&url, Options::default())?;
Ok(())
}
}

View File

@@ -427,7 +427,7 @@ pub async fn build_db_pool() -> LemmyResult<ActualDbPool> {
}))
.build()?;
crate::schema_setup::run(&db_url)?;
crate::schema_setup::run(&db_url, Default::default())?;
Ok(pool)
}

View File

@@ -1,5 +1,11 @@
DROP TABLE activity;
DROP VIEW community_view, community_mview;
DROP MATERIALIZED VIEW community_aggregates_mview;
DROP VIEW community_aggregates_view;
ALTER TABLE user_
DROP COLUMN actor_id,
DROP COLUMN private_key,
@@ -15,3 +21,126 @@ ALTER TABLE community
DROP COLUMN local,
DROP COLUMN last_refreshed_at;
-- Views are the same as before, except `*` does not reference the dropped columns
CREATE VIEW community_aggregates_view AS
SELECT
c.*,
(
SELECT
name
FROM
user_ u
WHERE
c.creator_id = u.id) AS creator_name,
(
SELECT
avatar
FROM
user_ u
WHERE
c.creator_id = u.id) AS creator_avatar,
(
SELECT
name
FROM
category ct
WHERE
c.category_id = ct.id) AS category_name,
(
SELECT
count(*)
FROM
community_follower cf
WHERE
cf.community_id = c.id) AS number_of_subscribers,
(
SELECT
count(*)
FROM
post p
WHERE
p.community_id = c.id) AS number_of_posts,
(
SELECT
count(*)
FROM
comment co,
post p
WHERE
c.id = p.community_id
AND p.id = co.post_id) AS number_of_comments,
hot_rank ((
SELECT
count(*)
FROM community_follower cf
WHERE
cf.community_id = c.id), c.published) AS hot_rank
FROM
community c;
CREATE MATERIALIZED VIEW community_aggregates_mview AS
SELECT
*
FROM
community_aggregates_view;
CREATE UNIQUE INDEX idx_community_aggregates_mview_id ON community_aggregates_mview (id);
CREATE VIEW community_view AS
with all_community AS (
SELECT
ca.*
FROM
community_aggregates_view ca
)
SELECT
ac.*,
u.id AS user_id,
(
SELECT
cf.id::boolean
FROM
community_follower cf
WHERE
u.id = cf.user_id
AND ac.id = cf.community_id) AS subscribed
FROM
user_ u
CROSS JOIN all_community ac
UNION ALL
SELECT
ac.*,
NULL AS user_id,
NULL AS subscribed
FROM
all_community ac;
CREATE VIEW community_mview AS
with all_community AS (
SELECT
ca.*
FROM
community_aggregates_mview ca
)
SELECT
ac.*,
u.id AS user_id,
(
SELECT
cf.id::boolean
FROM
community_follower cf
WHERE
u.id = cf.user_id
AND ac.id = cf.community_id) AS subscribed
FROM
user_ u
CROSS JOIN all_community ac
UNION ALL
SELECT
ac.*,
NULL AS user_id,
NULL AS subscribed
FROM
all_community ac;

View File

@@ -1,3 +1,15 @@
DROP VIEW post_mview;
DROP MATERIALIZED VIEW post_aggregates_mview;
DROP VIEW post_view, post_aggregates_view;
DROP VIEW user_mention_view, comment_view, user_mention_mview, reply_view, comment_mview;
DROP MATERIALIZED VIEW comment_aggregates_mview;
DROP VIEW comment_aggregates_view;
ALTER TABLE post
DROP COLUMN ap_id,
DROP COLUMN local;
@@ -6,3 +18,526 @@ ALTER TABLE comment
DROP COLUMN ap_id,
DROP COLUMN local;
-- Views are the same as before, except `*` does not reference the dropped columns
CREATE VIEW post_aggregates_view AS
SELECT
p.*,
(
SELECT
u.banned
FROM
user_ u
WHERE
p.creator_id = u.id) AS banned,
(
SELECT
cb.id::bool
FROM
community_user_ban cb
WHERE
p.creator_id = cb.user_id
AND p.community_id = cb.community_id) AS banned_from_community,
(
SELECT
name
FROM
user_
WHERE
p.creator_id = user_.id) AS creator_name,
(
SELECT
avatar
FROM
user_
WHERE
p.creator_id = user_.id) AS creator_avatar,
(
SELECT
name
FROM
community
WHERE
p.community_id = community.id) AS community_name,
(
SELECT
removed
FROM
community c
WHERE
p.community_id = c.id) AS community_removed,
(
SELECT
deleted
FROM
community c
WHERE
p.community_id = c.id) AS community_deleted,
(
SELECT
nsfw
FROM
community c
WHERE
p.community_id = c.id) AS community_nsfw,
(
SELECT
count(*)
FROM
comment
WHERE
comment.post_id = p.id) AS number_of_comments,
coalesce(sum(pl.score), 0) AS score,
count(
CASE WHEN pl.score = 1 THEN
1
ELSE
NULL
END) AS upvotes,
count(
CASE WHEN pl.score = - 1 THEN
1
ELSE
NULL
END) AS downvotes,
hot_rank (coalesce(sum(pl.score), 0), (
CASE WHEN (p.published < ('now'::timestamp - '1 month'::interval)) THEN
p.published -- Prevents necro-bumps
ELSE
greatest (c.recent_comment_time, p.published)
END)) AS hot_rank,
(
CASE WHEN (p.published < ('now'::timestamp - '1 month'::interval)) THEN
p.published -- Prevents necro-bumps
ELSE
greatest (c.recent_comment_time, p.published)
END) AS newest_activity_time
FROM
post p
LEFT JOIN post_like pl ON p.id = pl.post_id
LEFT JOIN (
SELECT
post_id,
max(published) AS recent_comment_time
FROM
comment
GROUP BY
1) c ON p.id = c.post_id
GROUP BY
p.id,
c.recent_comment_time;
CREATE VIEW post_view AS
with all_post AS (
SELECT
pa.*
FROM
post_aggregates_view pa
)
SELECT
ap.*,
u.id AS user_id,
coalesce(pl.score, 0) AS my_vote,
(
SELECT
cf.id::bool
FROM
community_follower cf
WHERE
u.id = cf.user_id
AND cf.community_id = ap.community_id) AS subscribed,
(
SELECT
pr.id::bool
FROM
post_read pr
WHERE
u.id = pr.user_id
AND pr.post_id = ap.id) AS read,
(
SELECT
ps.id::bool
FROM
post_saved ps
WHERE
u.id = ps.user_id
AND ps.post_id = ap.id) AS saved
FROM
user_ u
CROSS JOIN all_post ap
LEFT JOIN post_like pl ON u.id = pl.user_id
AND ap.id = pl.post_id
UNION ALL
SELECT
ap.*,
NULL AS user_id,
NULL AS my_vote,
NULL AS subscribed,
NULL AS read,
NULL AS saved
FROM
all_post ap;
CREATE MATERIALIZED VIEW post_aggregates_mview AS
SELECT
*
FROM
post_aggregates_view;
CREATE UNIQUE INDEX idx_post_aggregates_mview_id ON post_aggregates_mview (id);
CREATE VIEW post_mview AS
with all_post AS (
SELECT
pa.*
FROM
post_aggregates_mview pa
)
SELECT
ap.*,
u.id AS user_id,
coalesce(pl.score, 0) AS my_vote,
(
SELECT
cf.id::bool
FROM
community_follower cf
WHERE
u.id = cf.user_id
AND cf.community_id = ap.community_id) AS subscribed,
(
SELECT
pr.id::bool
FROM
post_read pr
WHERE
u.id = pr.user_id
AND pr.post_id = ap.id) AS read,
(
SELECT
ps.id::bool
FROM
post_saved ps
WHERE
u.id = ps.user_id
AND ps.post_id = ap.id) AS saved
FROM
user_ u
CROSS JOIN all_post ap
LEFT JOIN post_like pl ON u.id = pl.user_id
AND ap.id = pl.post_id
UNION ALL
SELECT
ap.*,
NULL AS user_id,
NULL AS my_vote,
NULL AS subscribed,
NULL AS read,
NULL AS saved
FROM
all_post ap;
CREATE VIEW comment_aggregates_view AS
SELECT
c.*,
(
SELECT
community_id
FROM
post p
WHERE
p.id = c.post_id), (
SELECT
co.name
FROM
post p,
community co
WHERE
p.id = c.post_id
AND p.community_id = co.id) AS community_name,
(
SELECT
u.banned
FROM
user_ u
WHERE
c.creator_id = u.id) AS banned,
(
SELECT
cb.id::bool
FROM
community_user_ban cb,
post p
WHERE
c.creator_id = cb.user_id
AND p.id = c.post_id
AND p.community_id = cb.community_id) AS banned_from_community,
(
SELECT
name
FROM
user_
WHERE
c.creator_id = user_.id) AS creator_name,
(
SELECT
avatar
FROM
user_
WHERE
c.creator_id = user_.id) AS creator_avatar,
coalesce(sum(cl.score), 0) AS score,
count(
CASE WHEN cl.score = 1 THEN
1
ELSE
NULL
END) AS upvotes,
count(
CASE WHEN cl.score = - 1 THEN
1
ELSE
NULL
END) AS downvotes,
hot_rank (coalesce(sum(cl.score), 0), c.published) AS hot_rank
FROM
comment c
LEFT JOIN comment_like cl ON c.id = cl.comment_id
GROUP BY
c.id;
CREATE MATERIALIZED VIEW comment_aggregates_mview AS
SELECT
*
FROM
comment_aggregates_view;
CREATE UNIQUE INDEX idx_comment_aggregates_mview_id ON comment_aggregates_mview (id);
CREATE VIEW comment_mview AS
with all_comment AS (
SELECT
ca.*
FROM
comment_aggregates_mview ca
)
SELECT
ac.*,
u.id AS user_id,
coalesce(cl.score, 0) AS my_vote,
(
SELECT
cf.id::boolean
FROM
community_follower cf
WHERE
u.id = cf.user_id
AND ac.community_id = cf.community_id) AS subscribed,
(
SELECT
cs.id::bool
FROM
comment_saved cs
WHERE
u.id = cs.user_id
AND cs.comment_id = ac.id) AS saved
FROM
user_ u
CROSS JOIN all_comment ac
LEFT JOIN comment_like cl ON u.id = cl.user_id
AND ac.id = cl.comment_id
UNION ALL
SELECT
ac.*,
NULL AS user_id,
NULL AS my_vote,
NULL AS subscribed,
NULL AS saved
FROM
all_comment ac;
CREATE VIEW reply_view AS
with closereply AS (
SELECT
c2.id,
c2.creator_id AS sender_id,
c.creator_id AS recipient_id
FROM
comment c
INNER JOIN comment c2 ON c.id = c2.parent_id
WHERE
c2.creator_id != c.creator_id
-- Do union where post is null
UNION
SELECT
c.id,
c.creator_id AS sender_id,
p.creator_id AS recipient_id
FROM
comment c,
post p
WHERE
c.post_id = p.id
AND c.parent_id IS NULL
AND c.creator_id != p.creator_id
)
SELECT
cv.*,
closereply.recipient_id
FROM
comment_mview cv,
closereply
WHERE
closereply.id = cv.id;
CREATE VIEW user_mention_mview AS
with all_comment AS (
SELECT
ca.*
FROM
comment_aggregates_mview ca
)
SELECT
ac.id,
um.id AS user_mention_id,
ac.creator_id,
ac.post_id,
ac.parent_id,
ac.content,
ac.removed,
um.read,
ac.published,
ac.updated,
ac.deleted,
ac.community_id,
ac.community_name,
ac.banned,
ac.banned_from_community,
ac.creator_name,
ac.creator_avatar,
ac.score,
ac.upvotes,
ac.downvotes,
ac.hot_rank,
u.id AS user_id,
coalesce(cl.score, 0) AS my_vote,
(
SELECT
cs.id::bool
FROM
comment_saved cs
WHERE
u.id = cs.user_id
AND cs.comment_id = ac.id) AS saved,
um.recipient_id
FROM
user_ u
CROSS JOIN all_comment ac
LEFT JOIN comment_like cl ON u.id = cl.user_id
AND ac.id = cl.comment_id
LEFT JOIN user_mention um ON um.comment_id = ac.id
UNION ALL
SELECT
ac.id,
um.id AS user_mention_id,
ac.creator_id,
ac.post_id,
ac.parent_id,
ac.content,
ac.removed,
um.read,
ac.published,
ac.updated,
ac.deleted,
ac.community_id,
ac.community_name,
ac.banned,
ac.banned_from_community,
ac.creator_name,
ac.creator_avatar,
ac.score,
ac.upvotes,
ac.downvotes,
ac.hot_rank,
NULL AS user_id,
NULL AS my_vote,
NULL AS saved,
um.recipient_id
FROM
all_comment ac
LEFT JOIN user_mention um ON um.comment_id = ac.id;
CREATE VIEW comment_view AS
with all_comment AS (
SELECT
ca.*
FROM
comment_aggregates_view ca
)
SELECT
ac.*,
u.id AS user_id,
coalesce(cl.score, 0) AS my_vote,
(
SELECT
cf.id::boolean
FROM
community_follower cf
WHERE
u.id = cf.user_id
AND ac.community_id = cf.community_id) AS subscribed,
(
SELECT
cs.id::bool
FROM
comment_saved cs
WHERE
u.id = cs.user_id
AND cs.comment_id = ac.id) AS saved
FROM
user_ u
CROSS JOIN all_comment ac
LEFT JOIN comment_like cl ON u.id = cl.user_id
AND ac.id = cl.comment_id
UNION ALL
SELECT
ac.*,
NULL AS user_id,
NULL AS my_vote,
NULL AS subscribed,
NULL AS saved
FROM
all_comment ac;
CREATE VIEW user_mention_view AS
SELECT
c.id,
um.id AS user_mention_id,
c.creator_id,
c.post_id,
c.parent_id,
c.content,
c.removed,
um.read,
c.published,
c.updated,
c.deleted,
c.community_id,
c.community_name,
c.banned,
c.banned_from_community,
c.creator_name,
c.creator_avatar,
c.score,
c.upvotes,
c.downvotes,
c.hot_rank,
c.user_id,
c.my_vote,
c.saved,
um.recipient_id
FROM
user_mention um,
comment_view c
WHERE
um.comment_id = c.id;

View File

@@ -1,3 +1,5 @@
DROP VIEW user_alias_1, user_alias_2;
ALTER TABLE community
DROP COLUMN followers_url;
@@ -13,3 +15,16 @@ ALTER TABLE user_
ALTER TABLE user_
DROP COLUMN shared_inbox_url;
-- Views are the same as before, except `*` does not reference the dropped columns
CREATE VIEW user_alias_1 AS
SELECT
*
FROM
user_;
CREATE VIEW user_alias_2 AS
SELECT
*
FROM
user_;

View File

@@ -34,3 +34,5 @@ INSERT INTO category (name)
ALTER TABLE community
ADD category_id int REFERENCES category ON UPDATE CASCADE ON DELETE CASCADE NOT NULL DEFAULT 1;
CREATE INDEX idx_community_category ON community (category_id);

View File

@@ -260,6 +260,8 @@ FROM
WHERE
lu.person_id = u.id;
CREATE UNIQUE INDEX idx_user_email_lower ON user_ (lower(email));
CREATE VIEW user_alias_1 AS
SELECT
*

View File

@@ -1,6 +1,6 @@
ALTER TABLE post
DROP COLUMN embed_url;
DROP COLUMN embed_video_url;
ALTER TABLE post
ADD COLUMN embed_video_url text;
ADD COLUMN embed_html text;

View File

@@ -65,3 +65,15 @@ CREATE TRIGGER post_aggregates_stickied
WHEN (OLD.stickied IS DISTINCT FROM NEW.stickied)
EXECUTE PROCEDURE post_aggregates_stickied ();
CREATE INDEX idx_post_aggregates_stickied_newest_comment_time ON post_aggregates (stickied DESC, newest_comment_time DESC);
CREATE INDEX idx_post_aggregates_stickied_comments ON post_aggregates (stickied DESC, comments DESC);
CREATE INDEX idx_post_aggregates_stickied_hot ON post_aggregates (stickied DESC, hot_rank (score, published) DESC, published DESC);
CREATE INDEX idx_post_aggregates_stickied_active ON post_aggregates (stickied DESC, hot_rank (score, newest_comment_time_necro) DESC, newest_comment_time_necro DESC);
CREATE INDEX idx_post_aggregates_stickied_score ON post_aggregates (stickied DESC, score DESC);
CREATE INDEX idx_post_aggregates_stickied_published ON post_aggregates (stickied DESC, published DESC);

View File

@@ -8,7 +8,7 @@ CREATE INDEX idx_post_aggregates_comments ON post_aggregates (comments DESC);
CREATE INDEX idx_post_aggregates_hot ON post_aggregates (hot_rank (score, published) DESC, published DESC);
CREATE INDEX idx_post_aggregates_active ON post_aggregates (hot_rank (score, newest_comment_time) DESC, newest_comment_time DESC);
CREATE INDEX idx_post_aggregates_active ON post_aggregates (hot_rank (score, newest_comment_time_necro) DESC, newest_comment_time_necro DESC);
CREATE INDEX idx_post_aggregates_score ON post_aggregates (score DESC);

View File

@@ -1,3 +1,6 @@
ALTER TABLE local_user
ALTER default_sort_type DROP DEFAULT;
-- update the default sort type
UPDATE
local_user
@@ -29,6 +32,9 @@ ALTER TABLE local_user
ALTER COLUMN default_sort_type TYPE sort_type_enum
USING default_sort_type::text::sort_type_enum;
ALTER TABLE local_user
ALTER default_sort_type SET DEFAULT 'Active';
-- drop the old enum
DROP TYPE sort_type_enum__;

View File

@@ -1,3 +1,6 @@
ALTER TABLE local_user
ALTER default_sort_type DROP DEFAULT;
-- update the default sort type
UPDATE
local_user
@@ -32,6 +35,9 @@ ALTER TABLE local_user
ALTER COLUMN default_sort_type TYPE sort_type_enum
USING default_sort_type::text::sort_type_enum;
ALTER TABLE local_user
ALTER default_sort_type SET DEFAULT 'Active';
-- drop the old enum
DROP TYPE sort_type_enum__;

View File

@@ -26,3 +26,5 @@ DROP TABLE sent_activity;
DROP TABLE received_activity;
CREATE UNIQUE INDEX idx_activity_ap_id ON activity (ap_id);

View File

@@ -14,3 +14,7 @@ WHERE
ALTER TABLE local_user
DROP COLUMN admin;
CREATE INDEX idx_person_admin ON person (admin)
WHERE
admin;

View File

@@ -328,7 +328,9 @@ ALTER TABLE captcha_answer
ALTER COLUMN published TYPE timestamp
USING published;
CREATE OR REPLACE FUNCTION hot_rank (score numeric, published timestamp without time zone)
DROP FUNCTION hot_rank;
CREATE FUNCTION hot_rank (score numeric, published timestamp without time zone)
RETURNS integer
AS $$
DECLARE

View File

@@ -0,0 +1,2 @@
DROP FUNCTION forbid_diesel_cli CASCADE;

View File

@@ -0,0 +1,29 @@
-- This trigger prevents using the Diesel CLI to run or revert migrations, so the custom migration runner
-- can drop and recreate the `r` schema for new migrations.
--
-- This migration being separate from the next migration (created in the same PR) guarantees that the
-- Diesel CLI will fail to bring the number of pending migrations to 0, which is one of the conditions
-- required to skip running replaceable_schema.
--
-- If the Diesel CLI could run or revert migrations, this scenario would be possible:
--
-- Run `diesel migration redo` when the newest migration has a new table with triggers. End up with triggers
-- being dropped and not replaced because triggers are created outside of up.sql. The custom migration runner
-- sees that there are no pending migrations and the value in the `previously_run_sql` trigger is correct, so
-- it doesn't rebuild the `r` schema. There is now incorrect behavior but no error messages.
CREATE FUNCTION forbid_diesel_cli ()
RETURNS TRIGGER
LANGUAGE plpgsql
AS $$
BEGIN
IF current_setting('lemmy.enable_migrations', TRUE) IS DISTINCT FROM 'on' THEN
RAISE 'migrations must be managed using lemmy_server instead of diesel CLI';
END IF;
RETURN NULL;
END;
$$;
CREATE TRIGGER forbid_diesel_cli
BEFORE INSERT OR UPDATE OR DELETE OR TRUNCATE ON __diesel_schema_migrations
EXECUTE FUNCTION forbid_diesel_cli ();
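For context, the custom runner in schema_setup.rs satisfies this check with the `SET LOCAL lemmy.enable_migrations TO 'on';` statement shown earlier in this commit. A minimal sketch of a session the trigger lets through (`SET LOCAL` scopes the flag to one transaction):

BEGIN;
-- Only lemmy_server's migration runner sets this flag, so writes to
-- __diesel_schema_migrations from the Diesel CLI raise an error instead
SET LOCAL lemmy.enable_migrations TO 'on';
-- migration statements would run here
COMMIT;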

View File

@@ -0,0 +1,2 @@
DROP TABLE previously_run_sql;

View File

@@ -0,0 +1,12 @@
DROP SCHEMA IF EXISTS r CASCADE;
CREATE TABLE previously_run_sql (
-- For compatibility with Diesel
id boolean PRIMARY KEY,
-- Too big to be used as primary key
content text NOT NULL
);
INSERT INTO previously_run_sql (id, content)
VALUES (TRUE, '');
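Storing a single row keyed by `id = TRUE` lets the runner decide whether to rebuild the `r` schema with one scalar comparison; roughly the query the Diesel code in schema_setup.rs builds, sketched here with PREPARE for illustration:

-- Returns true when the stored SQL matches the new replaceable_schema SQL
PREPARE sql_unchanged (text) AS
SELECT (
    SELECT
        content
    FROM
        previously_run_sql) = $1;

EXECUTE sql_unchanged ('-- the new replaceable_schema SQL');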

View File

@@ -9,8 +9,8 @@ cd $CWD/../
source scripts/start_dev_db.sh
diesel migration run
pg_dump --no-owner --no-privileges --no-table-access-method --schema-only --no-sync -f schema.sqldump
cargo run --package lemmy_server -- migration run
pg_dump --no-owner --no-privileges --no-table-access-method --schema-only --exclude-schema=r --no-sync -f schema.sqldump
pg_ctl stop
rm -rf $PGDATA

View File

@@ -23,7 +23,7 @@ use actix_web::{
HttpServer,
};
use actix_web_prom::PrometheusMetricsBuilder;
use clap::Parser;
use clap::{Parser, Subcommand};
use lemmy_api_common::{
context::LemmyContext,
lemmy_db_views::structs::SiteView,
@@ -40,7 +40,7 @@ use lemmy_apub::{
VerifyUrlData,
FEDERATION_HTTP_FETCH_LIMIT,
};
use lemmy_db_schema::{source::secret::Secret, utils::build_db_pool};
use lemmy_db_schema::{schema_setup, source::secret::Secret, utils::build_db_pool};
use lemmy_federate::{start_stop_federation_workers_cancellable, Opts};
use lemmy_routes::{feeds, images, nodeinfo, webfinger};
use lemmy_utils::{
@@ -70,6 +70,7 @@ use url::Url;
about = "A link aggregator for the fediverse",
long_about = "A link aggregator for the fediverse.\n\nThis is the Lemmy backend API server. This will connect to a PostgreSQL database, run any pending migrations and start accepting API requests."
)]
#[command(args_conflicts_with_subcommands = true)]
pub struct CmdArgs {
/// Don't run scheduled tasks.
///
@@ -103,6 +104,23 @@ pub struct CmdArgs {
/// If set, make sure to set --federate-process-index differently for each.
#[arg(long, default_value_t = 1)]
federate_process_count: i32,
#[command(subcommand)]
subcommand: Option<CmdSubcommand>,
}
#[derive(Subcommand, Debug)]
enum CmdSubcommand {
/// Do something with migrations, then exit.
Migration {
#[command(subcommand)]
subcommand: MigrationSubcommand,
},
}
#[derive(Subcommand, Debug)]
enum MigrationSubcommand {
/// Run all pending migrations.
Run,
}
/// Placing the main function in lib.rs allows other crates to import it and embed Lemmy
@@ -110,6 +128,16 @@ pub async fn start_lemmy_server(args: CmdArgs) -> LemmyResult<()> {
// Print version number to log
println!("Lemmy v{VERSION}");
if let Some(CmdSubcommand::Migration { subcommand }) = args.subcommand {
let options = match subcommand {
MigrationSubcommand::Run => schema_setup::Options::default(),
};
schema_setup::run(&SETTINGS.get_database_url(), options)?;
return Ok(());
}
// return error 503 while running db migrations and startup tasks
let mut startup_server_handle = None;
if !args.disable_http_server {