mas_storage_pg/lib.rs

// Copyright 2024, 2025 New Vector Ltd.
// Copyright 2021-2024 The Matrix.org Foundation C.I.C.
//
// SPDX-License-Identifier: AGPL-3.0-only OR LicenseRef-Element-Commercial
// Please see LICENSE files in the repository root for full details.

//! An implementation of the storage traits for a PostgreSQL database
//!
//! This backend uses [`sqlx`] to interact with the database. Most queries are
//! type-checked, using introspection data recorded in the `.sqlx` directory.
//! These files are generated by the `sqlx` CLI tool (`cargo sqlx prepare`),
//! and should be updated whenever the database schema changes or new queries
//! are added.
//!
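//! For instance, with the offline introspection data in place, the macro form
//! of a query is checked at compile time. A short sketch (the `users` table
//! and its columns here are purely illustrative):
//!
//! ```rust,ignore
//! let user_id: Uuid = sqlx::query_scalar!(
//!     "SELECT user_id FROM users WHERE username = $1",
//!     username,
//! )
//! .fetch_one(&mut *conn)
//! .await?;
//! ```
//!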
//! # Implementing a new repository
//!
//! When a new repository is defined in [`mas_storage`], it should be
//! implemented here, with the PostgreSQL backend.
//!
//! A typical implementation will look like this:
//!
//! ```rust
//! # use async_trait::async_trait;
//! # use ulid::Ulid;
//! # use rand::RngCore;
//! # use mas_data_model::Clock;
//! # use mas_storage_pg::{DatabaseError, ExecuteExt};
//! # use sqlx::PgConnection;
//! # use uuid::Uuid;
//! #
//! # // A fake data structure, usually defined in mas-data-model
//! # #[derive(sqlx::FromRow)]
//! # struct FakeData {
//! #    id: Ulid,
//! # }
//! #
//! # // A fake repository trait, usually defined in mas-storage
//! # #[async_trait]
//! # pub trait FakeDataRepository: Send + Sync {
//! #     type Error;
//! #     async fn lookup(&mut self, id: Ulid) -> Result<Option<FakeData>, Self::Error>;
//! #     async fn add(
//! #         &mut self,
//! #         rng: &mut (dyn RngCore + Send),
//! #         clock: &dyn Clock,
//! #     ) -> Result<FakeData, Self::Error>;
//! # }
//! #
//! /// An implementation of [`FakeDataRepository`] for a PostgreSQL connection
//! pub struct PgFakeDataRepository<'c> {
//!     conn: &'c mut PgConnection,
//! }
//!
//! impl<'c> PgFakeDataRepository<'c> {
//!     /// Create a new [`FakeDataRepository`] from an active PostgreSQL connection
//!     pub fn new(conn: &'c mut PgConnection) -> Self {
//!         Self { conn }
//!     }
//! }
//!
//! #[derive(sqlx::FromRow)]
//! struct FakeDataLookup {
//!     fake_data_id: Uuid,
//! }
//!
//! impl From<FakeDataLookup> for FakeData {
//!     fn from(value: FakeDataLookup) -> Self {
//!         Self {
//!             id: value.fake_data_id.into(),
//!         }
//!     }
//! }
//!
//! #[async_trait]
//! impl<'c> FakeDataRepository for PgFakeDataRepository<'c> {
//!     type Error = DatabaseError;
//!
//!     #[tracing::instrument(
//!         name = "db.fake_data.lookup",
//!         skip_all,
//!         fields(
//!             db.query.text,
//!             fake_data.id = %id,
//!         ),
//!         err,
//!     )]
//!     async fn lookup(&mut self, id: Ulid) -> Result<Option<FakeData>, Self::Error> {
//!         // Note: here we would use the macro version instead, but that's
//!         // not possible in this documentation example
//!         let res: Option<FakeDataLookup> = sqlx::query_as(
//!             r#"
//!                 SELECT fake_data_id
//!                 FROM fake_data
//!                 WHERE fake_data_id = $1
//!             "#,
//!         )
//!         .bind(Uuid::from(id))
//!         .traced()
//!         .fetch_optional(&mut *self.conn)
//!         .await?;
//!
//!         let Some(res) = res else { return Ok(None) };
//!
//!         Ok(Some(res.into()))
//!     }
//!
//!     #[tracing::instrument(
//!         name = "db.fake_data.add",
//!         skip_all,
//!         fields(
//!             db.query.text,
//!             fake_data.id,
//!         ),
//!         err,
//!     )]
//!     async fn add(
//!         &mut self,
//!         rng: &mut (dyn RngCore + Send),
//!         clock: &dyn Clock,
//!     ) -> Result<FakeData, Self::Error> {
//!         let created_at = clock.now();
//!         let id = Ulid::from_datetime_with_source(created_at.into(), rng);
//!         tracing::Span::current().record("fake_data.id", tracing::field::display(id));
//!
//!         // Note: here we would use the macro version instead, but that's
//!         // not possible in this documentation example
//!         sqlx::query(
//!             r#"
//!                 INSERT INTO fake_data (fake_data_id)
//!                 VALUES ($1)
//!             "#,
//!         )
//!         .bind(Uuid::from(id))
//!         .traced()
//!         .execute(&mut *self.conn)
//!         .await?;
//!
//!         Ok(FakeData {
//!             id,
//!         })
//!     }
//! }
//! ```
//!
//! A few things to note with the implementation:
//!
//!  - All methods are traced, with an explicit, somewhat consistent name.
//!  - The SQL statement is included as an attribute, by declaring a
//!    `db.query.text` attribute on the tracing span, and then calling
//!    [`ExecuteExt::traced`].
//!  - The IDs are all [`Ulid`], generated from the clock and the random
//!    number generator passed as parameters. The generated IDs are recorded
//!    in the span.
//!  - The IDs are stored as [`Uuid`] in PostgreSQL, so conversions are
//!    required (see the sketch below).
//!  - "Not found" errors are handled by returning `Ok(None)` instead of an
//!    error.
//!
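//! As the note on conversions above suggests, crossing the [`Ulid`]/[`Uuid`]
//! boundary is a plain `From` conversion in both directions (a short sketch,
//! relying on the `uuid` feature of the `ulid` crate):
//!
//! ```rust
//! use ulid::Ulid;
//! use uuid::Uuid;
//!
//! // IDs are ULIDs in the data model…
//! let id = Ulid::from_string("01ARZ3NDEKTSV4RRFFQ69G5FAV").unwrap();
//! // …but are bound to queries as UUIDs…
//! let as_uuid = Uuid::from(id);
//! // …and mapped back when loading rows.
//! let back = Ulid::from(as_uuid);
//! assert_eq!(id, back);
//! ```
//!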
//! [`Ulid`]: ulid::Ulid
//! [`Uuid`]: uuid::Uuid

#![deny(clippy::future_not_send, missing_docs)]
#![allow(clippy::module_name_repetitions, clippy::blocks_in_conditions)]

use std::collections::{BTreeMap, BTreeSet, HashSet};

use ::tracing::{Instrument, debug, info, info_span, warn};
use opentelemetry_semantic_conventions::trace::DB_QUERY_TEXT;
use sqlx::{
    Either, PgConnection,
    migrate::{AppliedMigration, Migrate, MigrateError, Migration, Migrator},
    postgres::{PgAdvisoryLock, PgAdvisoryLockKey},
};

pub mod app_session;
pub mod compat;
pub mod oauth2;
pub mod personal;
pub mod queue;
pub mod upstream_oauth2;
pub mod user;

mod errors;
pub(crate) mod filter;
pub(crate) mod iden;
pub(crate) mod pagination;
pub(crate) mod policy_data;
pub(crate) mod repository;
pub(crate) mod telemetry;
pub(crate) mod tracing;

pub(crate) use self::errors::DatabaseInconsistencyError;
pub use self::{
    errors::DatabaseError,
    repository::{PgRepository, PgRepositoryFactory},
    tracing::ExecuteExt,
};

/// Embedded migrations in the binary
pub static MIGRATOR: Migrator = sqlx::migrate!();

fn available_migrations() -> BTreeMap<i64, &'static Migration> {
    MIGRATOR.iter().map(|m| (m.version, m)).collect()
}

/// This is the list of migrations we've removed from the migration history,
/// but which might have been applied in the past
#[allow(clippy::inconsistent_digit_grouping)]
const ALLOWED_MISSING_MIGRATIONS: &[i64] = &[
    // https://github.com/matrix-org/matrix-authentication-service/pull/1585
    20220709_210445,
    20230330_210841,
    20230408_110421,
];

fn allowed_missing_migrations() -> BTreeSet<i64> {
    ALLOWED_MISSING_MIGRATIONS.iter().copied().collect()
}

/// This is a list of possible additional checksums from previous versions of
/// migrations. The checksum we store in the database is 48 bytes long. We're
/// not really concerned with partial hash collisions, and to keep this file
/// readable, we only store the upper 16 bytes of each hash.
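///
/// To add an entry, one can take the 48-byte checksum recorded in the
/// `_sqlx_migrations` table and keep its upper 16 bytes. A sketch, where
/// `checksum` is a hypothetical `Vec<u8>` fetched from that table:
///
/// ```rust,ignore
/// // Big-endian, so the first 16 bytes are the upper 16 bytes of the hash
/// let prefix = u128::from_be_bytes(checksum[..16].try_into().unwrap());
/// println!("{prefix:#034x}");
/// ```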
#[allow(clippy::inconsistent_digit_grouping)]
const ALLOWED_ALTERNATE_CHECKSUMS: &[(i64, u128)] = &[
    // https://github.com/element-hq/matrix-authentication-service/pull/5300
    (20250410_000000, 0x8811_c3ef_dbee_8c00_5b49_25da_5d55_9c3f),
    (20250410_000001, 0x7990_37b3_2193_8a5d_c72f_bccd_95fd_82e5),
    (20250410_000002, 0xf2b8_f120_deae_27e7_60d0_79a3_0b77_eea3),
    (20250410_000003, 0x06be_fc2b_cedc_acf4_b981_02c7_b40c_c469),
    (20250410_000004, 0x0a90_9c6a_dba7_545c_10d9_60eb_6d30_2f50),
    (20250410_000006, 0xcc7f_5152_6497_5729_d94b_be0d_9c95_8316),
    (20250410_000007, 0x12e7_cfab_a017_a5a5_4f2c_18fa_541c_ce62),
    (20250410_000008, 0x171d_62e5_ee1a_f0d9_3639_6c5a_277c_54cd),
    (20250410_000009, 0xb1a0_93c7_6645_92ad_df45_b395_57bb_a281),
    (20250410_000010, 0x8089_86ac_7cff_8d86_2850_d287_cdb1_2b57),
    (20250410_000011, 0x8d9d_3fae_02c9_3d3f_81e4_6242_2b39_b5b8),
    (20250410_000012, 0x9805_1372_41aa_d5b0_ebe1_ba9d_28c7_faf6),
    (20250410_000013, 0x7291_9a97_e4d1_0d45_1791_6e8c_3f2d_e34d),
    (20250410_000014, 0x811d_f965_8127_e168_4aa2_f177_a4e6_f077),
    (20250410_000015, 0xa639_0780_aab7_d60d_5fcb_771d_13ed_73ee),
    (20250410_000016, 0x22b6_e909_6de4_39e3_b2b9_c684_7417_fe07),
    (20250410_000017, 0x9dfe_b6d3_89e4_e509_651b_2793_8d8d_cd32),
    (20250410_000018, 0x638f_bdbc_2276_5094_020b_cec1_ab95_c07f),
    (20250410_000019, 0xa283_84bc_5fd5_7cbd_b5fb_b5fe_0255_6845),
    (20250410_000020, 0x17d1_54b1_7c6e_fc48_61dd_da3d_f8a5_9546),
    (20250410_000022, 0xbc36_af82_994a_6f93_8aca_a46b_fc3c_ffde),
    (20250410_000023, 0x54ec_3b07_ac79_443b_9e18_a2b3_2d17_5ab9),
    (20250410_000024, 0x8ab4_4f80_00b6_58b2_d757_c40f_bc72_3d87),
    (20250410_000025, 0x5dc4_2ff3_3042_2f45_046d_10af_ab3a_b583),
    (20250410_000026, 0x5263_c547_0b64_6425_5729_48b2_ce84_7cad),
    (20250410_000027, 0x0aad_cb50_1d6a_7794_9017_d24d_55e7_1b9d),
    (20250410_000028, 0x8fc1_92f8_68df_ca4e_3e2b_cddf_bc12_cffe),
    (20250410_000029, 0x416c_9446_b6a3_1b49_2940_a8ac_c1c2_665a),
    (20250410_000030, 0x83a5_e51e_25a6_77fb_2b79_6ea5_db1e_364f),
    (20250410_000031, 0xfa18_a707_9438_dbc7_2cde_b5f1_ee21_5c7e),
    (20250410_000032, 0xd669_662e_8930_838a_b142_c3fa_7b39_d2a0),
    (20250410_000033, 0x4019_1053_cabc_191c_c02e_9aa9_407c_0de5),
    (20250410_000034, 0xdd59_e595_24e6_4dad_c5f7_fef2_90b8_df57),
    (20250410_000035, 0x09b4_ea53_2da4_9c39_eb10_db33_6a6d_608b),
    (20250410_000036, 0x3ca5_9c78_8480_e342_d729_907c_d293_2049),
    (20250410_000037, 0xc857_2a10_450b_0612_822c_2b86_535a_ea7d),
    (20250410_000038, 0x1642_39da_9c3b_d9fd_b1e1_72b1_db78_b978),
    (20250410_000039, 0xdd70_b211_6016_bb84_0d84_f04e_eb8a_59d9),
    (20250410_000040, 0xe435_ead6_c363_a0b6_e048_dd85_0ecb_9499),
    (20250410_000041, 0xe9f3_122f_70d4_9839_c818_4b18_0192_ae26),
    (20250410_000043, 0xec5e_1400_483d_c4bf_6014_aba4_ffc3_6236),
    (20250410_000044, 0x4750_5eba_4095_6664_78d0_27f9_64bf_64f4),
    (20250410_000045, 0x9a53_bd70_4cad_2bf1_61d4_f143_0c82_681d),
    (20250410_121612, 0x25f0_9d20_a897_df18_162d_1c47_b68e_81bd),
    (20250602_212101, 0xd1a8_782c_b3f0_5045_3f46_49a0_bab0_822b),
    (20250708_155857, 0xb78e_6957_a588_c16a_d292_a0c7_cae9_f290),
    (20250915_092635, 0x6854_d58b_99d7_3ac5_82f8_25e5_b1c3_cc0b),
    (20251127_145951, 0x3bcd_d92e_8391_2a2c_8a18_1d76_354f_96c6),
];

fn alternate_checksums_map() -> BTreeMap<i64, HashSet<u128>> {
    let mut map = BTreeMap::new();
    for (version, checksum) in ALLOWED_ALTERNATE_CHECKSUMS {
        map.entry(*version)
            .or_insert_with(HashSet::new)
            .insert(*checksum);
    }
    map
}

/// Load the list of applied migrations into a map.
///
/// It's important to use a [`BTreeMap`] so that the migrations are naturally
/// ordered by version.
async fn applied_migrations_map(
    conn: &mut PgConnection,
) -> Result<BTreeMap<i64, AppliedMigration>, MigrateError> {
    let applied_migrations = conn
        .list_applied_migrations()
        .await?
        .into_iter()
        .map(|m| (m.version, m))
        .collect();

    Ok(applied_migrations)
}

/// Checks if the migration table exists
async fn migration_table_exists(conn: &mut PgConnection) -> Result<bool, sqlx::Error> {
    sqlx::query_scalar!(
        r#"
            SELECT EXISTS (
                SELECT 1
                FROM information_schema.tables
                WHERE table_name = '_sqlx_migrations'
            ) AS "exists!"
        "#,
    )
    .fetch_one(conn)
    .await
}

/// Run the migrations on the given connection
///
/// This function acquires an advisory lock on the database to ensure that only
/// one migrator is running at a time.
///
/// # Errors
///
/// This function returns an error if the migration fails.
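///
/// # Example
///
/// A minimal sketch of running the migrations at startup (the connection
/// string is a placeholder):
///
/// ```rust,no_run
/// # async fn example() -> Result<(), Box<dyn std::error::Error>> {
/// use sqlx::Connection;
///
/// let mut conn = sqlx::PgConnection::connect("postgres://localhost/mas").await?;
/// mas_storage_pg::migrate(&mut conn).await?;
/// # Ok(())
/// # }
/// ```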
#[::tracing::instrument(name = "db.migrate", skip_all, err)]
pub async fn migrate(conn: &mut PgConnection) -> Result<(), MigrateError> {
    // Get the database name and use it to derive an advisory lock key. This is
    // the same lock key used by SQLx's default migrator, so that it works even
    // with older versions of MAS, and when running through `cargo sqlx migrate run`
    let database_name = sqlx::query_scalar!(r#"SELECT current_database() as "current_database!""#)
        .fetch_one(&mut *conn)
        .await
        .map_err(MigrateError::from)?;

    let lock =
        PgAdvisoryLock::with_key(PgAdvisoryLockKey::BigInt(generate_lock_id(&database_name)));

    // Try to acquire the migration lock in a loop.
    //
    // We use `try_acquire` because in Postgres, `CREATE INDEX CONCURRENTLY`
    // will *not* complete whilst an advisory lock is being acquired on another
    // connection. This means that if we run two migration processes at the
    // same time, one of them acquires the lock but then blocks on its
    // concurrent index creations, because the other is stuck trying to acquire
    // this lock.
    //
    // To avoid this, we use `try_acquire`/`pg_try_advisory_lock` in a loop,
    // which fails immediately if the lock is held by another connection,
    // allowing potential `CREATE INDEX CONCURRENTLY` statements to complete.
    let mut backoff = std::time::Duration::from_millis(250);
    let mut conn = conn;
    let mut locked_connection = loop {
        match lock.try_acquire(conn).await? {
            Either::Left(guard) => break guard,
            Either::Right(conn_) => {
                warn!(
                    "Another process is already running migrations on the database, waiting {duration}s and trying again…",
                    duration = backoff.as_secs_f32()
                );
                tokio::time::sleep(backoff).await;
                backoff = std::cmp::min(backoff * 2, std::time::Duration::from_secs(5));
                conn = conn_;
            }
        }
    };

    // Creates the migration table if missing.
    // We check if the table exists before calling `ensure_migrations_table` to
    // avoid the pesky 'relation "_sqlx_migrations" already exists, skipping' notice
    if !migration_table_exists(locked_connection.as_mut()).await? {
        locked_connection.as_mut().ensure_migrations_table().await?;
    }

    for migration in pending_migrations(locked_connection.as_mut()).await? {
        info!(
            "Applying migration {version}: {description}",
            version = migration.version,
            description = migration.description
        );
        locked_connection
            .as_mut()
            .apply(migration)
            .instrument(info_span!(
                "db.migrate.run_migration",
                db.migration.version = migration.version,
                db.migration.description = &*migration.description,
                { DB_QUERY_TEXT } = &*migration.sql,
            ))
            .await?;
    }

    locked_connection.release_now().await?;

    Ok(())
}

/// Get the list of pending migrations
///
/// # Errors
///
/// This function returns an error if there is a problem checking the applied
/// migrations.
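///
/// A sketch of using this to report what would be applied, without applying
/// anything:
///
/// ```rust,no_run
/// # async fn example(conn: &mut sqlx::PgConnection) -> Result<(), sqlx::migrate::MigrateError> {
/// for migration in mas_storage_pg::pending_migrations(conn).await? {
///     println!("{}: {}", migration.version, migration.description);
/// }
/// # Ok(())
/// # }
/// ```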
pub async fn pending_migrations(
    conn: &mut PgConnection,
) -> Result<Vec<&'static Migration>, MigrateError> {
    // Load the maps of available migrations, applied migrations, migrations
    // that are allowed to be missing, and alternate checksums for migrations
    // that changed
    let available_migrations = available_migrations();
    let allowed_missing = allowed_missing_migrations();
    let alternate_checksums = alternate_checksums_map();
    let applied_migrations = if migration_table_exists(&mut *conn).await? {
        applied_migrations_map(&mut *conn).await?
    } else {
        BTreeMap::new()
    };

    // Check that all applied migrations are still valid
    for applied_migration in applied_migrations.values() {
        // Check that we know about the applied migration
        if let Some(migration) = available_migrations.get(&applied_migration.version) {
            // Check the migration checksum
            if applied_migration.checksum != migration.checksum {
                // The checksum we have in the database doesn't match the one we
                // have embedded. This might be because a migration was
                // intentionally changed, so we check the alternate checksums
                if let Some(alternates) = alternate_checksums.get(&applied_migration.version) {
                    // This converts the first 16 bytes of the checksum into a u128
                    let Some(applied_checksum_prefix) = applied_migration
                        .checksum
                        .get(..16)
                        .and_then(|bytes| bytes.try_into().ok())
                        .map(u128::from_be_bytes)
                    else {
                        return Err(MigrateError::ExecuteMigration(
                            sqlx::Error::InvalidArgument(
                                "checksum stored in database is invalid".to_owned(),
                            ),
                            applied_migration.version,
                        ));
                    };

                    if !alternates.contains(&applied_checksum_prefix) {
                        warn!(
                            "The database has a migration applied ({version}) which has known alternative checksums {alternates:x?}, but none of them matched {applied_checksum_prefix:x}",
                            version = applied_migration.version,
                        );
                        return Err(MigrateError::VersionMismatch(applied_migration.version));
                    }
                } else {
                    return Err(MigrateError::VersionMismatch(applied_migration.version));
                }
            }
        } else if allowed_missing.contains(&applied_migration.version) {
            // The migration is missing, but allowed to be missing
            debug!(
                "The database has a migration applied ({version}) that doesn't exist anymore, but it was intentionally removed",
                version = applied_migration.version
            );
        } else {
            // The migration is missing, warn about it
            warn!(
                "The database has a migration applied ({version}) that doesn't exist anymore! This should not happen, unless rolling back to an older version of MAS.",
                version = applied_migration.version
            );
        }
    }

    Ok(available_migrations
        .values()
        .copied()
        .filter(|migration| {
            !migration.migration_type.is_down_migration()
                && !applied_migrations.contains_key(&migration.version)
        })
        .collect())
}

// Copied from the sqlx source code, so that we generate the same lock ID
fn generate_lock_id(database_name: &str) -> i64 {
    const CRC_IEEE: crc::Crc<u32> = crc::Crc::<u32>::new(&crc::CRC_32_ISO_HDLC);
    // 0x3d32ad9e chosen by fair dice roll
    0x3d32_ad9e * i64::from(CRC_IEEE.checksum(database_name.as_bytes()))
}