From b64861394d108448f209cb297e0150b11cbe4090 Mon Sep 17 00:00:00 2001 From: Pietro Albini Date: Wed, 18 Dec 2019 17:17:30 +0100 Subject: [PATCH 1/9] db.file: allow overriding the region This is useful when testing against play.minio.io, as it requires the us-east-1 region name. --- src/db/file.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/db/file.rs b/src/db/file.rs index d5e09e1f0..2f72d6dfc 100644 --- a/src/db/file.rs +++ b/src/db/file.rs @@ -133,7 +133,8 @@ fn s3_client() -> Option { rusoto_core::request::HttpClient::new().unwrap(), creds, std::env::var("S3_ENDPOINT").ok().map(|e| Region::Custom { - name: "us-west-1".to_owned(), + name: std::env::var("S3_REGION") + .unwrap_or_else(|| "us-west-1".to_owned()), endpoint: e, }).unwrap_or(Region::UsWest1), )) From 21030d8a40be3ba054886b314ff7faa709c4479e Mon Sep 17 00:00:00 2001 From: Pietro Albini Date: Wed, 18 Dec 2019 17:18:06 +0100 Subject: [PATCH 2/9] db.file: show s3 upload errors Without this commit debugging S3 upload errors is a pain, as there is no way to see them. 
--- src/db/file.rs | 22 +++++++++++++--------- 1 file changed, 13 insertions(+), 9 deletions(-) diff --git a/src/db/file.rs b/src/db/file.rs index 2f72d6dfc..69038c374 100644 --- a/src/db/file.rs +++ b/src/db/file.rs @@ -232,15 +232,19 @@ pub fn add_path_into_database>(conn: &Connection, if !futures.is_empty() { attempts += 1; - if rt.block_on(::futures::future::join_all(futures)).is_ok() { - // this batch was successful, start another batch if there are still more files - batch_size = cmp::min(to_upload.len(), MAX_CONCURRENT_UPLOADS); - currently_uploading = to_upload.drain(..batch_size).collect(); - attempts = 0; - } else { - // if any futures error, leave `currently_uploading` in tact so that we can retry the batch - if attempts > 2 { - panic!("failed to upload 3 times, exiting"); + match rt.block_on(::futures::future::join_all(futures)) { + Ok(_) => { + // this batch was successful, start another batch if there are still more files + batch_size = cmp::min(to_upload.len(), MAX_CONCURRENT_UPLOADS); + currently_uploading = to_upload.drain(..batch_size).collect(); + attempts = 0; + }, + Err(err) => { + error!("failed to upload to s3: {:?}", err); + // if any futures error, leave `currently_uploading` in tact so that we can retry the batch + if attempts > 2 { + panic!("failed to upload 3 times, exiting"); + } } } } else { From bd908560275ba164344ec7d3adca273ba19855b4 Mon Sep 17 00:00:00 2001 From: Pietro Albini Date: Wed, 18 Dec 2019 17:44:01 +0100 Subject: [PATCH 3/9] db: add the `cratesfyi database delete-crate` command --- src/bin/cratesfyi.rs | 9 +++- src/db/delete_crate.rs | 115 +++++++++++++++++++++++++++++++++++++++++ src/db/file.rs | 12 +++-- src/db/mod.rs | 2 + 4 files changed, 132 insertions(+), 6 deletions(-) create mode 100644 src/db/delete_crate.rs diff --git a/src/bin/cratesfyi.rs b/src/bin/cratesfyi.rs index 2b06d0048..ba5167c79 100644 --- a/src/bin/cratesfyi.rs +++ b/src/bin/cratesfyi.rs @@ -111,8 +111,11 @@ pub fn main() { 
.subcommand(SubCommand::with_name("update-release-activity")) .about("Updates montly release activity \ chart") - .subcommand(SubCommand::with_name("update-search-index")) + .subcommand(SubCommand::with_name("update-search-index") .about("Updates search index")) + .subcommand(SubCommand::with_name("delete-crate") + .about("Removes a whole crate from the database") + .arg(Arg::with_name("CRATE_NAME").help("Name of the crate to delete")))) .subcommand(SubCommand::with_name("queue") .about("Interactions with the build queue") .subcommand(SubCommand::with_name("add") @@ -225,6 +228,10 @@ pub fn main() { count, total ); } + } else if let Some(matches) = matches.subcommand_matches("delete-crate") { + let name = matches.value_of("CRATE_NAME").expect("missing crate name"); + let conn = db::connect_db().expect("failed to connect to the database"); + db::delete_crate(&conn, &name).expect("failed to delete the crate"); } } else if let Some(matches) = matches.subcommand_matches("start-web-server") { start_web_server(Some(matches.value_of("SOCKET_ADDR").unwrap_or("0.0.0.0:3000"))); diff --git a/src/db/delete_crate.rs b/src/db/delete_crate.rs new file mode 100644 index 000000000..39def363c --- /dev/null +++ b/src/db/delete_crate.rs @@ -0,0 +1,115 @@ +use super::file::{s3_client, S3_BUCKET_NAME}; +use failure::Error; +use postgres::Connection; +use rusoto_s3::{DeleteObjectsRequest, ListObjectsV2Request, ObjectIdentifier, S3Client, S3}; + +/// List of directories in docs.rs's underlying storage (either the database or S3) containing a +/// subdirectory named after the crate. Those subdirectories will be deleted. 
+static STORAGE_PATHS_TO_DELETE: &[&str] = &["rustdoc", "sources"]; + +#[derive(Debug, Fail)] +enum CrateDeletionError { + #[fail(display = "crate is missing: {}", _0)] + MissingCrate(String), +} + +pub fn delete_crate(conn: &Connection, name: &str) -> Result<(), Error> { + let crate_id_res = conn.query("SELECT id FROM crates WHERE name = $1", &[&name])?; + let crate_id = if crate_id_res.is_empty() { + return Err(CrateDeletionError::MissingCrate(name.into()).into()); + } else { + crate_id_res.get(0).get("id") + }; + + delete_from_database(conn, name, crate_id)?; + if let Some(s3) = s3_client() { + delete_from_s3(&s3, name)?; + } + + Ok(()) +} + +fn delete_from_database(conn: &Connection, name: &str, crate_id: i32) -> Result<(), Error> { + let transaction = conn.transaction()?; + + transaction.execute( + "DELETE FROM sandbox_overrides WHERE crate_name = $1", + &[&name], + )?; + transaction.execute( + "DELETE FROM author_rels WHERE rid IN (SELECT id FROM releases WHERE crate_id = $1);", + &[&crate_id], + )?; + transaction.execute( + "DELETE FROM owner_rels WHERE cid IN (SELECT id FROM releases WHERE crate_id = $1);", + &[&crate_id], + )?; + transaction.execute( + "DELETE FROM keyword_rels WHERE rid IN (SELECT id FROM releases WHERE crate_id = $1);", + &[&crate_id], + )?; + transaction.execute( + "DELETE FROM builds WHERE rid IN (SELECT id FROM releases WHERE crate_id = $1);", + &[&crate_id], + )?; + transaction.execute("DELETE FROM releases WHERE crate_id = $1;", &[&crate_id])?; + transaction.execute("DELETE FROM crates WHERE id = $1;", &[&crate_id])?; + + for prefix in STORAGE_PATHS_TO_DELETE { + transaction.execute( + "DELETE FROM files WHERE path LIKE $1;", + &[&format!("{}/{}/%", prefix, name)], + )?; + } + + // Transactions automatically rollback when not committing, so if any of the previous queries + // fail the whole transaction will be aborted. 
+ transaction.commit()?; + Ok(()) +} + +fn delete_from_s3(s3: &S3Client, name: &str) -> Result<(), Error> { + for prefix in STORAGE_PATHS_TO_DELETE { + delete_prefix_from_s3(s3, &format!("{}/{}/", prefix, name))?; + } + Ok(()) +} + +fn delete_prefix_from_s3(s3: &S3Client, name: &str) -> Result<(), Error> { + let mut continuation_token = None; + loop { + let list = s3 + .list_objects_v2(ListObjectsV2Request { + bucket: S3_BUCKET_NAME.into(), + prefix: Some(name.into()), + continuation_token, + ..ListObjectsV2Request::default() + }) + .sync()?; + + let to_delete = list + .contents + .unwrap_or_else(Vec::new) + .into_iter() + .filter_map(|o| o.key) + .map(|key| ObjectIdentifier { + key, + version_id: None, + }) + .collect::>(); + s3.delete_objects(DeleteObjectsRequest { + bucket: S3_BUCKET_NAME.into(), + delete: rusoto_s3::Delete { + objects: to_delete, + quiet: None, + }, + ..DeleteObjectsRequest::default() + }) + .sync()?; + + continuation_token = list.continuation_token; + if continuation_token.is_none() { + return Ok(()); + } + } +} diff --git a/src/db/file.rs b/src/db/file.rs index 69038c374..262d96f89 100644 --- a/src/db/file.rs +++ b/src/db/file.rs @@ -20,6 +20,8 @@ use rusoto_credential::DefaultCredentialsProvider; const MAX_CONCURRENT_UPLOADS: usize = 1000; +pub(super) static S3_BUCKET_NAME: &str = "rust-docs-rs"; + fn get_file_list_from_dir>(path: P, files: &mut Vec) @@ -69,7 +71,7 @@ pub struct Blob { pub fn get_path(conn: &Connection, path: &str) -> Option { if let Some(client) = s3_client() { let res = client.get_object(GetObjectRequest { - bucket: "rust-docs-rs".into(), + bucket: S3_BUCKET_NAME.into(), key: path.into(), ..Default::default() }).sync(); @@ -116,7 +118,7 @@ pub fn get_path(conn: &Connection, path: &str) -> Option { } } -fn s3_client() -> Option { +pub(super) fn s3_client() -> Option { // If AWS keys aren't configured, then presume we should use the DB exclusively // for file storage. 
if std::env::var_os("AWS_ACCESS_KEY_ID").is_none() && std::env::var_os("FORCE_S3").is_none() { @@ -134,7 +136,7 @@ fn s3_client() -> Option { creds, std::env::var("S3_ENDPOINT").ok().map(|e| Region::Custom { name: std::env::var("S3_REGION") - .unwrap_or_else(|| "us-west-1".to_owned()), + .unwrap_or_else(|_| "us-west-1".to_owned()), endpoint: e, }).unwrap_or(Region::UsWest1), )) @@ -203,7 +205,7 @@ pub fn add_path_into_database>(conn: &Connection, if let Some(client) = &client { futures.push(client.put_object(PutObjectRequest { - bucket: "rust-docs-rs".into(), + bucket: S3_BUCKET_NAME.into(), key: bucket_path.clone(), body: Some(content.clone().into()), content_type: Some(mime.clone()), @@ -295,7 +297,7 @@ pub fn move_to_s3(conn: &Connection, n: usize) -> Result { let content: Vec = row.get(2); let path_1 = path.clone(); futures.push(client.put_object(PutObjectRequest { - bucket: "rust-docs-rs".into(), + bucket: S3_BUCKET_NAME.into(), key: path.clone(), body: Some(content.into()), content_type: Some(mime), diff --git a/src/db/mod.rs b/src/db/mod.rs index a83ce902a..616947528 100644 --- a/src/db/mod.rs +++ b/src/db/mod.rs @@ -4,6 +4,7 @@ pub(crate) use self::add_package::add_package_into_database; pub(crate) use self::add_package::add_build_into_database; pub use self::file::add_path_into_database; pub use self::migrate::migrate; +pub use self::delete_crate::delete_crate; use postgres::{Connection, TlsMode}; use postgres::error::Error; @@ -14,6 +15,7 @@ use r2d2_postgres; mod add_package; pub mod file; mod migrate; +mod delete_crate; /// Connects to database From 2df41e1354c9374b3c85a61828d214046d5c67dd Mon Sep 17 00:00:00 2001 From: Pietro Albini Date: Thu, 19 Dec 2019 10:48:06 +0100 Subject: [PATCH 4/9] db: show errors happening when deleting a file from S3 --- src/db/delete_crate.rs | 25 ++++++++++++++++--------- 1 file changed, 16 insertions(+), 9 deletions(-) diff --git a/src/db/delete_crate.rs b/src/db/delete_crate.rs index 39def363c..98ceeed1d 100644 --- 
a/src/db/delete_crate.rs +++ b/src/db/delete_crate.rs @@ -97,15 +97,22 @@ fn delete_prefix_from_s3(s3: &S3Client, name: &str) -> Result<(), Error> { version_id: None, }) .collect::>(); - s3.delete_objects(DeleteObjectsRequest { - bucket: S3_BUCKET_NAME.into(), - delete: rusoto_s3::Delete { - objects: to_delete, - quiet: None, - }, - ..DeleteObjectsRequest::default() - }) - .sync()?; + let resp = s3 + .delete_objects(DeleteObjectsRequest { + bucket: S3_BUCKET_NAME.into(), + delete: rusoto_s3::Delete { + objects: to_delete, + quiet: None, + }, + ..DeleteObjectsRequest::default() + }) + .sync()?; + if let Some(errs) = resp.errors { + for err in &errs { + log::error!("error deleting file from s3: {:?}", err); + } + failure::bail!("uploading to s3 failed"); + } continuation_token = list.continuation_token; if continuation_token.is_none() { From 82cc5cdda68d329b29d6da8bf9adc9ff5a11e420 Mon Sep 17 00:00:00 2001 From: Pietro Albini Date: Thu, 19 Dec 2019 14:34:36 +0100 Subject: [PATCH 5/9] db: add database tests --- .github/workflows/ci.yml | 8 +++ README.md | 13 ++++ src/bin/cratesfyi.rs | 3 +- src/db/add_package.rs | 103 ++++++++++++++++++++--------- src/db/delete_crate.rs | 57 ++++++++++++++++ src/db/migrate.rs | 91 +++++++++++++++++-------- src/db/mod.rs | 3 +- src/docbuilder/rustwide_builder.rs | 42 +++++++----- src/lib.rs | 2 + src/test/fakes.rs | 93 ++++++++++++++++++++++++++ src/test/mod.rs | 43 ++++++++++++ src/utils/cargo_metadata.rs | 11 +++ src/utils/mod.rs | 3 + 13 files changed, 397 insertions(+), 75 deletions(-) create mode 100644 src/test/fakes.rs create mode 100644 src/test/mod.rs diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 76b5209a7..840411d2b 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -16,6 +16,14 @@ jobs: - name: Install stable Rust run: rustup update stable && rustup default stable + - name: Install PostgreSQL + run: | + sudo apt-get update && DEBIAN_FRONTEND=noninteractive sudo apt-get install -y 
postgresql + sudo systemctl start postgresql + sudo -u postgres createuser $(whoami) -w + sudo -u postgres createdb $(whoami) -O $(whoami) + echo "::set-env name=CRATESFYI_DATABASE_URL::postgresql://$(whoami)@%2Fvar%2Frun%2Fpostgresql/$(whoami)" + - name: Build docs.rs run: cargo build diff --git a/README.md b/README.md index 7b87bf6c4..f1c9f2716 100644 --- a/README.md +++ b/README.md @@ -80,6 +80,19 @@ If you need to store big files in the repository's directory it's recommended to put them in the `ignored/` subdirectory, which is ignored both by git and Docker. +### Running tests + +Tests are run outside of the docker-compose environment, and can be run with: + +``` +cargo test +``` + +Some tests require access to the database. To run them, set the +`CRATESFYI_DATABASE_URL` to the url of a PostgreSQL database. You don't have to +run the migrations on it or ensure it's empty, as all the tests use temporary +tables to prevent conflicts with each other or existing data. + ### Docker-Compose #### Rebuilding Containers diff --git a/src/bin/cratesfyi.rs b/src/bin/cratesfyi.rs index ba5167c79..990a2a653 100644 --- a/src/bin/cratesfyi.rs +++ b/src/bin/cratesfyi.rs @@ -202,7 +202,8 @@ pub fn main() { if let Some(matches) = matches.subcommand_matches("migrate") { let version = matches.value_of("VERSION").map(|v| v.parse::() .expect("Version should be an integer")); - db::migrate(version).expect("Failed to run database migrations"); + db::migrate(version, &connect_db().expect("failed to connect to the database")) + .expect("Failed to run database migrations"); } else if let Some(_) = matches.subcommand_matches("update-github-fields") { cratesfyi::utils::github_updater().expect("Failed to update github fields"); } else if let Some(matches) = matches.subcommand_matches("add-directory") { diff --git a/src/db/add_package.rs b/src/db/add_package.rs index 94c8e3a41..359361a22 100644 --- a/src/db/add_package.rs +++ b/src/db/add_package.rs @@ -1,5 +1,4 @@ -use Metadata; use 
utils::MetadataPackage; use docbuilder::BuildResult; use regex::Regex; @@ -9,6 +8,7 @@ use std::io::BufReader; use std::path::Path; use std::fs; +use time::Timespec; use rustc_serialize::json::{Json, ToJson}; use slug::slugify; use reqwest::Client; @@ -28,6 +28,8 @@ pub(crate) fn add_package_into_database(conn: &Connection, res: &BuildResult, files: Option, doc_targets: Vec, + default_target: &Option, + cratesio_data: &CratesIoData, has_docs: bool, has_examples: bool) -> Result { @@ -36,9 +38,7 @@ pub(crate) fn add_package_into_database(conn: &Connection, let dependencies = convert_dependencies(metadata_pkg); let rustdoc = get_rustdoc(metadata_pkg, source_dir).unwrap_or(None); let readme = get_readme(metadata_pkg, source_dir).unwrap_or(None); - let (release_time, yanked, downloads) = get_release_time_yanked_downloads(metadata_pkg)?; let is_library = metadata_pkg.is_library(); - let metadata = Metadata::from_source_dir(source_dir)?; let release_id: i32 = { let rows = conn.query("SELECT id FROM releases WHERE crate_id = $1 AND version = $2", @@ -61,10 +61,10 @@ pub(crate) fn add_package_into_database(conn: &Connection, RETURNING id", &[&crate_id, &metadata_pkg.version, - &release_time, + &cratesio_data.release_time, &dependencies.to_json(), &metadata_pkg.package_name(), - &yanked, + &cratesio_data.yanked, &res.successful, &has_docs, &false, // TODO: Add test status somehow @@ -77,13 +77,13 @@ pub(crate) fn add_package_into_database(conn: &Connection, &metadata_pkg.authors.to_json(), &metadata_pkg.keywords.to_json(), &has_examples, - &downloads, + &cratesio_data.downloads, &files, &doc_targets.to_json(), &is_library, &res.rustc_version, &metadata_pkg.documentation, - &metadata.default_target])?; + &default_target])?; // return id rows.get(0).get(0) @@ -115,10 +115,10 @@ pub(crate) fn add_package_into_database(conn: &Connection, WHERE crate_id = $1 AND version = $2", &[&crate_id, &format!("{}", metadata_pkg.version), - &release_time, + &cratesio_data.release_time, 
&dependencies.to_json(), &metadata_pkg.package_name(), - &yanked, + &cratesio_data.yanked, &res.successful, &has_docs, &false, // TODO: Add test status somehow @@ -131,13 +131,13 @@ pub(crate) fn add_package_into_database(conn: &Connection, &metadata_pkg.authors.to_json(), &metadata_pkg.keywords.to_json(), &has_examples, - &downloads, + &cratesio_data.downloads, &files, &doc_targets.to_json(), &is_library, &res.rustc_version, &metadata_pkg.documentation, - &metadata.default_target])?; + &default_target])?; rows.get(0).get(0) } }; @@ -145,7 +145,7 @@ pub(crate) fn add_package_into_database(conn: &Connection, add_keywords_into_database(&conn, &metadata_pkg, &release_id)?; add_authors_into_database(&conn, &metadata_pkg, &release_id)?; - add_owners_into_database(&conn, &metadata_pkg, &crate_id)?; + add_owners_into_database(&conn, &cratesio_data.owners, &crate_id)?; // Update versions @@ -284,6 +284,27 @@ fn read_rust_doc(file_path: &Path) -> Result> { } +pub(crate) struct CratesIoData { + pub(crate) release_time: Timespec, + pub(crate) yanked: bool, + pub(crate) downloads: i32, + pub(crate) owners: Vec, +} + +impl CratesIoData { + pub(crate) fn get_from_network(pkg: &MetadataPackage) -> Result { + let (release_time, yanked, downloads) = get_release_time_yanked_downloads(pkg)?; + let owners = get_owners(pkg)?; + + Ok(Self { + release_time, + yanked, + downloads, + owners, + }) + } +} + /// Get release_time, yanked and downloads from crates.io fn get_release_time_yanked_downloads( @@ -394,9 +415,15 @@ fn add_authors_into_database(conn: &Connection, pkg: &MetadataPackage, release_i } +pub(crate) struct CrateOwner { + pub(crate) avatar: String, + pub(crate) email: String, + pub(crate) login: String, + pub(crate) name: String, +} -/// Adds owners into database -fn add_owners_into_database(conn: &Connection, pkg: &MetadataPackage, crate_id: &i32) -> Result<()> { +/// Fetch owners from crates.io +fn get_owners(pkg: &MetadataPackage) -> Result> { // owners available in: 
https://crates.io/api/v1/crates/rand/owners let owners_url = format!("https://crates.io/api/v1/crates/{}/owners", pkg.name); let client = Client::new(); @@ -409,6 +436,7 @@ fn add_owners_into_database(conn: &Connection, pkg: &MetadataPackage, crate_id: res.read_to_string(&mut body).unwrap(); let json = Json::from_str(&body[..])?; + let mut result = Vec::new(); if let Some(owners) = json.as_object() .and_then(|j| j.get("users")) .and_then(|j| j.as_array()) { @@ -435,25 +463,38 @@ fn add_owners_into_database(conn: &Connection, pkg: &MetadataPackage, crate_id: continue; } - let owner_id: i32 = { - let rows = conn.query("SELECT id FROM owners WHERE login = $1", &[&login])?; - if rows.len() > 0 { - rows.get(0).get(0) - } else { - conn.query("INSERT INTO owners (login, avatar, name, email) - VALUES ($1, $2, $3, $4) - RETURNING id", - &[&login, &avatar, &name, &email])? - .get(0) - .get(0) - } - }; - - // add relationship - let _ = conn.query("INSERT INTO owner_rels (cid, oid) VALUES ($1, $2)", - &[crate_id, &owner_id]); + result.push(CrateOwner { + avatar: avatar.to_string(), + email: email.to_string(), + login: login.to_string(), + name: name.to_string(), + }); } + } + + Ok(result) +} + +/// Adds owners into database +fn add_owners_into_database(conn: &Connection, owners: &[CrateOwner], crate_id: &i32) -> Result<()> { + for owner in owners { + let owner_id: i32 = { + let rows = conn.query("SELECT id FROM owners WHERE login = $1", &[&owner.login])?; + if rows.len() > 0 { + rows.get(0).get(0) + } else { + conn.query("INSERT INTO owners (login, avatar, name, email) + VALUES ($1, $2, $3, $4) + RETURNING id", + &[&owner.login, &owner.avatar, &owner.name, &owner.email])? 
+ .get(0) + .get(0) + } + }; + // add relationship + let _ = conn.query("INSERT INTO owner_rels (cid, oid) VALUES ($1, $2)", + &[crate_id, &owner_id]); } Ok(()) } diff --git a/src/db/delete_crate.rs b/src/db/delete_crate.rs index 98ceeed1d..7d7efd846 100644 --- a/src/db/delete_crate.rs +++ b/src/db/delete_crate.rs @@ -120,3 +120,60 @@ fn delete_prefix_from_s3(s3: &S3Client, name: &str) -> Result<(), Error> { } } } + +#[cfg(test)] +mod tests { + use super::*; + use failure::Error; + use postgres::Connection; + + #[test] + fn test_delete_from_database() { + fn crate_exists(conn: &Connection, name: &str) -> Result { + Ok(!conn + .query("SELECT * FROM crates WHERE name = $1;", &[&name])? + .is_empty()) + } + fn release_exists(conn: &Connection, id: i32) -> Result { + Ok(!conn + .query("SELECT * FROM releases WHERE id = $1;", &[&id])? + .is_empty()) + } + + crate::test::with_database(|db| { + // Create fake packages in the database + let pkg1_v1_id = db + .fake_release() + .name("package-1") + .version("1.0.0") + .create()?; + let pkg1_v2_id = db + .fake_release() + .name("package-1") + .version("2.0.0") + .create()?; + let pkg2_id = db.fake_release().name("package-2").create()?; + + assert!(crate_exists(db.conn(), "package-1")?); + assert!(crate_exists(db.conn(), "package-2")?); + assert!(release_exists(db.conn(), pkg1_v1_id)?); + assert!(release_exists(db.conn(), pkg1_v2_id)?); + assert!(release_exists(db.conn(), pkg2_id)?); + + let pkg1_id = db.conn() + .query("SELECT id FROM crates WHERE name = 'package-1';", &[])? 
+ .get(0) + .get("id"); + + delete_from_database(db.conn(), "package-1", pkg1_id)?; + + assert!(!crate_exists(db.conn(), "package-1")?); + assert!(crate_exists(db.conn(), "package-2")?); + assert!(!release_exists(db.conn(), pkg1_v1_id)?); + assert!(!release_exists(db.conn(), pkg1_v2_id)?); + assert!(release_exists(db.conn(), pkg2_id)?); + + Ok(()) + }); + } +} diff --git a/src/db/migrate.rs b/src/db/migrate.rs index d8dd58ae5..411e4d775 100644 --- a/src/db/migrate.rs +++ b/src/db/migrate.rs @@ -1,11 +1,29 @@ //! Database migrations -use db::connect_db; use error::Result as CratesfyiResult; -use postgres::error::Error as PostgresError; -use postgres::transaction::Transaction; +use postgres::{Connection, transaction::Transaction, Error as PostgresError}; use schemamama::{Migration, Migrator, Version}; use schemamama_postgres::{PostgresAdapter, PostgresMigration}; +use std::rc::Rc; + + +enum ApplyMode { + Permanent, + Temporary, +} + +struct MigrationContext { + apply_mode: ApplyMode, +} + +impl MigrationContext { + fn format_query(&self, query: &str) -> String { + query.replace("{create_table}", match self.apply_mode { + ApplyMode::Permanent => "CREATE TABLE", + ApplyMode::Temporary => "CREATE TEMPORARY TABLE", + }) + } +} /// Creates a new PostgresMigration from upgrade and downgrade queries. @@ -16,12 +34,14 @@ use schemamama_postgres::{PostgresAdapter, PostgresMigration}; /// ``` /// let my_migration = migration!(100, /// "Create test table", -/// "CREATE TABLE test ( id SERIAL);", +/// "{create_table} test ( id SERIAL);", /// "DROP TABLE test;"); /// ``` macro_rules! migration { - ($version:expr, $description:expr, $up:expr, $down:expr $(,)?) => {{ - struct Amigration; + ($context:expr, $version:expr, $description:expr, $up:expr, $down:expr $(,)?) => {{ + struct Amigration { + ctx: Rc, + }; impl Migration for Amigration { fn version(&self) -> Version { $version @@ -33,33 +53,48 @@ macro_rules! 
migration { impl PostgresMigration for Amigration { fn up(&self, transaction: &Transaction) -> Result<(), PostgresError> { info!("Applying migration {}: {}", self.version(), self.description()); - transaction.batch_execute($up).map(|_| ()) + transaction.batch_execute(&self.ctx.format_query($up)).map(|_| ()) } fn down(&self, transaction: &Transaction) -> Result<(), PostgresError> { info!("Removing migration {}: {}", self.version(), self.description()); - transaction.batch_execute($down).map(|_| ()) + transaction.batch_execute(&self.ctx.format_query($down)).map(|_| ()) } } - Box::new(Amigration) + Box::new(Amigration { ctx: $context }) }}; } -pub fn migrate(version: Option) -> CratesfyiResult<()> { - let conn = connect_db()?; - let adapter = PostgresAdapter::with_metadata_table(&conn, "database_versions"); - adapter.setup_schema()?; +pub fn migrate(version: Option, conn: &Connection) -> CratesfyiResult<()> { + migrate_inner(version, conn, ApplyMode::Permanent) +} + +pub fn migrate_temporary(version: Option, conn: &Connection) -> CratesfyiResult<()> { + migrate_inner(version, conn, ApplyMode::Temporary) +} + +fn migrate_inner(version: Option, conn: &Connection, apply_mode: ApplyMode) -> CratesfyiResult<()> { + let context = Rc::new(MigrationContext { apply_mode }); + + conn.execute( + &context.format_query( + "{create_table} IF NOT EXISTS database_versions (version BIGINT PRIMARY KEY);" + ), + &[], + )?; + let adapter = PostgresAdapter::with_metadata_table(conn, "database_versions"); let mut migrator = Migrator::new(adapter); let migrations: Vec> = vec![ migration!( + context.clone(), // version 1, // description "Initial database schema", // upgrade query - "CREATE TABLE crates ( + "{create_table} crates ( id SERIAL PRIMARY KEY, name VARCHAR(255) UNIQUE NOT NULL, latest_version_id INT DEFAULT 0, @@ -73,7 +108,7 @@ pub fn migrate(version: Option) -> CratesfyiResult<()> { github_last_update TIMESTAMP, content tsvector ); - CREATE TABLE releases ( + {create_table} 
releases ( id SERIAL PRIMARY KEY, crate_id INT NOT NULL REFERENCES crates(id), version VARCHAR(100), @@ -102,40 +137,40 @@ pub fn migrate(version: Option) -> CratesfyiResult<()> { default_target VARCHAR(100), UNIQUE (crate_id, version) ); - CREATE TABLE authors ( + {create_table} authors ( id SERIAL PRIMARY KEY, name VARCHAR(255), email VARCHAR(255), slug VARCHAR(255) UNIQUE NOT NULL ); - CREATE TABLE author_rels ( + {create_table} author_rels ( rid INT REFERENCES releases(id), aid INT REFERENCES authors(id), UNIQUE(rid, aid) ); - CREATE TABLE keywords ( + {create_table} keywords ( id SERIAL PRIMARY KEY, name VARCHAR(255), slug VARCHAR(255) NOT NULL UNIQUE ); - CREATE TABLE keyword_rels ( + {create_table} keyword_rels ( rid INT REFERENCES releases(id), kid INT REFERENCES keywords(id), UNIQUE(rid, kid) ); - CREATE TABLE owners ( + {create_table} owners ( id SERIAL PRIMARY KEY, login VARCHAR(255) NOT NULL UNIQUE, avatar VARCHAR(255), name VARCHAR(255), email VARCHAR(255) ); - CREATE TABLE owner_rels ( + {create_table} owner_rels ( cid INT REFERENCES releases(id), oid INT REFERENCES owners(id), UNIQUE(cid, oid) ); - CREATE TABLE builds ( + {create_table} builds ( id SERIAL, rid INT NOT NULL REFERENCES releases(id), rustc_version VARCHAR(100) NOT NULL, @@ -144,7 +179,7 @@ pub fn migrate(version: Option) -> CratesfyiResult<()> { build_time TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP, output TEXT ); - CREATE TABLE queue ( + {create_table} queue ( id SERIAL, name VARCHAR(255), version VARCHAR(100), @@ -152,14 +187,14 @@ pub fn migrate(version: Option) -> CratesfyiResult<()> { date_added TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP, UNIQUE(name, version) ); - CREATE TABLE files ( + {create_table} files ( path VARCHAR(4096) NOT NULL PRIMARY KEY, mime VARCHAR(100) NOT NULL, date_added TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP, date_updated TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP, content BYTEA ); - CREATE TABLE config ( + {create_table} config ( name VARCHAR(100) 
NOT NULL PRIMARY KEY, value JSON NOT NULL ); @@ -170,6 +205,7 @@ pub fn migrate(version: Option) -> CratesfyiResult<()> { owners, releases, crates, builds, queue, files, config;" ), migration!( + context.clone(), // version 2, // description @@ -180,12 +216,13 @@ pub fn migrate(version: Option) -> CratesfyiResult<()> { "ALTER TABLE queue DROP COLUMN priority;" ), migration!( + context.clone(), // version 3, // description "Added sandbox_overrides table", // upgrade query - "CREATE TABLE sandbox_overrides ( + "{create_table} sandbox_overrides ( crate_name VARCHAR NOT NULL PRIMARY KEY, max_memory_bytes INTEGER, timeout_seconds INTEGER @@ -194,6 +231,7 @@ pub fn migrate(version: Option) -> CratesfyiResult<()> { "DROP TABLE sandbox_overrides;" ), migration!( + context.clone(), 4, "Make more fields not null", "ALTER TABLE releases ALTER COLUMN release_time SET NOT NULL, @@ -204,6 +242,7 @@ pub fn migrate(version: Option) -> CratesfyiResult<()> { ALTER COLUMN downloads DROP NOT NULL" ), migration!( + context.clone(), // version 5, // description diff --git a/src/db/mod.rs b/src/db/mod.rs index 616947528..349919628 100644 --- a/src/db/mod.rs +++ b/src/db/mod.rs @@ -2,8 +2,9 @@ pub(crate) use self::add_package::add_package_into_database; pub(crate) use self::add_package::add_build_into_database; +pub(crate) use self::add_package::CratesIoData; pub use self::file::add_path_into_database; -pub use self::migrate::migrate; +pub use self::migrate::{migrate, migrate_temporary}; pub use self::delete_crate::delete_crate; use postgres::{Connection, TlsMode}; diff --git a/src/docbuilder/rustwide_builder.rs b/src/docbuilder/rustwide_builder.rs index 61a1099d7..aef9031a1 100644 --- a/src/docbuilder/rustwide_builder.rs +++ b/src/docbuilder/rustwide_builder.rs @@ -1,6 +1,6 @@ use super::DocBuilder; use db::file::add_path_into_database; -use db::{add_build_into_database, add_package_into_database, connect_db}; +use db::{add_build_into_database, add_package_into_database, connect_db, 
CratesIoData}; use docbuilder::{crates::crates_from_path, Limits}; use error::Result; use failure::ResultExt; @@ -153,7 +153,7 @@ impl RustwideBuilder { .build(&self.toolchain, &krate, sandbox) .run(|build| { let res = self.execute_build(None, build, &limits)?; - if !res.successful { + if !res.result.successful { bail!("failed to build dummy crate for {}", self.rustc_version); } @@ -271,7 +271,7 @@ impl RustwideBuilder { // Do an initial build and then copy the sources in the database let res = self.execute_build(None, &build, &limits)?; - if res.successful { + if res.result.successful { debug!("adding sources into database"); let prefix = format!("sources/{}/{}", name, version); files_list = Some(add_path_into_database( @@ -325,7 +325,7 @@ impl RustwideBuilder { } let has_examples = build.host_source_dir().join("examples").is_dir(); - if res.successful { + if res.result.successful { ::web::metrics::SUCCESSFUL_BUILDS.inc(); } else if res.cargo_metadata.root().is_library() { ::web::metrics::FAILED_BUILDS.inc(); @@ -336,13 +336,15 @@ impl RustwideBuilder { &conn, res.cargo_metadata.root(), &build.host_source_dir(), - &res, + &res.result, files_list, successful_targets, + &res.default_target, + &CratesIoData::get_from_network(res.cargo_metadata.root())?, has_docs, has_examples, )?; - add_build_into_database(&conn, &release_id, &res)?; + add_build_into_database(&conn, &release_id, &res.result)?; doc_builder.add_to_cache(name, version); Ok(res) @@ -351,7 +353,7 @@ impl RustwideBuilder { build_dir.purge()?; krate.purge_from_cache(&self.workspace)?; local_storage.close()?; - Ok(res.successful) + Ok(res.result.successful) } fn build_target( @@ -363,7 +365,7 @@ impl RustwideBuilder { successful_targets: &mut Vec, ) -> Result<()> { let target_res = self.execute_build(Some(target), build, limits)?; - if target_res.successful { + if target_res.result.successful { // Cargo is not giving any error and not generating documentation of some crates // when we use a target compile 
options. Check documentation exists before // adding target to successfully_targets. @@ -381,7 +383,7 @@ impl RustwideBuilder { target: Option<&str>, build: &Build, limits: &Limits, - ) -> Result { + ) -> Result { let metadata = Metadata::from_source_dir(&build.host_source_dir())?; let cargo_metadata = CargoMetadata::load(&self.workspace, &self.toolchain, &build.host_source_dir())?; @@ -446,13 +448,16 @@ impl RustwideBuilder { .is_ok() }); - Ok(BuildResult { - build_log: storage.to_string(), - rustc_version: self.rustc_version.clone(), - docsrs_version: format!("docsrs {}", ::BUILD_VERSION), - successful, + Ok(FullBuildResult { + result: BuildResult { + build_log: storage.to_string(), + rustc_version: self.rustc_version.clone(), + docsrs_version: format!("docsrs {}", ::BUILD_VERSION), + successful, + }, cargo_metadata, target: target.unwrap_or_default().to_string(), + default_target: metadata.default_target.clone(), }) } @@ -497,11 +502,16 @@ impl RustwideBuilder { } } +struct FullBuildResult { + result: BuildResult, + target: String, + default_target: Option, + cargo_metadata: CargoMetadata, +} + pub(crate) struct BuildResult { pub(crate) rustc_version: String, pub(crate) docsrs_version: String, pub(crate) build_log: String, pub(crate) successful: bool, - target: String, - cargo_metadata: CargoMetadata, } diff --git a/src/lib.rs b/src/lib.rs index 7e58b8a55..c7916a0ff 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -53,6 +53,8 @@ pub mod db; pub mod utils; mod docbuilder; mod web; +#[cfg(test)] +mod test; use web::page::GlobalAlert; diff --git a/src/test/fakes.rs b/src/test/fakes.rs new file mode 100644 index 000000000..13f5ede88 --- /dev/null +++ b/src/test/fakes.rs @@ -0,0 +1,93 @@ +use super::TestDatabase; +use crate::docbuilder::BuildResult; +use crate::utils::{Dependency, MetadataPackage, Target}; +use crate::db::CratesIoData; +use failure::Error; +use rustc_serialize::json::Json; + +pub(crate) struct FakeRelease<'db> { + db: &'db TestDatabase, + package: 
MetadataPackage, + build_result: BuildResult, + files: Option, + doc_targets: Vec, + default_target: Option, + cratesio_data: CratesIoData, + has_docs: bool, + has_examples: bool, +} + +impl<'db> FakeRelease<'db> { + pub(super) fn new(db: &'db TestDatabase) -> Self { + FakeRelease { + db, + package: MetadataPackage { + id: "fake-package-id".into(), + name: "fake-package".into(), + version: "1.0.0".into(), + license: Some("MIT".into()), + repository: Some("https://git.example.com".into()), + homepage: Some("https://www.example.com".into()), + description: Some("Fake package".into()), + documentation: Some("https://docs.example.com".into()), + dependencies: vec![Dependency { + name: "fake-dependency".into(), + req: "^1.0.0".into(), + kind: None, + }], + targets: vec![Target::dummy_lib("fake_package".into(), None)], + readme: None, + keywords: vec!["fake".into(), "package".into()], + authors: vec!["Fake Person ".into()], + }, + build_result: BuildResult { + rustc_version: "rustc 2.0.0-nightly (000000000 1970-01-01)".into(), + docsrs_version: "docs.rs 1.0.0 (000000000 1970-01-01)".into(), + build_log: "It works!".into(), + successful: true, + }, + files: None, + doc_targets: Vec::new(), + default_target: None, + cratesio_data: CratesIoData { + release_time: time::get_time(), + yanked: false, + downloads: 0, + owners: Vec::new() + }, + has_docs: true, + has_examples: false, + } + } + + pub(crate) fn name(mut self, new: &str) -> Self { + self.package.name = new.into(); + self.package.id = format!("{}-id", new); + self + } + + pub(crate) fn version(mut self, new: &str) -> Self { + self.package.version = new.into(); + self + } + + pub(crate) fn create(self) -> Result { + let tempdir = tempdir::TempDir::new("docs.rs-fake")?; + + let release_id = crate::db::add_package_into_database( + self.db.conn(), + &self.package, + tempdir.path(), + &self.build_result, + self.files, + self.doc_targets, + &self.default_target, + &self.cratesio_data, + self.has_docs, + self.has_examples, 
+ )?; + crate::db::add_build_into_database(self.db.conn(), &release_id, &self.build_result)?; + + Ok(release_id) + } +} diff --git a/src/test/mod.rs b/src/test/mod.rs new file mode 100644 index 000000000..9f2950cc8 --- /dev/null +++ b/src/test/mod.rs @@ -0,0 +1,43 @@ +mod fakes; + +use postgres::Connection; +use failure::Error; + +pub(crate) fn with_database(f: impl FnOnce(&TestDatabase) -> Result<(), Error>) { + let env = TestDatabase::new().expect("failed to initialize the environment"); + + if let Err(err) = f(&env) { + eprintln!("the test failed: {}", err); + for cause in err.iter_causes() { + eprintln!(" caused by: {}", cause); + } + + eprintln!("{}", err.backtrace()); + + panic!("the test failed"); + } +} + +pub(crate) struct TestDatabase { + conn: Connection, +} + +impl TestDatabase { + fn new() -> Result { + // The temporary migration uses CREATE TEMPORARY TABLE instead of CREATE TABLE, creating + // fresh temporary copies of the database on top of the real one. The temporary tables are + // only visible to this connection, and will be deleted when it exits. 
+ let conn = crate::db::connect_db()?; + crate::db::migrate_temporary(None, &conn)?; + + Ok(TestDatabase { conn }) + } + + pub(crate) fn conn(&self) -> &Connection { + &self.conn + } + + pub(crate) fn fake_release(&self) -> fakes::FakeRelease { + fakes::FakeRelease::new(self) + } +} diff --git a/src/utils/cargo_metadata.rs b/src/utils/cargo_metadata.rs index 7d56b773e..2147c2ef7 100644 --- a/src/utils/cargo_metadata.rs +++ b/src/utils/cargo_metadata.rs @@ -111,6 +111,17 @@ pub(crate) struct Target { pub(crate) src_path: Option, } +impl Target { + #[cfg(test)] + pub(crate) fn dummy_lib(name: String, src_path: Option) -> Self { + Target { + name, + crate_types: vec!["lib".into()], + src_path, + } + } +} + #[derive(RustcDecodable)] pub(crate) struct Dependency { pub(crate) name: String, diff --git a/src/utils/mod.rs b/src/utils/mod.rs index 8db86bd13..06864b9bc 100644 --- a/src/utils/mod.rs +++ b/src/utils/mod.rs @@ -10,6 +10,9 @@ pub use self::html::extract_head_and_body; pub use self::queue::add_crate_to_queue; pub(crate) use self::cargo_metadata::{CargoMetadata, Package as MetadataPackage}; +#[cfg(test)] +pub(crate) use self::cargo_metadata::{Dependency, Target}; + mod cargo_metadata; mod github_updater; mod copy; From 409198cff32d7b0e0b59c02d41d154d7cf8735a8 Mon Sep 17 00:00:00 2001 From: Pietro Albini Date: Thu, 19 Dec 2019 17:36:39 +0100 Subject: [PATCH 6/9] Update src/bin/cratesfyi.rs Co-Authored-By: Joshua Nelson --- src/bin/cratesfyi.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/bin/cratesfyi.rs b/src/bin/cratesfyi.rs index 990a2a653..fc0045225 100644 --- a/src/bin/cratesfyi.rs +++ b/src/bin/cratesfyi.rs @@ -115,7 +115,9 @@ pub fn main() { .about("Updates search index")) .subcommand(SubCommand::with_name("delete-crate") .about("Removes a whole crate from the database") - .arg(Arg::with_name("CRATE_NAME").help("Name of the crate to delete")))) + .arg(Arg::with_name("CRATE_NAME") + .takes_value(true) + .help("Name of the crate to 
delete")))) .subcommand(SubCommand::with_name("queue") .about("Interactions with the build queue") .subcommand(SubCommand::with_name("add") From 9e9696f140a16954efe2eda01b287466cc6ea8e4 Mon Sep 17 00:00:00 2001 From: Pietro Albini Date: Thu, 19 Dec 2019 17:40:38 +0100 Subject: [PATCH 7/9] readme: add link to wiki page on developing outside of docker-compose --- README.md | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index f1c9f2716..e0c2441f3 100644 --- a/README.md +++ b/README.md @@ -91,7 +91,11 @@ cargo test Some tests require access to the database. To run them, set the `CRATESFYI_DATABASE_URL` to the url of a PostgreSQL database. You don't have to run the migrations on it or ensure it's empty, as all the tests use temporary -tables to prevent conflicts with each other or existing data. +tables to prevent conflicts with each other or existing data. See the [wiki +page on developing outside docker-compose][wiki-no-compose] for more +information on how to set up this environment. 
+ +[wiki-no-compose]: https://github.com/rust-lang/docs.rs/wiki/Developing-without-docker-compose ### Docker-Compose From 6aa83d7069236071da25565ec56b66741600b8d4 Mon Sep 17 00:00:00 2001 From: Joshua Nelson Date: Thu, 19 Dec 2019 17:19:46 -0500 Subject: [PATCH 8/9] Don't use unnecessary Rc --- src/db/migrate.rs | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/src/db/migrate.rs b/src/db/migrate.rs index 411e4d775..7ac769257 100644 --- a/src/db/migrate.rs +++ b/src/db/migrate.rs @@ -4,14 +4,15 @@ use error::Result as CratesfyiResult; use postgres::{Connection, transaction::Transaction, Error as PostgresError}; use schemamama::{Migration, Migrator, Version}; use schemamama_postgres::{PostgresAdapter, PostgresMigration}; -use std::rc::Rc; +#[derive(Copy, Clone)] enum ApplyMode { Permanent, Temporary, } +#[derive(Copy, Clone)] struct MigrationContext { apply_mode: ApplyMode, } @@ -40,7 +41,7 @@ impl MigrationContext { macro_rules! migration { ($context:expr, $version:expr, $description:expr, $up:expr, $down:expr $(,)?) 
=> {{ struct Amigration { - ctx: Rc, + ctx: MigrationContext, }; impl Migration for Amigration { fn version(&self) -> Version { @@ -74,7 +75,7 @@ pub fn migrate_temporary(version: Option, conn: &Connection) -> Cratesf } fn migrate_inner(version: Option, conn: &Connection, apply_mode: ApplyMode) -> CratesfyiResult<()> { - let context = Rc::new(MigrationContext { apply_mode }); + let context = MigrationContext { apply_mode }; conn.execute( &context.format_query( @@ -88,7 +89,7 @@ fn migrate_inner(version: Option, conn: &Connection, apply_mode: ApplyM let migrations: Vec> = vec![ migration!( - context.clone(), + context, // version 1, // description @@ -205,7 +206,7 @@ fn migrate_inner(version: Option, conn: &Connection, apply_mode: ApplyM owners, releases, crates, builds, queue, files, config;" ), migration!( - context.clone(), + context, // version 2, // description @@ -216,7 +217,7 @@ fn migrate_inner(version: Option, conn: &Connection, apply_mode: ApplyM "ALTER TABLE queue DROP COLUMN priority;" ), migration!( - context.clone(), + context, // version 3, // description @@ -231,7 +232,7 @@ fn migrate_inner(version: Option, conn: &Connection, apply_mode: ApplyM "DROP TABLE sandbox_overrides;" ), migration!( - context.clone(), + context, 4, "Make more fields not null", "ALTER TABLE releases ALTER COLUMN release_time SET NOT NULL, @@ -242,7 +243,7 @@ fn migrate_inner(version: Option, conn: &Connection, apply_mode: ApplyM ALTER COLUMN downloads DROP NOT NULL" ), migration!( - context.clone(), + context, // version 5, // description From c7a036aa84e7210016a3d2d31d676f53214a7fe6 Mon Sep 17 00:00:00 2001 From: Pietro Albini Date: Fri, 27 Dec 2019 09:36:10 +0100 Subject: [PATCH 9/9] db: automatically replace "CREATE TABLE" in migrations --- src/db/migrate.rs | 43 +++++++++++++++++++++++-------------------- 1 file changed, 23 insertions(+), 20 deletions(-) diff --git a/src/db/migrate.rs b/src/db/migrate.rs index 7ac769257..48dea5e8c 100644 --- a/src/db/migrate.rs +++ 
b/src/db/migrate.rs @@ -4,6 +4,7 @@ use error::Result as CratesfyiResult; use postgres::{Connection, transaction::Transaction, Error as PostgresError}; use schemamama::{Migration, Migrator, Version}; use schemamama_postgres::{PostgresAdapter, PostgresMigration}; +use std::borrow::Cow; #[derive(Copy, Clone)] @@ -18,11 +19,13 @@ struct MigrationContext { } impl MigrationContext { - fn format_query(&self, query: &str) -> String { - query.replace("{create_table}", match self.apply_mode { - ApplyMode::Permanent => "CREATE TABLE", - ApplyMode::Temporary => "CREATE TEMPORARY TABLE", - }) + fn format_query<'a>(&self, query: &'a str) -> Cow<'a, str> { + match self.apply_mode { + ApplyMode::Permanent => Cow::Borrowed(query), + ApplyMode::Temporary => { + Cow::Owned(query.replace("CREATE TABLE", "CREATE TEMPORARY TABLE")) + } + } } } @@ -35,7 +38,7 @@ impl MigrationContext { /// ``` /// let my_migration = migration!(100, /// "Create test table", -/// "{create_table} test ( id SERIAL);", +/// "CREATE TABLE test ( id SERIAL);", /// "DROP TABLE test;"); /// ``` macro_rules! 
migration { @@ -79,7 +82,7 @@ fn migrate_inner(version: Option, conn: &Connection, apply_mode: ApplyM conn.execute( &context.format_query( - "{create_table} IF NOT EXISTS database_versions (version BIGINT PRIMARY KEY);" + "CREATE TABLE IF NOT EXISTS database_versions (version BIGINT PRIMARY KEY);" ), &[], )?; @@ -95,7 +98,7 @@ fn migrate_inner(version: Option, conn: &Connection, apply_mode: ApplyM // description "Initial database schema", // upgrade query - "{create_table} crates ( + "CREATE TABLE crates ( id SERIAL PRIMARY KEY, name VARCHAR(255) UNIQUE NOT NULL, latest_version_id INT DEFAULT 0, @@ -109,7 +112,7 @@ fn migrate_inner(version: Option, conn: &Connection, apply_mode: ApplyM github_last_update TIMESTAMP, content tsvector ); - {create_table} releases ( + CREATE TABLE releases ( id SERIAL PRIMARY KEY, crate_id INT NOT NULL REFERENCES crates(id), version VARCHAR(100), @@ -138,40 +141,40 @@ fn migrate_inner(version: Option, conn: &Connection, apply_mode: ApplyM default_target VARCHAR(100), UNIQUE (crate_id, version) ); - {create_table} authors ( + CREATE TABLE authors ( id SERIAL PRIMARY KEY, name VARCHAR(255), email VARCHAR(255), slug VARCHAR(255) UNIQUE NOT NULL ); - {create_table} author_rels ( + CREATE TABLE author_rels ( rid INT REFERENCES releases(id), aid INT REFERENCES authors(id), UNIQUE(rid, aid) ); - {create_table} keywords ( + CREATE TABLE keywords ( id SERIAL PRIMARY KEY, name VARCHAR(255), slug VARCHAR(255) NOT NULL UNIQUE ); - {create_table} keyword_rels ( + CREATE TABLE keyword_rels ( rid INT REFERENCES releases(id), kid INT REFERENCES keywords(id), UNIQUE(rid, kid) ); - {create_table} owners ( + CREATE TABLE owners ( id SERIAL PRIMARY KEY, login VARCHAR(255) NOT NULL UNIQUE, avatar VARCHAR(255), name VARCHAR(255), email VARCHAR(255) ); - {create_table} owner_rels ( + CREATE TABLE owner_rels ( cid INT REFERENCES releases(id), oid INT REFERENCES owners(id), UNIQUE(cid, oid) ); - {create_table} builds ( + CREATE TABLE builds ( id SERIAL, rid 
INT NOT NULL REFERENCES releases(id), rustc_version VARCHAR(100) NOT NULL, @@ -180,7 +183,7 @@ fn migrate_inner(version: Option, conn: &Connection, apply_mode: ApplyM build_time TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP, output TEXT ); - {create_table} queue ( + CREATE TABLE queue ( id SERIAL, name VARCHAR(255), version VARCHAR(100), @@ -188,14 +191,14 @@ fn migrate_inner(version: Option, conn: &Connection, apply_mode: ApplyM date_added TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP, UNIQUE(name, version) ); - {create_table} files ( + CREATE TABLE files ( path VARCHAR(4096) NOT NULL PRIMARY KEY, mime VARCHAR(100) NOT NULL, date_added TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP, date_updated TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP, content BYTEA ); - {create_table} config ( + CREATE TABLE config ( name VARCHAR(100) NOT NULL PRIMARY KEY, value JSON NOT NULL ); @@ -223,7 +226,7 @@ fn migrate_inner(version: Option, conn: &Connection, apply_mode: ApplyM // description "Added sandbox_overrides table", // upgrade query - "{create_table} sandbox_overrides ( + "CREATE TABLE sandbox_overrides ( crate_name VARCHAR NOT NULL PRIMARY KEY, max_memory_bytes INTEGER, timeout_seconds INTEGER