From 3c511936c26c79b51fa00c3c74516e000035d7eb Mon Sep 17 00:00:00 2001 From: Marvin Hansen Date: Thu, 15 Aug 2024 16:52:34 +0800 Subject: [PATCH 01/73] Added custom array example with documentation. Signed-off-by: Marvin Hansen --- .gitignore | 6 + Cargo.toml | 1 + examples/postgres/custom_arrays/.gitignore | 2 + examples/postgres/custom_arrays/Cargo.toml | 9 + examples/postgres/custom_arrays/README.md | 886 ++++++++++++++++++ examples/postgres/custom_arrays/diesel.toml | 11 + .../postgres/custom_arrays/migrations/.keep | 0 .../down.sql | 6 + .../up.sql | 36 + .../2024-08-15-070500_services/down.sql | 5 + .../2024-08-15-070500_services/up.sql | 29 + examples/postgres/custom_arrays/src/lib.rs | 5 + .../model/endpoint_type/endpoint_type_impl.rs | 37 + .../src/model/endpoint_type/mod.rs | 33 + .../postgres/custom_arrays/src/model/mod.rs | 7 + .../src/model/protocol_type/mod.rs | 47 + .../custom_arrays/src/model/service/mod.rs | 97 ++ .../src/model/service/service_impl.rs | 147 +++ examples/postgres/custom_arrays/src/schema.rs | 26 + examples/postgres/custom_arrays/tests/mod.rs | 2 + .../custom_arrays/tests/service_tests.rs | 268 ++++++ 21 files changed, 1660 insertions(+) create mode 100644 examples/postgres/custom_arrays/.gitignore create mode 100644 examples/postgres/custom_arrays/Cargo.toml create mode 100644 examples/postgres/custom_arrays/README.md create mode 100644 examples/postgres/custom_arrays/diesel.toml create mode 100644 examples/postgres/custom_arrays/migrations/.keep create mode 100644 examples/postgres/custom_arrays/migrations/00000000000000_diesel_initial_setup/down.sql create mode 100644 examples/postgres/custom_arrays/migrations/00000000000000_diesel_initial_setup/up.sql create mode 100644 examples/postgres/custom_arrays/migrations/2024-08-15-070500_services/down.sql create mode 100644 examples/postgres/custom_arrays/migrations/2024-08-15-070500_services/up.sql create mode 100644 examples/postgres/custom_arrays/src/lib.rs create mode 100644 examples/postgres/custom_arrays/src/model/endpoint_type/endpoint_type_impl.rs create mode 100644 examples/postgres/custom_arrays/src/model/endpoint_type/mod.rs create mode 100644 examples/postgres/custom_arrays/src/model/mod.rs create mode 100644 examples/postgres/custom_arrays/src/model/protocol_type/mod.rs create mode 100644 examples/postgres/custom_arrays/src/model/service/mod.rs create mode 100644 examples/postgres/custom_arrays/src/model/service/service_impl.rs create mode 100644 examples/postgres/custom_arrays/src/schema.rs create mode 100644 examples/postgres/custom_arrays/tests/mod.rs create mode 100644 examples/postgres/custom_arrays/tests/service_tests.rs diff --git a/.gitignore b/.gitignore index ec44e67ff3ec..11408af749e3 100644 --- a/.gitignore +++ b/.gitignore @@ -3,3 +3,9 @@ Cargo.lock !diesel_cli/Cargo.lock .env *.snap.new + +./.idea/* +/.idea/.gitignore +/.idea/diesel.iml +/.idea/modules.xml +/.idea/vcs.xml diff --git a/Cargo.toml b/Cargo.toml index 67082d2f400b..c7da4e669941 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -21,6 +21,7 @@ members = [ "examples/postgres/getting_started_step_1", "examples/postgres/getting_started_step_2", "examples/postgres/getting_started_step_3", + "examples/postgres/custom_arrays", "examples/postgres/custom_types", "examples/postgres/composite_types", "examples/postgres/relations", diff --git a/examples/postgres/custom_arrays/.gitignore b/examples/postgres/custom_arrays/.gitignore new file mode 100644 index 000000000000..4f8380647089 --- /dev/null +++ 
b/examples/postgres/custom_arrays/.gitignore @@ -0,0 +1,2 @@ +/target +/.env diff --git a/examples/postgres/custom_arrays/Cargo.toml b/examples/postgres/custom_arrays/Cargo.toml new file mode 100644 index 000000000000..b953ab0a5f24 --- /dev/null +++ b/examples/postgres/custom_arrays/Cargo.toml @@ -0,0 +1,9 @@ +[package] +name = "custom_arrays" +version = "0.1.0" +edition.workspace = true +publish = false + +[dependencies] +diesel = { path = "../../../diesel", version = "2.2.0", features = ["postgres", "r2d2"] } +dotenv = "0.15.0" diff --git a/examples/postgres/custom_arrays/README.md b/examples/postgres/custom_arrays/README.md new file mode 100644 index 000000000000..340907276e99 --- /dev/null +++ b/examples/postgres/custom_arrays/README.md @@ -0,0 +1,886 @@ +# Custom array and custom data in Postgres + +In this guide, you learn more about the concepts listed below and illustrate the actual usage in Diesel with a sample +project. + +**Concepts**: + +* Custom Enum +* Custom type that uses a custom Enum +* Array of custom types +* Serialization / Deserialization of custom type array +* Implementation of DB operations on a Rust Type + +## The project: + +Your company decided to create a database of all microservices in its system; the high-level requirements are: + +* CRUD: Create, read, update, and delete a service in the databse +* List all offline services and all services that are online +* Retrieve all API endpoints for a service + +Upon closer inspection, you realize a few missing details in the requirements: + +* There are many different endpoint types, some are http, some are gRPC, and some are even UDP (i.e. a message bus), but + the number of endpoint types is limited to just three that haven’t changed in years. +* While each service may have one or more endpoints, no two services can share the same API endpoints, which means + normalizing API endpoints in a separate table makes little sense in this case. +* Unmentioned in the requirements, a service depends on other services, and it would be great to test, before deploying + a new service, if all of its dependencies are online. +* Since this part of the database contains only meta-data, its best to store it in a separate schema to separate the + metadata from all other data. + +Thinking further, you realize a few things: + +1) Endpoint type can be expressed as an Enum in Postgres and in Rust +2) Endpoint can be a custom type in Postgres that matches a Rust type +3) Service contains an array of custom type Enum +4) When each service gets a unique ID that also serves as primary key, then service dependencies are basically just a + collection of those unique service IDs. Thus, you just add an integer array in Postgres. +5) Checking all dependencies of a service is then as simple as loading the array of ID’s, iterating over it, check each + dependency if its online, and you are basically done with the core requirement. As the old saying goes, if you can + solve a problem with data structures, by all means, just do it. + +## Getting started + +Let’s crate a new crate, called custom_arrays: + +` +cargo new custom_arrays –lib +` + +Next, run the Diesel setup: + +```bash +diesel setup +``` + +And then generated a new Diesel migration called services: + +```bash +diesel migration generate services +``` + +This creates a new folder within the migration folder containing an empty up.sql and down.sql file. 
+ +## Postgres schema + +Since the service management operates independently from the rest of the system, it is sensible to store all data in a +dedicated schema. A schema in Postgres is like a namespace as it ensures unique table and type names within the schema. +More importantly, you can create a new user that specifically has access to only a particular schema and that is always +a good security practice. To create a new schema with Diesel, you follow three steps: + +1) Declare the schema name in the diesel.toml file +2) Declare the schema in the migration up/down.sql files +3) Prefix all table names in the migration up/down.sql files with the schema + +Postgres uses internally a schema as something like a search path for a table and, by default, searches in the public +schema for a table. If that fails, Postgres returns an error that the table does not exists (in the default schema). +Because of the search path mechanism, you only have to declare the schema name and consistently prefix the tables in the +schema with the schema name and that’s it. + +In your diesel.toml file, add the following entry: + +```toml +[print_schema] +file = "src/schema.rs" +custom_type_derives = ["diesel::query_builder::QueryId", "Clone"] + +schema = "smdb" +``` + +Specifically, in the created migration folder, add the following to your up.sql file + +```sql +-- Your SQL goes here +CREATE SCHEMA IF NOT EXISTS smdb; +``` + +Also, add the corresponding drop operation in the down.sql file. + +```sql +DROP schema IF EXISTS smdb; +``` + +## Postgres Enum + +The company only uses three types of API endpoints, gRPC, http, or UDP. However, because data entry or transmission +errors happen, an UnknownProtocol has been added to catch everything else that may go wrong to ensure no serialization / +deserialization bugs crash the system. Instead, every once a while a Chron job runs over the database and searches for +those UnknownProtocol entries, reports it to the admin who may fixes the incorrect entries one day. Therefore the +Postgres ENUM in your up.sql looks like this: + +```sql +-- Your SQL goes here +CREATE SCHEMA IF NOT EXISTS smdb; + +CREATE TYPE smdb.protocol_type AS ENUM ( + 'UnknownProtocol', + 'GRPC', + 'HTTP', + 'UDP' +); +``` + +Notice, the table name is prefixed by the DB schema to ensure Postgres find it. Also, you add the corresponding drop +type operation to your down.sql file: + +```sql +DROP TYPE IF EXISTS smdb.protocol_type CASCADE; +DROP schema IF EXISTS smdb; +``` + +## Postgres custom type + +The service endpoint in this case is modelled as a custom type. +There are few cases when you want to choose a custom type over a table. + +1) The data have no relation to data in other tables +2) You want to group a bunch of fields i.e. to store a configuration file with nested fields +3) You specifically don’t want to model relations + +In the case of a service endpoint, it really is as simple as every service has one or more endpoints, but these are +certainly not worth storing in a separate table since then you would have to deal with resolving relations during query +time. Rather, when using the custom type, you just access a field that is basically a tuple. 
With that out of the way, +your endpoint type looks like this in your up.sql: + +```sql +-- Your SQL goes here + +CREATE TYPE smdb.service_endpoint AS ( + "name" Text, + "version" INTEGER, + "base_uri" Text, + "port" INTEGER, + "protocol" smdb.protocol_type +); +``` + +And the matching drop operation in the down.sql file: + +```sql +DROP TYPE IF EXISTS smdb.service_endpoint CASCADE; +DROP TYPE IF EXISTS smdb.protocol_type CASCADE; +DROP schema IF EXISTS smdb; +``` + +## Postgres SERIAL vs INTEGER primary key + +There is an important detail you must decide upfront: Internal or External Primary key? + +External primary key refers to the idea that you designate primary keys outside of Postgres. +Let’s think this through: + +### Internal primary key: SERIAL type + +When you only need a primary key as an index, you just use the SERIAL and Postgres gives your data an automatically +incrementing integer primary key. You usually can return the primary key value after inserting so if you need to know +the key, you do get it after insert. + +### External primary key: INTEGER type + +In case you need to know the specific value of the primary key and you need to know it before inserting the data, you +have to assign unique primary keys before inserting data and you have to use the INTEGE type (or any of the many integer +variations) to convey to Postgres that you set the primary key yourself. Notice, you still have to set the NOT NULL and +PRIMARY KEY attribute to ensure data consistency. + +In case of the microservice database, I am afraid, you have to use external primary keys because, remember, a service +depends on other services. In order to insert a service that depends on any other service, regardless of whether it has +already been inserted or not, you have to know the service ID upfront. With that out of the way, let’s define the +service table. + +## Postgres array with a custom type + +To define the service table, the add the following to your up.sql file + +```sql +CREATE TABLE smdb.service( + "service_id" INTEGER NOT NULL PRIMARY KEY, + "name" Text NOT NULL, + "version" INTEGER NOT NULL, + "online" BOOLEAN NOT NULL, + "description" Text NOT NULL, + "health_check_uri" Text NOT NULL, + "base_uri" Text NOT NULL, + "dependencies" INTEGER[] NOT NULL, + "endpoints" smdb.service_endpoint[] NOT NULL +); +``` + +And add the matching drop statement to your down.sql file: + +```sql +-- This file should undo anything in `up.sql` +DROP TABLE IF EXISTS smdb.service; +DROP TYPE IF EXISTS smdb.service_endpoint CASCADE; +DROP TYPE IF EXISTS smdb.protocol_type CASCADE; +DROP schema IF EXISTS smdb; +``` + +A few notes on the service table: + +* Notice the schema prefix appears in both, the table name and in referenced types +* The array type follows the usual convention type[] +* The NOT NULL attribute means that the array itself must be set; Values inside the array still might be null. + +The Diesel team decided that array values are nullable by default because there so many things that may go wrong when +dealing with array values thus making them nullable by default prevents a few potential crashes. Conversely, it also +means you have to unwrap option types whenever you access data deserialized from an array. 
+ +In total, the up.sql looks as below: + +```sql +-- Your SQL goes here +CREATE SCHEMA IF NOT EXISTS smdb; + +CREATE TYPE smdb.protocol_type AS ENUM ( + 'UnknownProtocol', + 'GRPC', + 'HTTP', + 'UDP' +); + +CREATE TYPE smdb.service_endpoint AS ( + "name" Text, + "version" INTEGER, + "base_uri" Text, + "port" INTEGER, + "protocol" smdb.protocol_type +); + +CREATE TABLE smdb.service( + "service_id" INTEGER NOT NULL PRIMARY KEY, + "name" Text NOT NULL, + "version" INTEGER NOT NULL, + "online" BOOLEAN NOT NULL, + "description" Text NOT NULL, + "health_check_uri" Text NOT NULL, + "base_uri" Text NOT NULL, + "dependencies" INTEGER[] NOT NULL, + "endpoints" smdb.service_endpoint[] NOT NULL +); +``` + +Now, it’s time to run the Diesel migration: + +```bash +diesel migration run +``` + +This generates a schema.rs file in the src root: + +```rust +// @generated automatically by Diesel CLI. + +pub mod smdb { + pub mod sql_types { + #[derive(diesel::query_builder::QueryId, Clone, diesel::sql_types::SqlType)] + #[diesel(postgres_type(name = "service_endpoint", schema = "smdb"))] + pub struct ServiceEndpoint; + } + + diesel::table! { + use diesel::sql_types::*; + use super::sql_types::ServiceEndpoint; + + smdb.service (service_id) { + service_id -> Int4, + name -> Text, + version -> Int4, + online -> Bool, + description -> Text, + health_check_uri -> Text, + base_uri -> Text, + dependencies -> Array>, + endpoints -> Array>, + } + } +} +``` + +Notice, the Postgres types are stored in Postgres therefore you are not seeing them in the generated schema. Only tables +will show up in the generates schema. Furthermore, you will need a wrapper struct to map from Rust to the Postgres type. +For the ServiceEndpoint, Diesel already generated a matching wrapper struct with the correct annotations. The service +table then uses that wrapper struct “ServiceEndpoint” instead of the native Postgres type to reference the custom type +in the endpoints array. + +You want to attach the generated schema.rs to your lib file to access it from within your crate and stop your IDE from +showing related warnings + +Next, you want to create a folder model with a mod file in it attached to your lib file. This is your place to store all +Rust type implementations matching all the generated Postgres types and tables. + +Lastly, you also want to add a type alias for a pooled connection in the lib file because that connection definition is +a long and convoluted type in need of a shorthand. + +At this point, your lib file looks as shown below: + +```rust +mod schema; +pub mod model; + +pub type Connection = diesel::r2d2::PooledConnection>; +``` + +## Rust Types + +In total, you implement three Rust types: + +* Enum: ProtocolType +* Struct: Endpoint +* Struct: Service + +### Rust Enum: ProtocolType + +Diesel needs a wrapper struct to store a custom Enum in Postgres, so let’s add one: + +```rust +#[derive(SqlType)] +#[diesel(sql_type = protocol_type)] +#[diesel(postgres_type(name = "protocol_type"))] +pub struct PgProtocolType; +``` + +It is important that you add both, sql_type and postgres_type, otherwise insert will fail. + +Next, let’s define the actual Enum. 
Bear in mind to use the wrapper struct as sql_type on the Enum: + +```rust +#[derive(Debug, Clone, FromSqlRow, AsExpression, PartialEq, Eq)] +#[diesel(sql_type = PgProtocolType)] +pub enum ProtocolType { + UnknownProtocol, + GRPC, + HTTP, + UDP, +} +``` + +It’s worth pointing out that you are dealing with three types in total: + +1) protocol_type: The Postgres Enum type +2) PGProtocolType: The Wrapper struct that refers to the Postgres Enum type +3) ProtocolType: The Rust Enum that refers to the wrapper struct PGProtocolType + +Mixing any of those three types will result in a complicated Diesel trait error. However, these error messages are just +a convoluted way to say that the database type mismatches the Rust type. When you encounter a consulted trait error +message, make sure to check: + +1) Do I have a wrapper struct? +2) Does my a wrapper struct derives SqlType? +3) Does my wrapper type has both, sql_type and postgres_type declared? +4) Are sql_type and postgres_type both refering to the correct Postgres type? +5) Does my Rust type refers to the wrapper struct type? + +If all those checks pass and you still see errors, it’s most likely a serialization error. + +To serialize and deserialize a custom Enum, you write a custom toSql and fromSql implementation. Luckily, this is +straightforward. + +```rust + +impl ToSql for ProtocolType { + fn to_sql<'b>(&'b self, out: &mut Output<'b, '_, Pg>) -> serialize::Result { + match *self { + ProtocolType::UnknownProtocol => out.write_all(b"UnknownProtocol")?, + ProtocolType::GRPC => out.write_all(b"GRPC")?, + ProtocolType::HTTP => out.write_all(b"HTTP")?, + ProtocolType::UDP => out.write_all(b"UDP")?, + } + Ok(IsNull::No) + } +} + +impl FromSql for ProtocolType { + fn from_sql(bytes: PgValue<'_>) -> deserialize::Result { + match bytes.as_bytes() { + b"UnknownProtocol" => Ok(ProtocolType::UnknownProtocol), + b"GRPC" => Ok(ProtocolType::GRPC), + b"HTTP" => Ok(ProtocolType::HTTP), + b"UDP" => Ok(ProtocolType::UDP), + _ => Ok(ProtocolType::UnknownProtocol), + } + } +} +``` + +In toSql, it is important to end the implementation with Ok(IsNull::No). In the from_sql, it is important to add a catch +all case that returns an UnknownProtocol instance. In practice, the catch all rarely ever gets triggered, but in those +few corner cases when it does, it keeps the application running. + +### Rust Struct Endpoint + +```rust +#[derive(Debug, Clone, FromSqlRow, AsExpression, PartialEq, Eq)] +#[diesel(sql_type=crate::schema::smdb::sql_types::ServiceEndpoint)] +pub struct Endpoint { + pub name: String, + pub version: i32, + pub base_uri: String, + pub port: i32, + pub protocol: ProtocolType, +} +``` + +It is worth mentioning is that the sql_type refers to the wrapper struct generated by Diesel and stored in the schema.rs +file. Keep this in mind if you ever refactor the schema.rs file into a different folder because the macro annotation +checks the path during compilation and throws an error if it cannot find the type in the provided path. The serialize +and deserialize implementation of a custom type is not as obvious as the Enum because, internally, Postgres represent a +custom type as an anonymous typed tuple. Therefore, you have to map a struct to a typed tuple and back. + +Let’s start with the toSql implementation to store the Rust Endpoint struct as typed tuple. Luckily, Diesel provides a +helper util to do just that. 
+ +```rust +impl ToSql for Endpoint { + fn to_sql<'b>(&'b self, out: &mut Output<'b, '_, Pg>) -> serialize::Result { + serialize::WriteTuple::<(Text, Integer, Text, Integer, PgProtocolType)>::write_tuple( + &( + self.name.to_owned(), + self.version.to_owned(), + self.base_uri.to_owned(), + self.port.to_owned(), + self.protocol.to_owned(), + ), + &mut out.reborrow(), + ) + } +} +``` + +I cannot stress enough that it is paramount that the tuple type signature must match exactly the Postgres type signature defined in your up.sql. Ideally, you want to use the split view function in your IDE to have the up.sql +in one pane and the the toSql implementation in another pane, both side by side, to double check +that the number and types match. +If the type or number of types mismatch, you will get a compiler error telling you that somehow either +the number of fields don’t match or that the type of the fields don’t match. +Also, because the write_tuple expects values, you have to call either to_owned() or clone() on any referenced data. + +The from_sql reverses the process by converting a Postgres typed tuple back into a Rust struct. +Again, Diesel provides a convenient helper to do so: + +```rust +impl FromSql for Endpoint { + fn from_sql(bytes: PgValue<'_>) -> deserialize::Result { + let (name, version, base_uri, port, protocol) = + FromSql::, Pg>::from_sql(bytes)?; + + Ok(Endpoint { + name, + version, + base_uri, + port, + protocol, + }) + } +} +``` + +Similar to the serialization process, it is paramount that the type annotation match exactly the one used in toSql and +the type definition in up.sql. + +Debugging serialization issues in Diesel is relatively straight forward since the compiler usually points out the +location of the issue and, often, the issue is a type mismatch that is relatively easy to fix. + +It’s worth repeating that you are dealing with three types in total: + +1) service_endpoint: The Postgres custom type +2) ServiceEndpoint: The wrapper struct generated by Diesel that refers to the Postgres type service_endpoint. Also, only + the wrapper struct carries the postgres_type annotation in addition to the an sql_type annotation. +3) Endpoint: The Rust struct with an sql_type annotation refering to the wrapper struct ServiceEndpoint + +Make sure all of those types match correctly. + +### Rust Struct Service + +The service struct gets its serialization and deserialization implementation generated by Diesel so that saves some +typing. +On the other hand, it is a good practice to implement database operations on the actual type itself. +The wisdom here is twofold. For once, you cannot separate the database operation from the type, +therefore it makes sense to implement the operations on the type itself. This also hides implementation details +in encapsulation, +as [discussed in the book](https://doc.rust-lang.org/book/ch17-01-what-is-oo.html#encapsulation-that-hides-implementation-details). + +Second, you gain a single point of maintenance because, in case your table definition changes, you have to update the +Rust type anyways and because Diesel is all statically checked, the compiler will immediately point out where your +operations need to be corrected to match the new type definition. + +The requirements stated we want CRUD operations, and type-based programming suggest creating a different type per +operation. 
Because we use external primary keys, we don’t need an additional type for the delete operation, therefore, +in total we crate 3 service types: + +1) Service: For Read +2) CreateService: For create operations +3) UpdateServce: For update operations + +The Service and CreateService have identical fields. Also, both types require a primary key annotation in addition to +the table name. The UpdateServce, however, has each field wrapped into an option type. When the option type is Some, +Diesel knows that this field needs updating. If the field is set to None, Diesel ignores it. + +```rust +#[derive(Debug, Clone, Queryable, Selectable)] +#[diesel(table_name= crate::schema::smdb::service, primary_key(service_id))] +pub struct Service { + pub service_id: i32, + pub name: String, + pub version: i32, + pub online: bool, + pub description: String, + pub health_check_uri: String, + pub base_uri: String, + pub dependencies: Vec>, + pub endpoints: Vec>, +} + +#[derive(Debug, Clone, Queryable, Insertable)] +#[diesel(table_name= crate::schema::smdb::service, primary_key(service_id))] +pub struct CreateService { + pub service_id: i32, + // ... skipped + pub endpoints: Vec>, +} + +#[derive(Debug, Clone, Queryable, Insertable, AsChangeset)] +#[diesel(table_name= crate::schema::smdb::service)] +pub struct UpdateService { + pub name: Option, + pub version: Option, + pub online: Option, + pub description: Option, + pub health_check_uri: Option, + pub base_uri: Option, + pub dependencies: Option>>, + pub endpoints: Option>>, +} +``` + +Next, let’s implement the CRUD operations on the service type. +Remember the handy connection type alias defined earlier? +This service implementation is the place to use it. + +### **Create** + +```rust +impl Service { + pub fn create(db: &mut Connection, item: &CreateService) -> QueryResult { + insert_into(crate::schema::smdb::service::table) + .values(item) + .get_result::(db) + } + +} +``` + +The insert into function needs the table as target and the value to insert. It is a common convention to return the +inserted value so that the callsite can verify that the insert completed correctly. + +### **Read** + +```rust + pub fn read(db: &mut Connection, param_service_id: i32) -> QueryResult { + service + .filter(service_id.eq(param_service_id)) + .first::(db) + } +``` + +Each service has one unique ID and therefore querying for a service ID returns either exactly one result or an error. +That means, the read operation completes with capturing and returning the first result. In case you have data that may +return multiple entries for a search key, you must change the return type of the read method to QueryResult> +and use load instead of first, just as it shown in the read all method. + +### **Read All** + +```rust + pub fn read_all(db: &mut Connection) -> QueryResult> { + service.load::(db) + } +``` + +Here, we just load everything from the service table and return the entire result as a vector. Note, it is expected that +the service table only has relatively few entries. For tables that hold a large number of data, you can implement +pagination, add limit clause, or some kind of filtering to reduce the number of returned values. 
+ +### **Update** + +```rust + pub fn update( + db: &mut Connection, + param_service_id: i32, + item: &UpdateService, + ) -> QueryResult { + diesel::update(service.filter(service_id.eq(param_service_id))) + .set(item) + .returning(Service::as_returning()) + .get_result(db) + } +``` + +The update method, similar to insert, requires the target table and update values as argument and returns the updated +service as result. Notice, the parameter is of type UpdateService to ensure compiler verification. + +### **Delete** + +```rust + pub fn delete(db: &mut Connection, param_service_id: i32) -> QueryResult { + diesel::delete(service.filter(service_id.eq(param_service_id))).execute(db) + } +``` + +Delete is a standard function provided by Diesel. Notice, the filter searched over the primary key to ensure only one +service with the matching ID gets deleted. However, if you were to filter over a non-unique attribute, you may end up +deleting more data than you though you would. In that case, always run the corresponding SQL query in a SQL console to +double check that your filter criterion returns exactly what you want it to return. + +With the CRUD methods implemented, it’s time to look at the more interesting part of the requirements. + +## Additional methods + +“Unmentioned in the requirements, a service depends on other services, and it would be great to test, before deploying a +new service, if all of its dependencies are online.” + +Here, we have to implement a method to set a service online and another one to set it offline. By experience, a few +non-trivial errors are caused by incorrectly set Boolean flags and whenever you can, hide Boolean flags in your public +API and use a dedicated method instead to the set them. + +Next, we need a method to return true if a service is online. At this point, we also want to add a method that checks if +a service actually is in the database. Imagine, you want to test if you can deploy a new service with, say 5 +dependencies, and somehow the database returns no, you can’t. The obvious question is why? There might be some services +that are offline, fair enough, just start them, but it might also be possible that somehow a service isn’t currently in +the database and that is where you need another check method. + +Then, we want another method that takes a collection of service ID’s, checks all of them and returns either true if all +services are online, or returns an error message that states which service is offline. + +And because we are already here, let’s add another method that returns all services that are online and another one that +returns all services that are currently offline. The latter is handy to quickly identify any deployment blocker. + +### Set Service Online / Offline + +Let’s start with a private helper method that sets the Boolean online flag on a service in the database. + +```rust + fn set_svc_online( + db: &mut Connection, + param_service_id: i32, + param_online: bool, + ) -> QueryResult<()> { + match diesel::update(service.filter(service_id.eq(param_service_id))) + .set(online.eq(param_online)) + .returning(Service::as_returning()) + .get_result(db) + { + Ok(_) => Ok(()), + Err(e) => Err(e), + } + } +``` + +Here the generated DSL really shines as you can easily select a field and set it to a new value. Notice, the return type +is void in case of success. Realize, if you were to return the updated Boolean flag, you would cause a non-trivial +ambiguity on the call-site. 
For setting the Boolean flag true, the result would be true, which is correct, but there is +also a risk of the return value being interpreted as the operation having succeeded. And that matters, because if you +set the flag to false, the same method return false, which is also correct, but could be mis-understood as the operation +didn’t completed because false was returned. A good API is mostly free of ambiguity and therefore, therefore you just +return Ok(()) in case of success because there is no way to misunderstand a returned ok. + +Now, let’s add two public wrapper methods that set the flag to either true or false. + +```rust + pub fn set_service_online(db: &mut Connection, param_service_id: i32) -> QueryResult<()> { + Self::set_svc_online(db, param_service_id, true) + } + + pub fn set_service_offline(db: &mut Connection, param_service_id: i32) -> QueryResult<()> { + Self::set_svc_online(db, param_service_id, false) + } +``` + +### Check Service Exists + +Next up, we add a method to check if service is in the database and the easiest way to implement this is to test if find +returns a result or error. + +```rust + pub fn check_if_service_id_exists( + db: &mut Connection, + param_service_id: i32, + ) -> QueryResult { + match service.find(param_service_id).first::(db) { + Ok(_) => Ok(true), + Err(_) => Ok(false), + } + } +``` + +### Check Service Online + +To check if a service is online, we again lean on the generated DSL: + +```rust + pub fn check_if_service_id_exists( + db: &mut Connection, + param_service_id: i32, + ) -> QueryResult { + match service.find(param_service_id).first::(db) { + Ok(_) => Ok(true), + Err(_) => Ok(false), + } + } +``` + +### Get Service Dependencies + +Notice, if we select dependencies instead of online, we return all the dependencies of a service. Let’s add another +method to do just that: + +```rust + pub fn get_all_service_dependencies( + db: &mut Connection, + param_service_id: i32, + ) -> QueryResult>> { + service + .filter(service_id.eq(param_service_id)) + .select(dependencies) + .first::>>(db) + } +``` + +### Get Service Endpoints + +And likewise, if we select endpoints, we get all API endpoints of a service: + +```rust + pub fn get_all_service_endpoints( + db: &mut Connection, + param_service_id: i32, + ) -> QueryResult>> { + service + .filter(service_id.eq(param_service_id)) + .select(endpoints) + .first::>>(db) + } +``` + +As an observation, when we replace the filter criterion with a test if a service is online, we get all services with +online set to true and that is very easy to implement: + +### Get All Online Services + +```rust + pub fn get_all_online_services(db: &mut Connection) -> QueryResult> { + service + .filter(online.eq(true)) + .select(Service::as_returning()) + .load::(db) + } +``` + +### Get All Offline Services + +Likewise, we use the same filter to +find and return all services with online set to false. + +```rust + pub fn get_all_offline_services(db: &mut Connection) -> QueryResult> { + service + .filter(online.eq(false)) + .select(Service::as_returning()) + .load::(db) + } +``` + +### Check All Serices Online + +Finally, we can implement the last method that takes a collection of service ID’s, checks all of them, and returns +either true, or the service ID that is offline. 
+ +```rust + pub fn check_all_services_online( + db: &mut Connection, + services: &[i32], + ) -> QueryResult<(bool, Option)> { + for id in services { + match Service::check_if_service_id_online(db, *id) { + Ok(res) => { + if !res { + return Ok((false, Some(format!("Service {} is offline", id)))); + } + } + Err(e) => return Err(e), + }; + } + + Ok((true, None)) + } +``` + +Here, it is a matter of taste to return a Boolean flag with an optional string or to encode the case of an offline +service as a custom error. The rational for the Boolean flag is that any service might be offline for any reason so that +doesn’t count as a database error whereas if the query would fail, that would amount to a database error. That said, the +decision depends largely on the design requirements and if you already have custom errors defined, then adding a +ServiceDependencyOfflineError variant should be straight forward. + +At this point, the Service type implementation is complete. I have skipped the testing procedure, but in a nutshell, you +just create a util that connects to a running Postgres server and then returns a connection to run all tests. You find +all tests in the test folder. + +## Applied Best Practices + +Implementing the database operations on the Service type is simple, straight forward, +and follows three best practices: + +1) Take the database connection as a parameter +2) Prefer the generated DSL over custom queries whenever possible +3) Add type annotations on database return types + +### Database connection as a parameter + +The first one refers to the idea that no type should hold application state, and a database connection definitely counts as application state. Instead, you would write a database connection manager +that manages a Postgres connection pool, and then calls into the database methods implemented +in the service type giving it a pooled connection as parameter. + + +### Prefer the generated DSL over custom queries + +The second one refers to the idea that non-complex queries are usually easier and more composable expressed in the +generated DSL than in custom SQL queries and therefore the DSL is preferred in that case. However, if you have to write +complex transactions, there is a chance the DSL isn’t cutting it, so in that scenario you may end up writing custom SQL. +That said, the DSL gets you a long way before you are writing custom SQL. + +### Add type annotations + +The last one, type annotation on database return types, isn’t that obvious because Rust can infer the database return +type from the method return type, so why bother? Why would you write get_result::(db) instead of just +get_result(db) you may wonder? Here is the big secret, type inference in Rust needs occasionally a hint. When you +annotated the database return type and then wants to daisy chain another operation via map or flatmap, without the type +annotation you get a compile error. If you have the type annotation in place, you can add more operations whenever you +want and things compile right away. +To illustrate the last point, the count operator in Postgres only returns an i64 integer. Suppose your application +expects an u64 from the persistence API. Wouldn’t it be nice to make the conversion on the fly? + +Let’s take a closer look. Conventionally, you write the count operation like so: + +```rust + pub fn count(db: &mut Connection) -> QueryResult { + service.count().get_result::(db) + } +``` + +Notice, the get result has a type annotation for i64. If you remove it, the code still compiles. 
But leave it there for +a moment. Let’s add a map operation that takes an i64 returned from the count query and converts it into an u64. Notice, +we have to update the return type of the method as well. + +```rust + pub fn count_u64(db: &mut Connection) -> QueryResult { + service.count() + .get_result::(db) + .map(|c| c as u64) + } +``` + +Run the compile and see that it works. Now, if you were to remove the type annotation from get_result, you get a +compile error saying that trait FromSql is not implemented. + +If you write a database layer for an existing system, this technique comes in handy as you can seamlessly convert +between Diesel DB types and your target types while leaving your target types as it. And because you never know when you have to do this, it’s generally recommended to add type annotations to DB return types. + diff --git a/examples/postgres/custom_arrays/diesel.toml b/examples/postgres/custom_arrays/diesel.toml new file mode 100644 index 000000000000..334408479f8f --- /dev/null +++ b/examples/postgres/custom_arrays/diesel.toml @@ -0,0 +1,11 @@ +# For documentation on how to configure this file, +# see https://diesel.rs/guides/configuring-diesel-cli + +[print_schema] +file = "src/schema.rs" +custom_type_derives = ["diesel::query_builder::QueryId", "Clone"] + +schema = "smdb" + +[migrations_directory] +dir = "migrations" \ No newline at end of file diff --git a/examples/postgres/custom_arrays/migrations/.keep b/examples/postgres/custom_arrays/migrations/.keep new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/examples/postgres/custom_arrays/migrations/00000000000000_diesel_initial_setup/down.sql b/examples/postgres/custom_arrays/migrations/00000000000000_diesel_initial_setup/down.sql new file mode 100644 index 000000000000..a9f526091194 --- /dev/null +++ b/examples/postgres/custom_arrays/migrations/00000000000000_diesel_initial_setup/down.sql @@ -0,0 +1,6 @@ +-- This file was automatically created by Diesel to setup helper functions +-- and other internal bookkeeping. This file is safe to edit, any future +-- changes will be added to existing projects as new migrations. + +DROP FUNCTION IF EXISTS diesel_manage_updated_at(_tbl regclass); +DROP FUNCTION IF EXISTS diesel_set_updated_at(); diff --git a/examples/postgres/custom_arrays/migrations/00000000000000_diesel_initial_setup/up.sql b/examples/postgres/custom_arrays/migrations/00000000000000_diesel_initial_setup/up.sql new file mode 100644 index 000000000000..d68895b1a7b7 --- /dev/null +++ b/examples/postgres/custom_arrays/migrations/00000000000000_diesel_initial_setup/up.sql @@ -0,0 +1,36 @@ +-- This file was automatically created by Diesel to setup helper functions +-- and other internal bookkeeping. This file is safe to edit, any future +-- changes will be added to existing projects as new migrations. 
+ + + + +-- Sets up a trigger for the given table to automatically set a column called +-- `updated_at` whenever the row is modified (unless `updated_at` was included +-- in the modified columns) +-- +-- # Example +-- +-- ```sql +-- CREATE TABLE users (id SERIAL PRIMARY KEY, updated_at TIMESTAMP NOT NULL DEFAULT NOW()); +-- +-- SELECT diesel_manage_updated_at('users'); +-- ``` +CREATE OR REPLACE FUNCTION diesel_manage_updated_at(_tbl regclass) RETURNS VOID AS $$ +BEGIN + EXECUTE format('CREATE TRIGGER set_updated_at BEFORE UPDATE ON %s + FOR EACH ROW EXECUTE PROCEDURE diesel_set_updated_at()', _tbl); +END; +$$ LANGUAGE plpgsql; + +CREATE OR REPLACE FUNCTION diesel_set_updated_at() RETURNS trigger AS $$ +BEGIN + IF ( + NEW IS DISTINCT FROM OLD AND + NEW.updated_at IS NOT DISTINCT FROM OLD.updated_at + ) THEN + NEW.updated_at := current_timestamp; + END IF; + RETURN NEW; +END; +$$ LANGUAGE plpgsql; diff --git a/examples/postgres/custom_arrays/migrations/2024-08-15-070500_services/down.sql b/examples/postgres/custom_arrays/migrations/2024-08-15-070500_services/down.sql new file mode 100644 index 000000000000..2068b001f400 --- /dev/null +++ b/examples/postgres/custom_arrays/migrations/2024-08-15-070500_services/down.sql @@ -0,0 +1,5 @@ +-- This file should undo anything in `up.sql` +DROP TABLE IF EXISTS smdb.service; +DROP TYPE IF EXISTS smdb.service_endpoint CASCADE; +DROP TYPE IF EXISTS smdb.protocol_type CASCADE; +DROP schema IF EXISTS smdb; \ No newline at end of file diff --git a/examples/postgres/custom_arrays/migrations/2024-08-15-070500_services/up.sql b/examples/postgres/custom_arrays/migrations/2024-08-15-070500_services/up.sql new file mode 100644 index 000000000000..9a0c021b7c1a --- /dev/null +++ b/examples/postgres/custom_arrays/migrations/2024-08-15-070500_services/up.sql @@ -0,0 +1,29 @@ +-- Your SQL goes here +CREATE SCHEMA IF NOT EXISTS smdb; + +CREATE TYPE smdb.protocol_type AS ENUM ( + 'UnknownProtocol', + 'GRPC', + 'HTTP', + 'UDP' +); + +CREATE TYPE smdb.service_endpoint AS ( + "name" Text, + "version" INTEGER, + "base_uri" Text, + "port" INTEGER, + "protocol" smdb.protocol_type +); + +CREATE TABLE smdb.service( + "service_id" INTEGER NOT NULL PRIMARY KEY, + "name" Text NOT NULL, + "version" INTEGER NOT NULL, + "online" BOOLEAN NOT NULL, + "description" Text NOT NULL, + "health_check_uri" Text NOT NULL, + "base_uri" Text NOT NULL, + "dependencies" INTEGER[] NOT NULL, + "endpoints" smdb.service_endpoint[] NOT NULL +); \ No newline at end of file diff --git a/examples/postgres/custom_arrays/src/lib.rs b/examples/postgres/custom_arrays/src/lib.rs new file mode 100644 index 000000000000..5845dc0f98fa --- /dev/null +++ b/examples/postgres/custom_arrays/src/lib.rs @@ -0,0 +1,5 @@ +pub mod model; +mod schema; + +pub type Connection = + diesel::r2d2::PooledConnection>; diff --git a/examples/postgres/custom_arrays/src/model/endpoint_type/endpoint_type_impl.rs b/examples/postgres/custom_arrays/src/model/endpoint_type/endpoint_type_impl.rs new file mode 100644 index 000000000000..066d3227f78f --- /dev/null +++ b/examples/postgres/custom_arrays/src/model/endpoint_type/endpoint_type_impl.rs @@ -0,0 +1,37 @@ +use crate::model::endpoint_type::Endpoint; +use crate::model::protocol_type::PgProtocolType; +use diesel::deserialize::FromSql; +use diesel::pg::{Pg, PgValue}; +use diesel::serialize::{Output, ToSql}; +use diesel::sql_types::{Integer, Record, Text}; +use diesel::{deserialize, serialize}; + +impl ToSql for Endpoint { + fn to_sql<'b>(&'b self, out: &mut Output<'b, '_, Pg>) -> 
serialize::Result { + serialize::WriteTuple::<(Text, Integer, Text, Integer, PgProtocolType)>::write_tuple( + &( + self.name.to_owned(), + self.version.to_owned(), + self.base_uri.to_owned(), + self.port.to_owned(), + self.protocol.to_owned(), + ), + &mut out.reborrow(), + ) + } +} + +impl FromSql for Endpoint { + fn from_sql(bytes: PgValue<'_>) -> deserialize::Result { + let (name, version, base_uri, port, protocol) = + FromSql::, Pg>::from_sql(bytes)?; + + Ok(Endpoint { + name, + version, + base_uri, + port, + protocol, + }) + } +} diff --git a/examples/postgres/custom_arrays/src/model/endpoint_type/mod.rs b/examples/postgres/custom_arrays/src/model/endpoint_type/mod.rs new file mode 100644 index 000000000000..09c1b41cbab9 --- /dev/null +++ b/examples/postgres/custom_arrays/src/model/endpoint_type/mod.rs @@ -0,0 +1,33 @@ +use crate::model::protocol_type::ProtocolType; +use diesel::deserialize::FromSqlRow; +use diesel::expression::AsExpression; + +mod endpoint_type_impl; + +#[derive(Debug, Clone, FromSqlRow, AsExpression, PartialEq, Eq)] +#[diesel(sql_type=crate::schema::smdb::sql_types::ServiceEndpoint)] +pub struct Endpoint { + pub name: String, + pub version: i32, + pub base_uri: String, + pub port: i32, + pub protocol: ProtocolType, +} + +impl Endpoint { + pub fn new( + name: String, + version: i32, + base_uri: String, + port: i32, + protocol: ProtocolType, + ) -> Self { + Self { + name, + version, + base_uri, + port, + protocol, + } + } +} diff --git a/examples/postgres/custom_arrays/src/model/mod.rs b/examples/postgres/custom_arrays/src/model/mod.rs new file mode 100644 index 000000000000..c9cf7fa59aca --- /dev/null +++ b/examples/postgres/custom_arrays/src/model/mod.rs @@ -0,0 +1,7 @@ +pub mod endpoint_type; +pub mod service; + +pub mod protocol_type; + +pub type Connection = + diesel::r2d2::PooledConnection>; diff --git a/examples/postgres/custom_arrays/src/model/protocol_type/mod.rs b/examples/postgres/custom_arrays/src/model/protocol_type/mod.rs new file mode 100644 index 000000000000..5af43e599b65 --- /dev/null +++ b/examples/postgres/custom_arrays/src/model/protocol_type/mod.rs @@ -0,0 +1,47 @@ +use diesel::deserialize::{FromSql, FromSqlRow}; +use diesel::expression::AsExpression; +use diesel::pg::{Pg, PgValue}; +use diesel::serialize::{IsNull, Output, ToSql}; +use diesel::sql_types::SqlType; +use diesel::{deserialize, serialize}; +use std::io::Write; + +// Diesel type mapping requires a struct to derive a SqlType for custom ToSql and FromSql implementations +#[derive(SqlType)] +#[diesel(sql_type = protocol_type)] +#[diesel(postgres_type(name = "protocol_type"))] +pub struct PgProtocolType; + +#[derive(Debug, Clone, FromSqlRow, AsExpression, PartialEq, Eq)] +#[diesel(sql_type = PgProtocolType)] +#[allow(clippy::upper_case_acronyms)] +pub enum ProtocolType { + UnknownProtocol, + GRPC, + HTTP, + UDP, +} + +impl ToSql for ProtocolType { + fn to_sql<'b>(&'b self, out: &mut Output<'b, '_, Pg>) -> serialize::Result { + match *self { + ProtocolType::UnknownProtocol => out.write_all(b"UnknownProtocol")?, + ProtocolType::GRPC => out.write_all(b"GRPC")?, + ProtocolType::HTTP => out.write_all(b"HTTP")?, + ProtocolType::UDP => out.write_all(b"UDP")?, + } + Ok(IsNull::No) + } +} + +impl FromSql for ProtocolType { + fn from_sql(bytes: PgValue<'_>) -> deserialize::Result { + match bytes.as_bytes() { + b"UnknownProtocol" => Ok(ProtocolType::UnknownProtocol), + b"GRPC" => Ok(ProtocolType::GRPC), + b"HTTP" => Ok(ProtocolType::HTTP), + b"UDP" => Ok(ProtocolType::UDP), + _ => 
Ok(ProtocolType::UnknownProtocol), + } + } +} diff --git a/examples/postgres/custom_arrays/src/model/service/mod.rs b/examples/postgres/custom_arrays/src/model/service/mod.rs new file mode 100644 index 000000000000..d58cca7b6040 --- /dev/null +++ b/examples/postgres/custom_arrays/src/model/service/mod.rs @@ -0,0 +1,97 @@ +use crate::model::endpoint_type::Endpoint; +use diesel::{AsChangeset, Insertable, Queryable, Selectable}; + +mod service_impl; + +#[derive(Debug, Clone, Queryable, Selectable)] +#[diesel(table_name= crate::schema::smdb::service, primary_key(service_id))] +pub struct Service { + pub service_id: i32, + pub name: String, + pub version: i32, + pub online: bool, + pub description: String, + pub health_check_uri: String, + pub base_uri: String, + pub dependencies: Vec>, + pub endpoints: Vec>, +} + +#[derive(Debug, Clone, Queryable, Insertable)] +#[diesel(table_name= crate::schema::smdb::service, primary_key(service_id))] +pub struct CreateService { + pub service_id: i32, + pub name: String, + pub version: i32, + pub online: bool, + pub description: String, + pub health_check_uri: String, + pub base_uri: String, + pub dependencies: Vec>, + pub endpoints: Vec>, +} + +impl CreateService { + #[allow(clippy::too_many_arguments)] + pub fn new( + service_id: i32, + name: String, + version: i32, + online: bool, + description: String, + health_check_uri: String, + base_uri: String, + dependencies: Vec>, + endpoints: Vec>, + ) -> Self { + Self { + service_id, + name, + version, + online, + description, + health_check_uri, + base_uri, + dependencies, + endpoints, + } + } +} + +#[derive(Debug, Clone, Queryable, Insertable, AsChangeset)] +#[diesel(table_name= crate::schema::smdb::service)] +pub struct UpdateService { + pub name: Option, + pub version: Option, + pub online: Option, + pub description: Option, + pub health_check_uri: Option, + pub base_uri: Option, + pub dependencies: Option>>, + pub endpoints: Option>>, +} + +impl UpdateService { + #[allow(clippy::too_many_arguments)] + pub fn new( + name: Option, + version: Option, + online: Option, + description: Option, + health_check_uri: Option, + base_uri: Option, + dependencies: Option>>, + endpoints: Option>>, + ) -> Self { + Self { + name, + version, + online, + description, + health_check_uri, + base_uri, + dependencies, + endpoints, + } + } +} diff --git a/examples/postgres/custom_arrays/src/model/service/service_impl.rs b/examples/postgres/custom_arrays/src/model/service/service_impl.rs new file mode 100644 index 000000000000..e2137702581e --- /dev/null +++ b/examples/postgres/custom_arrays/src/model/service/service_impl.rs @@ -0,0 +1,147 @@ +use crate::model::endpoint_type::Endpoint; +use crate::model::service::{CreateService, Service, UpdateService}; +use crate::model::Connection; +use crate::schema::smdb::service::dsl::*; +use diesel::{ + insert_into, ExpressionMethods, QueryDsl, QueryResult, RunQueryDsl, SelectableHelper, +}; + +impl Service { + pub fn create(db: &mut Connection, item: &CreateService) -> QueryResult { + insert_into(crate::schema::smdb::service::table) + .values(item) + .get_result::(db) + } + + pub fn count(db: &mut Connection) -> QueryResult { + service.count().get_result::(db) + } + + pub fn count_u64(db: &mut Connection) -> QueryResult { + service.count().get_result::(db).map(|c| c as u64) + } + + pub fn check_if_service_id_exists( + db: &mut Connection, + param_service_id: i32, + ) -> QueryResult { + match service.find(param_service_id).first::(db) { + Ok(_) => Ok(true), + Err(_) => Ok(false), + } + } + + 
pub fn check_all_services_online( + db: &mut Connection, + services: &[i32], + ) -> QueryResult<(bool, Option)> { + for id in services { + match Service::check_if_service_id_online(db, *id) { + Ok(res) => { + if !res { + return Ok((false, Some(format!("Service {} is offline", id)))); + } + } + Err(e) => return Err(e), + }; + } + + Ok((true, None)) + } + + pub fn check_if_service_id_online( + db: &mut Connection, + param_service_id: i32, + ) -> QueryResult { + match service + .filter(service_id.eq(param_service_id)) + .select(online) + .first::(db) + { + Ok(res) => Ok(res), + Err(e) => Err(e), + } + } + + pub fn get_all_online_services(db: &mut Connection) -> QueryResult> { + service + .filter(online.eq(true)) + .select(Service::as_returning()) + .load::(db) + } + + pub fn get_all_offline_services(db: &mut Connection) -> QueryResult> { + service + .filter(online.eq(false)) + .select(Service::as_returning()) + .load::(db) + } + + pub fn get_all_service_dependencies( + db: &mut Connection, + param_service_id: i32, + ) -> QueryResult>> { + service + .filter(service_id.eq(param_service_id)) + .select(dependencies) + .first::>>(db) + } + + pub fn get_all_service_endpoints( + db: &mut Connection, + param_service_id: i32, + ) -> QueryResult>> { + service + .filter(service_id.eq(param_service_id)) + .select(endpoints) + .first::>>(db) + } + + pub fn read(db: &mut Connection, param_service_id: i32) -> QueryResult { + service + .filter(service_id.eq(param_service_id)) + .first::(db) + } + + pub fn read_all(db: &mut Connection) -> QueryResult> { + service.load::(db) + } + + pub fn set_service_online(db: &mut Connection, param_service_id: i32) -> QueryResult<()> { + Self::set_svc_online(db, param_service_id, true) + } + + pub fn set_service_offline(db: &mut Connection, param_service_id: i32) -> QueryResult<()> { + Self::set_svc_online(db, param_service_id, false) + } + + fn set_svc_online( + db: &mut Connection, + param_service_id: i32, + param_online: bool, + ) -> QueryResult<()> { + match diesel::update(service.filter(service_id.eq(param_service_id))) + .set(online.eq(param_online)) + .returning(Service::as_returning()) + .get_result(db) + { + Ok(_) => Ok(()), + Err(e) => Err(e), + } + } + + pub fn update( + db: &mut Connection, + param_service_id: i32, + item: &UpdateService, + ) -> QueryResult { + diesel::update(service.filter(service_id.eq(param_service_id))) + .set(item) + .returning(Service::as_returning()) + .get_result(db) + } + + pub fn delete(db: &mut Connection, param_service_id: i32) -> QueryResult { + diesel::delete(service.filter(service_id.eq(param_service_id))).execute(db) + } +} diff --git a/examples/postgres/custom_arrays/src/schema.rs b/examples/postgres/custom_arrays/src/schema.rs new file mode 100644 index 000000000000..91378b5ced2e --- /dev/null +++ b/examples/postgres/custom_arrays/src/schema.rs @@ -0,0 +1,26 @@ +// @generated automatically by Diesel CLI. + +pub mod smdb { + pub mod sql_types { + #[derive(diesel::query_builder::QueryId, Clone, diesel::sql_types::SqlType)] + #[diesel(postgres_type(name = "service_endpoint", schema = "smdb"))] + pub struct ServiceEndpoint; + } + + diesel::table! 
{ + use diesel::sql_types::*; + use super::sql_types::ServiceEndpoint; + + smdb.service (service_id) { + service_id -> Int4, + name -> Text, + version -> Int4, + online -> Bool, + description -> Text, + health_check_uri -> Text, + base_uri -> Text, + dependencies -> Array>, + endpoints -> Array>, + } + } +} diff --git a/examples/postgres/custom_arrays/tests/mod.rs b/examples/postgres/custom_arrays/tests/mod.rs new file mode 100644 index 000000000000..67ae88e7ba49 --- /dev/null +++ b/examples/postgres/custom_arrays/tests/mod.rs @@ -0,0 +1,2 @@ +#[cfg(test)] +mod service_tests; diff --git a/examples/postgres/custom_arrays/tests/service_tests.rs b/examples/postgres/custom_arrays/tests/service_tests.rs new file mode 100644 index 000000000000..9809478a6c63 --- /dev/null +++ b/examples/postgres/custom_arrays/tests/service_tests.rs @@ -0,0 +1,268 @@ +use custom_arrays::model::endpoint_type::Endpoint; +use custom_arrays::model::protocol_type::ProtocolType; +use custom_arrays::model::service; +use custom_arrays::model::service::{CreateService, UpdateService}; +use custom_arrays::Connection; +use diesel::r2d2::{ConnectionManager, Pool}; +use diesel::PgConnection; +use dotenv::dotenv; +use std::env; + +fn postgres_connection_pool() -> Pool> { + dotenv().ok(); + + let database_url = env::var("DATABASE_URL") + .or_else(|_| env::var("POSTGRES_DATABASE_URL")) + .expect("DATABASE_URL must be set"); + + let manager = ConnectionManager::::new(database_url); + Pool::builder() + .test_on_check_out(true) + .build(manager) + .expect("Could not build connection pool") +} + +#[test] +fn test_service() { + let pool = postgres_connection_pool(); + let conn = &mut pool.get().unwrap(); + + println!("Test create!"); + test_create_service(conn); + + println!("Test count!"); + test_count_service(conn); + + println!("Test check if exists!"); + test_check_if_service_id_exists(conn); + + println!("Test check if online!"); + test_check_if_service_id_online(conn); + + println!("Test get all online services!"); + test_get_all_online_services(conn); + + println!("Test get all offline services!"); + test_get_all_offline_services(conn); + + println!("Test get all service dependencies!"); + test_get_all_service_dependencies(conn); + + println!("Test get all service endpoints!"); + test_get_all_service_endpoints(conn); + + println!("Test read!"); + test_service_read(conn); + + println!("Test read_all!"); + test_service_read_all(conn); + + println!("Test set service online!"); + test_set_service_online(conn); + + println!("Test set service offline!"); + test_set_service_offline(conn); + + println!("Test update service!"); + test_service_update(conn); + + println!("Test delete service!"); + test_service_delete(conn); +} + +fn test_create_service(conn: &mut Connection) { + let grpc_endpoint = Endpoint::new( + "test_grpc_endpoint".to_string(), + 1, + "/grpc".to_string(), + 7070, + ProtocolType::GRPC, + ); + + let http_endpoint = Endpoint::new( + "test_http_endpoint".to_string(), + 1, + "/http".to_string(), + 8080, + ProtocolType::HTTP, + ); + + let endpoints = vec![Some(grpc_endpoint.clone()), Some(http_endpoint.clone())]; + + let dependencies = vec![Some(42)]; + + let service = CreateService { + service_id: 1, + name: "test".to_string(), + version: 1, + online: true, + description: "test".to_string(), + health_check_uri: "http://example.com".to_string(), + base_uri: "http://example.com".to_string(), + dependencies, + endpoints, + }; + + let result = service::Service::create(conn, &service); + + dbg!(&result); + + 
assert!(result.is_ok()); + + let service = result.unwrap(); + + assert_eq!(service.service_id, 1); + assert_eq!(service.name, "test"); + assert_eq!(service.version, 1); + assert_eq!(service.online, true); + assert_eq!(service.description, "test"); + assert_eq!(service.health_check_uri, "http://example.com"); + assert_eq!(service.base_uri, "http://example.com"); + assert_eq!(service.dependencies, vec![Some(42)]); + assert_eq!( + service.endpoints, + vec![Some(grpc_endpoint.clone()), Some(http_endpoint.clone())] + ); +} + +fn test_count_service(conn: &mut Connection) { + let result = service::Service::count(conn); + assert!(result.is_ok()); + assert_eq!(result.unwrap(), 1); +} + +fn test_check_if_service_id_exists(conn: &mut Connection) { + let result = service::Service::check_if_service_id_exists(conn, 1); + assert!(result.is_ok()); + assert!(result.unwrap()); +} + +fn test_check_if_service_id_online(conn: &mut Connection) { + let result = service::Service::check_if_service_id_online(conn, 1); + assert!(result.is_ok()); + assert!(result.unwrap()); +} + +fn test_get_all_online_services(conn: &mut Connection) { + let result = service::Service::get_all_online_services(conn); + assert!(result.is_ok()); + assert!(result.unwrap().len() > 0); +} + +fn test_get_all_offline_services(conn: &mut Connection) { + let result = service::Service::get_all_offline_services(conn); + assert!(result.is_ok()); + assert_eq!(result.unwrap().len(), 0); +} + +fn test_get_all_service_dependencies(conn: &mut Connection) { + let service_id = 1; + + let result = service::Service::get_all_service_dependencies(conn, service_id); + assert!(result.is_ok()); + assert_eq!(result.unwrap().len(), 1); +} + +fn test_get_all_service_endpoints(conn: &mut Connection) { + let service_id = 1; + + let result = service::Service::get_all_service_endpoints(conn, service_id); + assert!(result.is_ok()); + assert_eq!(result.unwrap().len(), 2); +} + +fn test_service_read(conn: &mut Connection) { + let service_id = 1; + + let result = service::Service::read(conn, service_id); + assert!(result.is_ok()); + + let service = result.unwrap(); + + assert_eq!(service.service_id, 1); + assert_eq!(service.name, "test"); + assert_eq!(service.version, 1); + assert_eq!(service.online, true); + assert_eq!(service.description, "test"); + assert_eq!(service.health_check_uri, "http://example.com"); + assert_eq!(service.base_uri, "http://example.com"); + assert_eq!(service.dependencies, vec![Some(42)]); +} + +fn test_service_read_all(conn: &mut Connection) { + let result = service::Service::read_all(conn); + assert!(result.is_ok()); + + let services = result.unwrap(); + assert!(services.len() > 0); +} + +fn test_set_service_online(conn: &mut Connection) { + let service_id = 1; + + let result = service::Service::set_service_online(conn, service_id); + assert!(result.is_ok()); + + let result = service::Service::check_if_service_id_online(conn, service_id); + assert!(result.is_ok()); + assert!(result.unwrap()); +} + +fn test_set_service_offline(conn: &mut Connection) { + let service_id = 1; + + let result = service::Service::set_service_offline(conn, service_id); + assert!(result.is_ok()); + + let result = service::Service::check_if_service_id_online(conn, service_id); + assert!(result.is_ok()); + assert!(!result.unwrap()); +} + +fn test_service_update(conn: &mut Connection) { + // check if service_id exists so we can update the service + let result = service::Service::check_if_service_id_exists(conn, 1); + assert!(result.is_ok()); + assert!(result.unwrap()); + + 
let update = UpdateService::new( + Some("new_test".to_string()), + Some(2), + Some(true), + None, + None, + None, + None, + None, + ); + + let result = service::Service::update(conn, 1, &update); + assert!(result.is_ok()); + + let service = result.unwrap(); + + assert_eq!(service.service_id, 1); + assert_eq!(service.name, "new_test"); + assert_eq!(service.version, 2); + assert_eq!(service.online, true); + assert_eq!(service.description, "test"); + assert_eq!(service.health_check_uri, "http://example.com"); + assert_eq!(service.base_uri, "http://example.com"); + assert_eq!(service.dependencies.len(), 1); + assert_eq!(service.dependencies, vec![Some(42)]); +} + +fn test_service_delete(conn: &mut Connection) { + let result = service::Service::read(conn, 1); + assert!(result.is_ok()); + + let result = service::Service::delete(conn, 1); + assert!(result.is_ok()); + + let result = service::Service::read(conn, 1); + assert!(result.is_err()); + + let result = service::Service::count(conn); + assert!(result.is_ok()); + assert_eq!(result.unwrap(), 0); +} From 233d30b12e93661c79fc5ce47640e6be8b22d29b Mon Sep 17 00:00:00 2001 From: Marvin Hansen Date: Fri, 16 Aug 2024 06:03:52 +0800 Subject: [PATCH 02/73] Fixed a bunch of typos in the custom array example Readme. Signed-off-by: Marvin Hansen --- examples/postgres/custom_arrays/README.md | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/examples/postgres/custom_arrays/README.md b/examples/postgres/custom_arrays/README.md index 340907276e99..77eaafb49e73 100644 --- a/examples/postgres/custom_arrays/README.md +++ b/examples/postgres/custom_arrays/README.md @@ -15,7 +15,7 @@ project. Your company decided to create a database of all microservices in its system; the high-level requirements are: -* CRUD: Create, read, update, and delete a service in the databse +* CRUD: Create, read, update, and delete a service in the database * List all offline services and all services that are online * Retrieve all API endpoints for a service @@ -369,7 +369,7 @@ message, make sure to check: 1) Do I have a wrapper struct? 2) Does my a wrapper struct derives SqlType? 3) Does my wrapper type has both, sql_type and postgres_type declared? -4) Are sql_type and postgres_type both refering to the correct Postgres type? +4) Are sql_type and postgres_type both referring to the correct Postgres type? 5) Does my Rust type refers to the wrapper struct type? If all those checks pass and you still see errors, it’s most likely a serialization error. @@ -486,7 +486,7 @@ It’s worth repeating that you are dealing with three types in total: 1) service_endpoint: The Postgres custom type 2) ServiceEndpoint: The wrapper struct generated by Diesel that refers to the Postgres type service_endpoint. Also, only the wrapper struct carries the postgres_type annotation in addition to the an sql_type annotation. -3) Endpoint: The Rust struct with an sql_type annotation refering to the wrapper struct ServiceEndpoint +3) Endpoint: The Rust struct with an sql_type annotation referring to the wrapper struct ServiceEndpoint Make sure all of those types match correctly. @@ -510,7 +510,7 @@ in total we crate 3 service types: 1) Service: For Read 2) CreateService: For create operations -3) UpdateServce: For update operations +3) UpdateService: For update operations The Service and CreateService have identical fields. Also, both types require a primary key annotation in addition to the table name. The UpdateServce, however, has each field wrapped into an option type. 
When the option type is Some,

@@ -679,7 +679,7 @@ Here the generated DSL really shines as you can easily select a field and set
 it is void in case of success. Realize, if you were to return the updated Boolean flag, you would cause a non-trivial
 ambiguity on the call-site. For setting the Boolean flag true, the result would be true, which is correct, but there is
 also a risk of the return value being interpreted as the operation having succeeded. And that matters, because if you
-set the flag to false, the same method return false, which is also correct, but could be mis-understood as the operation
+set the flag to false, the same method returns false, which is also correct, but could be misunderstood as the operation
 didn't complete because false was returned. A good API is mostly free of ambiguity and therefore you just
 return Ok(()) in case of success because there is no way to misunderstand a returned ok.

@@ -789,7 +789,7 @@ find and return all services with online set to false.
 }
 ```
 
-### Check All Serices Online
+### Check All Services Online
 
 Finally, we can implement the last method that takes a collection of service ID's, checks all of them, and returns
 either true, or the service ID that is offline.

From a4e6985f43a70b23665ee7ff21f5ee8be9886476 Mon Sep 17 00:00:00 2001
From: Marvin Hansen
Date: Fri, 16 Aug 2024 06:14:35 +0800
Subject: [PATCH 03/73] Updated and edited Readme in the custom array example.

Signed-off-by: Marvin Hansen
---
 examples/postgres/custom_arrays/README.md | 23 ++++++++++++-----------
 1 file changed, 12 insertions(+), 11 deletions(-)

diff --git a/examples/postgres/custom_arrays/README.md b/examples/postgres/custom_arrays/README.md
index 77eaafb49e73..e6a5165b8578 100644
--- a/examples/postgres/custom_arrays/README.md
+++ b/examples/postgres/custom_arrays/README.md
@@ -493,27 +493,26 @@ Make sure all of those types match correctly.
 ### Rust Struct Service
 
 The service struct gets its serialization and deserialization implementation generated by Diesel so that saves some
-typing.
-On the other hand, it is a good practice to implement database operations on the actual type itself.
+typing. On the other hand, it is a good practice to implement database operations on the actual type itself.
 The wisdom here is twofold. For one, you cannot separate the database operation from the type,
-therefore it makes sense to implement the operations on the type itself. This also hides implementation details
-in encapsulation,
+therefore it makes sense to implement the operations on the type itself.
+This also hides implementation details in encapsulation,
 as [discussed in the book](https://doc.rust-lang.org/book/ch17-01-what-is-oo.html#encapsulation-that-hides-implementation-details).
 Second, you gain a single point of maintenance because, in case your table definition changes, you have to update the
-Rust type anyways and because Diesel is all statically checked, the compiler will immediately point out where your
+Rust type anyway and because Diesel is statically checked, the compiler will immediately point out where your
 operations need to be corrected to match the new type definition.
 
 The requirements stated we want CRUD operations, and type-based programming suggests creating a different type per
 operation.
 Because we use external primary keys, we don't need an additional type for the delete operation, therefore,
-in total we crate 3 service types:
+in total, we create 3 service types:
 
 1) Service: For Read
 2) CreateService: For create operations
 3) UpdateService: For update operations
 
 The Service and CreateService have identical fields. Also, both types require a primary key annotation in addition to
-the table name. The UpdateServce, however, has each field wrapped into an option type. When the option type is Some,
+the table name. The UpdateService, however, has each field wrapped into an option type. When the option type is Some,
 Diesel knows that this field needs updating. If the field is set to None, Diesel ignores it.
 
 ```rust
@@ -635,10 +634,12 @@ With the CRUD methods implemented, it's time to look at the more interesting p
 
 ## Additional methods
 
-"Unmentioned in the requirements, a service depends on other services, and it would be great to test, before deploying a
+In the requirements, it was stated that:
+
+"... a service depends on other services, and it would be great to test before deploying a
 new service, if all of its dependencies are online."
 
-Here, we have to implement a method to set a service online and another one to set it offline. By experience, a few
+To do so, we have to implement a method that sets a service online and another one to set it offline. By experience, a few
 non-trivial errors are caused by incorrectly set Boolean flags and whenever you can, hide Boolean flags in your public
 API and use a dedicated method instead to set them.
 
@@ -651,8 +652,8 @@ the database and that is where you need another check method.
 Then, we want another method that takes a collection of service ID's, checks all of them and returns either true if
 all services are online, or returns an error message that states which service is offline.
 
-And because we are already here, let's add another method that returns all services that are online and another one that
-returns all services that are currently offline. The latter is handy to quickly identify any deployment blocker.
+Finally, let's add another method that returns all services that are online and another one that
+returns all services that are currently offline. The latter is handy to quickly identify a deployment blocker.

From c8f99381ff70ff909f3daf9355f39da41acedc00 Mon Sep 17 00:00:00 2001
From: Marvin Hansen
Date: Fri, 16 Aug 2024 06:29:59 +0800
Subject: [PATCH 04/73] Applied multiple lints to tests in custom array
 example. Added custom array example to GH CI action.
Signed-off-by: Marvin Hansen --- .github/workflows/ci.yml | 1 + examples/postgres/custom_arrays/tests/service_tests.rs | 10 +++++----- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 52768400afb1..783d05eadb0c 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -310,6 +310,7 @@ jobs: cargo +stable clippy --tests --manifest-path examples/postgres/all_about_updates/Cargo.toml cargo +stable clippy --tests --manifest-path examples/postgres/custom_types/Cargo.toml cargo +stable clippy --tests --manifest-path examples/postgres/composite_types/Cargo.toml + cargo +stable clippy --tests --manifest-path examples/postgres/custom_arrays/Cargo.toml cargo +stable clippy --tests --manifest-path examples/postgres/relations/Cargo.toml cargo +stable clippy --tests --manifest-path diesel_derives/Cargo.toml --features "sqlite diesel/sqlite" diff --git a/examples/postgres/custom_arrays/tests/service_tests.rs b/examples/postgres/custom_arrays/tests/service_tests.rs index 9809478a6c63..7a98312b4b04 100644 --- a/examples/postgres/custom_arrays/tests/service_tests.rs +++ b/examples/postgres/custom_arrays/tests/service_tests.rs @@ -114,7 +114,7 @@ fn test_create_service(conn: &mut Connection) { assert_eq!(service.service_id, 1); assert_eq!(service.name, "test"); assert_eq!(service.version, 1); - assert_eq!(service.online, true); + assert!(service.online); assert_eq!(service.description, "test"); assert_eq!(service.health_check_uri, "http://example.com"); assert_eq!(service.base_uri, "http://example.com"); @@ -146,7 +146,7 @@ fn test_check_if_service_id_online(conn: &mut Connection) { fn test_get_all_online_services(conn: &mut Connection) { let result = service::Service::get_all_online_services(conn); assert!(result.is_ok()); - assert!(result.unwrap().len() > 0); + assert!(!result.unwrap().is_empty()); } fn test_get_all_offline_services(conn: &mut Connection) { @@ -182,7 +182,7 @@ fn test_service_read(conn: &mut Connection) { assert_eq!(service.service_id, 1); assert_eq!(service.name, "test"); assert_eq!(service.version, 1); - assert_eq!(service.online, true); + assert!(service.online); assert_eq!(service.description, "test"); assert_eq!(service.health_check_uri, "http://example.com"); assert_eq!(service.base_uri, "http://example.com"); @@ -194,7 +194,7 @@ fn test_service_read_all(conn: &mut Connection) { assert!(result.is_ok()); let services = result.unwrap(); - assert!(services.len() > 0); + assert!(!services.is_empty()); } fn test_set_service_online(conn: &mut Connection) { @@ -244,7 +244,7 @@ fn test_service_update(conn: &mut Connection) { assert_eq!(service.service_id, 1); assert_eq!(service.name, "new_test"); assert_eq!(service.version, 2); - assert_eq!(service.online, true); + assert!(service.online); assert_eq!(service.description, "test"); assert_eq!(service.health_check_uri, "http://example.com"); assert_eq!(service.base_uri, "http://example.com"); From 0986ddf61f8ac1c4f6bd773be161fa8088d9cb22 Mon Sep 17 00:00:00 2001 From: Marvin Hansen Date: Fri, 16 Aug 2024 06:46:38 +0800 Subject: [PATCH 05/73] Added a lint suppression to stop a false positive. 
Signed-off-by: Marvin Hansen --- diesel/src/pg/query_builder/copy/copy_to.rs | 3 +++ 1 file changed, 3 insertions(+) diff --git a/diesel/src/pg/query_builder/copy/copy_to.rs b/diesel/src/pg/query_builder/copy/copy_to.rs index 49778514f607..a2e4963ca1e6 100644 --- a/diesel/src/pg/query_builder/copy/copy_to.rs +++ b/diesel/src/pg/query_builder/copy/copy_to.rs @@ -273,6 +273,9 @@ where C::get_buffer(out) } + // Suppresses a false positive. See the lint docs: + // https://rust-lang.github.io/rust-clippy/master/index.html#/needless_lifetimes + #[allow(clippy::needless_lifetimes)] fn execute<'conn, T>( &'conn mut self, command: CopyToCommand, From c7fbf34f8da454351cff1d462979f67f8387d010 Mon Sep 17 00:00:00 2001 From: Marvin Hansen Date: Fri, 16 Aug 2024 06:57:39 +0800 Subject: [PATCH 06/73] Fixed tests in custom array example. Signed-off-by: Marvin Hansen --- .../migrations/2024-08-15-070500_services/down.sql | 4 ++-- .../migrations/2024-08-15-070500_services/up.sql | 8 ++++---- examples/postgres/custom_arrays/src/schema.rs | 2 +- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/examples/postgres/custom_arrays/migrations/2024-08-15-070500_services/down.sql b/examples/postgres/custom_arrays/migrations/2024-08-15-070500_services/down.sql index 2068b001f400..11dba6558335 100644 --- a/examples/postgres/custom_arrays/migrations/2024-08-15-070500_services/down.sql +++ b/examples/postgres/custom_arrays/migrations/2024-08-15-070500_services/down.sql @@ -1,5 +1,5 @@ -- This file should undo anything in `up.sql` DROP TABLE IF EXISTS smdb.service; -DROP TYPE IF EXISTS smdb.service_endpoint CASCADE; -DROP TYPE IF EXISTS smdb.protocol_type CASCADE; +DROP TYPE IF EXISTS service_endpoint CASCADE; +DROP TYPE IF EXISTS protocol_type CASCADE; DROP schema IF EXISTS smdb; \ No newline at end of file diff --git a/examples/postgres/custom_arrays/migrations/2024-08-15-070500_services/up.sql b/examples/postgres/custom_arrays/migrations/2024-08-15-070500_services/up.sql index 9a0c021b7c1a..9b473e75fdcd 100644 --- a/examples/postgres/custom_arrays/migrations/2024-08-15-070500_services/up.sql +++ b/examples/postgres/custom_arrays/migrations/2024-08-15-070500_services/up.sql @@ -1,19 +1,19 @@ -- Your SQL goes here CREATE SCHEMA IF NOT EXISTS smdb; -CREATE TYPE smdb.protocol_type AS ENUM ( +CREATE TYPE protocol_type AS ENUM ( 'UnknownProtocol', 'GRPC', 'HTTP', 'UDP' ); -CREATE TYPE smdb.service_endpoint AS ( +CREATE TYPE service_endpoint AS ( "name" Text, "version" INTEGER, "base_uri" Text, "port" INTEGER, - "protocol" smdb.protocol_type + "protocol" protocol_type ); CREATE TABLE smdb.service( @@ -25,5 +25,5 @@ CREATE TABLE smdb.service( "health_check_uri" Text NOT NULL, "base_uri" Text NOT NULL, "dependencies" INTEGER[] NOT NULL, - "endpoints" smdb.service_endpoint[] NOT NULL + "endpoints" service_endpoint[] NOT NULL ); \ No newline at end of file diff --git a/examples/postgres/custom_arrays/src/schema.rs b/examples/postgres/custom_arrays/src/schema.rs index 91378b5ced2e..8b6e84a6af49 100644 --- a/examples/postgres/custom_arrays/src/schema.rs +++ b/examples/postgres/custom_arrays/src/schema.rs @@ -3,7 +3,7 @@ pub mod smdb { pub mod sql_types { #[derive(diesel::query_builder::QueryId, Clone, diesel::sql_types::SqlType)] - #[diesel(postgres_type(name = "service_endpoint", schema = "smdb"))] + #[diesel(postgres_type(name = "service_endpoint"))] pub struct ServiceEndpoint; } From d97c684ef46d6a115cc7d66baecce1cac2864ef2 Mon Sep 17 00:00:00 2001 From: Marvin Hansen Date: Fri, 16 Aug 2024 07:07:22 +0800 Subject: 
[PATCH 07/73] Updated Readme in custom array example to reflect correct custom
 schema usage.

Signed-off-by: Marvin Hansen
---
 examples/postgres/custom_arrays/README.md | 49 +++++++++++++----------
 1 file changed, 27 insertions(+), 22 deletions(-)

diff --git a/examples/postgres/custom_arrays/README.md b/examples/postgres/custom_arrays/README.md
index e6a5165b8578..bba47610c815 100644
--- a/examples/postgres/custom_arrays/README.md
+++ b/examples/postgres/custom_arrays/README.md
@@ -74,10 +74,12 @@ a good security practice. To create a new schema with Diesel, you follow three s
 2) Declare the schema in the migration up/down.sql files
 3) Prefix all table names in the migration up/down.sql files with the schema
 
-Postgres uses internally a schema as something like a search path for a table and, by default, searches in the public
-schema for a table. If that fails, Postgres returns an error that the table does not exists (in the default schema).
-Because of the search path mechanism, you only have to declare the schema name and consistently prefix the tables in the
-schema with the schema name and that's it.
+Postgres internally uses a schema as something like a search path for a table and, by default,
+searches the public schema for a table. If that fails, Postgres returns an error that the table does not exist (in the default schema).
+Because of the search path mechanism, you only have to declare the schema name and consistently prefix the tables in the schema with the schema name, and that's it.
+
+It is important to know that a custom schema only applies to tables. Custom types and enums still
+reside in the default public schema.
 
 In your diesel.toml file, add the following entry:
 
@@ -114,7 +116,7 @@ Postgres ENUM in your up.sql looks like this:
 -- Your SQL goes here
 CREATE SCHEMA IF NOT EXISTS smdb;
 
-CREATE TYPE smdb.protocol_type AS ENUM (
+CREATE TYPE protocol_type AS ENUM (
     'UnknownProtocol',
     'GRPC',
     'HTTP',
@@ -122,11 +124,10 @@
 );
 ```
 
-Notice, the table name is prefixed by the DB schema to ensure Postgres finds it. Also, you add the corresponding drop
-type operation to your down.sql file:
+Add the corresponding drop type operation to your down.sql file:
 
 ```sql
-DROP TYPE IF EXISTS smdb.protocol_type CASCADE;
+DROP TYPE IF EXISTS protocol_type CASCADE;
 DROP schema IF EXISTS smdb;
 ```
 
@@ -147,20 +148,20 @@ your endpoint type looks like this in your up.sql:
 
 ```sql
 -- Your SQL goes here
-CREATE TYPE smdb.service_endpoint AS (
+CREATE TYPE service_endpoint AS (
     "name" Text,
     "version" INTEGER,
     "base_uri" Text,
     "port" INTEGER,
-    "protocol" smdb.protocol_type
+    "protocol" protocol_type
 );
 ```
 
 And the matching drop operation in the down.sql file:
 
 ```sql
-DROP TYPE IF EXISTS smdb.service_endpoint CASCADE;
-DROP TYPE IF EXISTS smdb.protocol_type CASCADE;
+DROP TYPE IF EXISTS service_endpoint CASCADE;
+DROP TYPE IF EXISTS protocol_type CASCADE;
 DROP schema IF EXISTS smdb;
 ```
 
@@ -203,24 +204,28 @@ CREATE TABLE smdb.service(
     "health_check_uri" Text NOT NULL,
     "base_uri" Text NOT NULL,
     "dependencies" INTEGER[] NOT NULL,
-    "endpoints" smdb.service_endpoint[] NOT NULL
+    "endpoints" service_endpoint[] NOT NULL
 );
 ```
 
-And add the matching drop statement to your down.sql file:
+As mentioned earlier, the custom schema applies to tables; therefore, the service table is
+prefixed with the schema name, whereas the endpoint type is not, as it resides in the public schema.
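+
+If you ever want to verify where the types actually ended up, a quick catalog query helps.
+This is just a diagnostic sketch to run in psql, not part of the migration; both custom types
+should report the `public` schema here:
+
+```sql
+-- List the custom types and the schema each one lives in.
+SELECT t.typname, n.nspname AS schema
+FROM pg_type t
+JOIN pg_namespace n ON n.oid = t.typnamespace
+WHERE t.typname IN ('protocol_type', 'service_endpoint');
+```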
+
+Add the matching drop statement to your down.sql file:
+
+```sql
+-- This file should undo anything in `up.sql`
+DROP TABLE IF EXISTS smdb.service;
+DROP TYPE IF EXISTS service_endpoint CASCADE;
+DROP TYPE IF EXISTS protocol_type CASCADE;
+DROP schema IF EXISTS smdb;
+```
 
 A few notes on the service table:
 
-* Notice the schema prefix appears in both, the table name and in referenced types
-* The array type follows the usual convention type[]
+* Notice the schema prefix appears only in the table name.
+* The custom type and custom Enum do not have any prefix because they are in the default schema.
+* The array type follows the usual convention type[].
 * The NOT NULL attribute means that the array itself must be set; values inside the array still might be null.
 
 The Diesel team decided that array values are nullable by default because there are so many things that may go wrong when
 
@@ -233,19 +238,19 @@ In total, the up.sql looks as below:
 -- Your SQL goes here
 CREATE SCHEMA IF NOT EXISTS smdb;
 
-CREATE TYPE smdb.protocol_type AS ENUM (
+CREATE TYPE protocol_type AS ENUM (
     'UnknownProtocol',
     'GRPC',
     'HTTP',
     'UDP'
 );
 
-CREATE TYPE smdb.service_endpoint AS (
+CREATE TYPE service_endpoint AS (
     "name" Text,
     "version" INTEGER,
     "base_uri" Text,
     "port" INTEGER,
-    "protocol" smdb.protocol_type
+    "protocol" protocol_type
 );
 
 CREATE TABLE smdb.service(
@@ -257,7 +262,7 @@
     "health_check_uri" Text NOT NULL,
     "base_uri" Text NOT NULL,
     "dependencies" INTEGER[] NOT NULL,
-    "endpoints" smdb.service_endpoint[] NOT NULL
+    "endpoints" service_endpoint[] NOT NULL
 );
 ```

From bf2bbe408558347d9905aeeb892142e13e94a53c Mon Sep 17 00:00:00 2001
From: Marvin Hansen
Date: Fri, 16 Aug 2024 19:31:06 +0800
Subject: [PATCH 08/73] Custom Array Example: Added embedded schema migration
 and updated the Readme with a section on testing.

Signed-off-by: Marvin Hansen
---
 examples/postgres/custom_arrays/Cargo.toml |   5 +-
 examples/postgres/custom_arrays/README.md  | 219 +++++++++++++++++-
 examples/postgres/custom_arrays/src/lib.rs |  56 +++++
 .../custom_arrays/tests/service_tests.rs   |  31 ++-
 4 files changed, 302 insertions(+), 9 deletions(-)

diff --git a/examples/postgres/custom_arrays/Cargo.toml b/examples/postgres/custom_arrays/Cargo.toml
index b953ab0a5f24..156d7749b536 100644
--- a/examples/postgres/custom_arrays/Cargo.toml
+++ b/examples/postgres/custom_arrays/Cargo.toml
@@ -5,5 +5,8 @@ edition.workspace = true
 publish = false
 
 [dependencies]
-diesel = { path = "../../../diesel", version = "2.2.0", features = ["postgres", "r2d2"] }
+diesel = { path = "../../../diesel", version = "2.2.0", default-features = false, features = ["postgres", "r2d2"] }
+diesel_migrations = { path = "../../../diesel_migrations", version = "2.2.0", default-features = false, features = ["postgres"] }
+
+
 dotenv = "0.15.0"

diff --git a/examples/postgres/custom_arrays/README.md b/examples/postgres/custom_arrays/README.md
index bba47610c815..4d85beb3cb99 100644
--- a/examples/postgres/custom_arrays/README.md
+++ b/examples/postgres/custom_arrays/README.md
@@ -1,9 +1,23 @@
-# Custom array and custom data in Postgres
+# Custom Array and Data Types in Postgres
+
+Table of Contents:
+1. [Concepts](#concepts)
+2. [The project](#the-project)
+3. [Getting started](#getting-started)
+4. [Postgres schema](#postgres-schema)
+5. [Postgres Enum](#postgres-enum)
+6. [Postgres custom type](#postgres-custom-type)
+7. [Postgres SERIAL vs INTEGER primary key](#postgres-serial-vs-integer-primary-key)
+8. [Postgres array with custom type](#postgres-array-with-custom-type)
+9. [Rust Types](#rust-types)
+10. [Additional methods](#additional-methods)
+11. [Applied Best Practices](#applied-best-practices)
+12. [Testing](#testing)
 
 In this guide, you learn more about the concepts listed below and illustrate the actual usage in Diesel with a sample
 project.
 
-**Concepts**:
+## Concepts:
 
 * Custom Enum
 * Custom type that uses a custom Enum
 * Array of custom types
 * Serialization / Deserialization of custom type array
 * Implementation of DB operations on a Rust Type
 
@@ -190,7 +204,7 @@ depends on other services. In order to insert a service that depends on any other
 already been inserted or not, you have to know the service ID upfront.
 With that out of the way, let's define the service table.
 
-## Postgres array with a custom type
+## Postgres array with custom type
 
 To define the service table, then add the following to your up.sql file
 
@@ -826,9 +840,7 @@ doesn't count as a database error whereas if the query would fail, that would
 decision depends largely on the design requirements and if you already have custom errors defined, then adding a
 ServiceDependencyOfflineError variant should be straightforward.
 
-At this point, the Service type implementation is complete. I have skipped the testing procedure, but in a nutshell, you
-just create a util that connects to a running Postgres server and then returns a connection to run all tests. You find
-all tests in the test folder.
+At this point, the Service type implementation is complete.
 
 ## Applied Best Practices
 
@@ -890,3 +902,198 @@ compile error saying that trait FromSql is not implemented.
 If you write a database layer for an existing system, this technique comes in handy as you can seamlessly convert
 between Diesel DB types and your target types while leaving your target types as is. And because you never know when
 you have to do this, it's generally recommended to add type annotations to DB return types.
+## Testing
+
+Diesel enables you to test your database schema and migration early on in the development process.
+To do so, you only need:
+
+* A util that creates a DB connection
+* A util that runs the DB migration
+* And your DB integration tests
+
+### DB Connection Types
+
+Broadly speaking, there are two ways to handle database connections. One way is to create one connection per application
+and use it until the application shuts down. Another way is to create a pool of connections and,
+whenever a database connection is needed for a DB operation, a connection is taken from the pool;
+after the DB operation has been completed, the connection is returned to the pool.
+The first approach is quite common in small applications whereas the second one is commonly used in server applications
+that expect consistent database usage.
+
+**Important**
+
+Using a database connection pool in Diesel requires you to enable the `r2d2` feature in your cargo.toml file.
+
+In Diesel, you can handle connections either way; the only noticeable difference is the actual connection type
+used as the type parameter. For that reason, a type alias for the DB connection was declared early on because
+that allows you to switch between a single connection and a pooled connection without refactoring.
+However, because connection pooling in Diesel is so well supported, I suggest using it by default.
+
+### DB Connection Test Util
+
+Let's start with a small util that returns a DB connection.
+First, you need to get a database URI from somewhere,
+then construct a connection pool, and lastly return the connection pool.
+Remember, this is just a test util, so there is no need to add anything more than necessary.
+
+```rust
+fn postgres_connection_pool() -> Pool<ConnectionManager<PgConnection>> {
+    dotenv().ok();
+
+    let database_url = env::var("DATABASE_URL")
+        .or_else(|_| env::var("POSTGRES_DATABASE_URL"))
+        .expect("DATABASE_URL must be set");
+
+    let manager = ConnectionManager::<PgConnection>::new(database_url);
+    Pool::builder()
+        .test_on_check_out(true)
+        .build(manager)
+        .expect("Could not build connection pool")
+}
+```
+
+Here, we use the dotenv crate to check whether either DATABASE_URL or POSTGRES_DATABASE_URL is set
+and, if so, parse the string and use it to build a connection pool. Therefore, make sure you have one of
+those variables set in your environment. Notice, the test_on_check_out flag is set to true,
+which means the database will be pinged to check that it is actually reachable and the connection is working correctly.
+
+### DB Migration Util
+
+Diesel can run a database migration in one of two ways. First, you can use the Diesel CLI in your terminal
+to generate, run, or revert migrations manually. This is ideal for development when you frequently
+change the database schema. The second way is programmatically via the embedded migration macro,
+which is ideal to build a single binary with all migrations compiled into it so that
+you don't have to install and run the Diesel CLI on the target machine.
+This simplifies deployment of your application and streamlines continuous integration testing.
+
+**Important**
+
+You must add the crate `diesel_migrations` to your cargo.toml and set the target database as a feature flag
+to enable the embedded migration macro.
+
+To serve both purposes, deployment and CI testing, let's add a new function `run_db_migration` to the lib.rs file
+of the crate that takes a connection as parameter, checks if the DB connection is valid,
+checks if there are any pending migrations, and if so, runs all pending migrations.
+The implementation is straightforward, as you can see below:
+
+```rust
+pub fn run_db_migration(
+    conn: &mut Connection,
+) -> Result<(), Box<dyn Error + Send + Sync + 'static>> {
+    // Check DB connection!
+    match conn.ping() {
+        Ok(_) => {}
+        Err(e) => {
+            eprint!("[run_db_migration]: Error connecting to database: {}", e);
+            return Err(Box::new(e));
+        }
+    }
+
+    // Check if DB has pending migrations
+    let has_pending = match conn.has_pending_migration(MIGRATIONS) {
+        Ok(has_pending) => has_pending,
+        Err(e) => {
+            eprint!(
+                "[run_db_migration]: Error checking for pending database migrations: {}",
+                e
+            );
+            return Err(e);
+        }
+    };
+
+    // If so, run all pending migrations.
+    if has_pending {
+        match conn.run_pending_migrations(MIGRATIONS) {
+            Ok(_) => Ok(()),
+            Err(e) => {
+                eprint!("[run_db_migration]: Error migrating database: {}", e);
+                Err(e)
+            }
+        }
+    } else {
+        // Nothing pending, just return
+        Ok(())
+    }
+}
+```
+
+The rationale for checking all potential errors is twofold. For one, because the database connection is given as a parameter,
+you just don't know whether the pool that created it has checked the connection, therefore you better check it to catch a dead connection before using it.
+Second, even if you have a correct database connection, this does not guarantee that the migration will succeed.
+There might be some types left over from a previously aborted drop operation, or
+a random error might have happened at any stage of the migration, therefore you have to handle the error where it occurs.
+Also, because you run the db migration during application startup or before testing,
+ensure you have clear error messages to speed up diagnostics and debugging.
+
+
+### DB Integration Tests
+
+Database integration tests become flaky when executed in parallel, usually because of conflicting read / write operations.
+While modern database systems can handle concurrent data access, test tools with assertions not so much.
+That means test assertions start to fail seemingly randomly when executed concurrently.
+There are only very few viable options to deal with this reality:
+
+* Don't use parallel test execution
+* Only parallelize tests per isolated access
+* Do synchronization and test the actual database state before each test and run tests as atomic transactions
+
+Out of the three options, the last one will almost certainly win you an over-engineering award in the unlikely case
+your colleagues appreciate the resulting test complexity. If not, good luck.
+In practice, not using parallel test execution is often not possible either because of the large number
+of integration tests that run on a CI server. To be clear, strictly sequential testing is a great option
+for small projects with low complexity; it just doesn't scale as the project grows in size and complexity.
+
+And that leaves us only with the middle ground of grouping tables into isolated access.
+Suppose your database has 25 tables you are tasked to test.
+Some of them are clearly unrelated, others only require read access to some tables,
+and then you have those where you have to test for multi-table inserts and updates.
+
+Say you can form 7 groups of tables that are clearly independent of each other;
+then you can run those 7 test groups in parallel, but for all practical purposes, within each group,
+all tests run in sequence unless you are testing specifically for concurrent read / write access.
+You want to put those tests into a dedicated test group anyway, as they capture errors that are more complex
+than errors from your basic integration tests.
+And it makes sense to stage integration tests into simple functional tests, complex workflow tests,
+and chaos tests that trigger read / write conflicts randomly to test for blind spots.
+
+In any case, the test suite for the service example follows the sequential execution pattern so that it can be
+executed in parallel along other test groups without causing randomly failing tests. Specifically,
+the test structure looks as shown below. However, the full test suite is in the test folder.
+
+```rust
+#[test]
+fn test_service() {
+    let pool = postgres_connection_pool();
+    let conn = &mut pool.get().unwrap();
+
+    println!("Test DB migration");
+    test_db_migration(conn);
+
+    println!("Test create!");
+    test_create_service(conn);
+
+    println!("Test count!");
+    test_count_service(conn);
+
+    //...
+}
+
+fn test_db_migration(conn: &mut Connection) {
+    let res = custom_arrays::run_db_migration(conn);
+    //dbg!(&res);
+    assert!(res.is_ok());
+}
+```
+
+The idea here is simple yet powerful: There is just one Rust test, so regardless of whether you test with Cargo,
+Nextest or any other test util, this test will run everything within it in sequence.
+The print statements are usually only shown if a test fails.
+However, there is one important detail worth mentioning:
+the assert macro often obfuscates the error message, and if you have ever seen a complex stack trace
+full of fnOnce invocations, you know that already. To get a more meaningful error message, just uncomment the dbg!
+statement that unwraps the result before the assertion and you will see a helpful error message in most cases.
+
+You may have noticed that the DB migration util checks if there are pending migrations and, if there is nothing,
+it does nothing and just returns. The wisdom behind this decision is that there are certain corner cases
+that only occur when you run a database test multiple times, and you really want to run the DB migration
+just once to simulate that scenario as realistically as possible. When you test locally, the same logic applies
+and you really only want to run a database migration when the schema has changed.
+
diff --git a/examples/postgres/custom_arrays/src/lib.rs b/examples/postgres/custom_arrays/src/lib.rs
index 5845dc0f98fa..d352381d2147 100644
--- a/examples/postgres/custom_arrays/src/lib.rs
+++ b/examples/postgres/custom_arrays/src/lib.rs
@@ -1,5 +1,61 @@
+use diesel::r2d2::R2D2Connection;
+use diesel_migrations::{embed_migrations, EmbeddedMigrations, MigrationHarness};
+use std::error::Error;
+
 pub mod model;
 mod schema;
 
 pub type Connection =
     diesel::r2d2::PooledConnection<diesel::r2d2::ConnectionManager<diesel::PgConnection>>;
+
+pub const MIGRATIONS: EmbeddedMigrations = embed_migrations!("migrations");
+
+/// Runs all pending database migrations.
+///
+/// Will return an error if the database connection is invalid, or if any of the
+/// migrations fail. Otherwise, it returns Ok()
+///
+/// # Errors
+///
+/// * If the database connection is invalid
+/// * If checking for pending database migrations fails
+/// * If any of the database migrations fail
+///
+pub fn run_db_migration(
+    conn: &mut Connection,
+) -> Result<(), Box<dyn Error + Send + Sync + 'static>> {
+    // Check DB connection!
+    match conn.ping() {
+        Ok(_) => {}
+        Err(e) => {
+            eprint!("[run_db_migration]: Error connecting to database: {}", e);
+            return Err(Box::new(e));
+        }
+    }
+
+    // Check if DB has pending migrations
+    let has_pending = match conn.has_pending_migration(MIGRATIONS) {
+        Ok(has_pending) => has_pending,
+        Err(e) => {
+            eprint!(
+                "[run_db_migration]: Error checking for pending database migrations: {}",
+                e
+            );
+            return Err(e);
+        }
+    };
+
+    // If so, run all pending migrations.
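+    // Note: run_pending_migrations() itself only applies migrations that are
+    // still pending, so this early return is an optimization: it avoids
+    // entering the migration harness at all when the schema is up to date.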
+ if has_pending { + match conn.run_pending_migrations(MIGRATIONS) { + Ok(_) => Ok(()), + Err(e) => { + eprint!("[run_db_migration]: Error migrating database: {}", e); + Err(e) + } + } + } else { + // Nothing pending, just return + Ok(()) + } +} diff --git a/examples/postgres/custom_arrays/tests/service_tests.rs b/examples/postgres/custom_arrays/tests/service_tests.rs index 7a98312b4b04..e840716d9656 100644 --- a/examples/postgres/custom_arrays/tests/service_tests.rs +++ b/examples/postgres/custom_arrays/tests/service_tests.rs @@ -22,11 +22,20 @@ fn postgres_connection_pool() -> Pool> { .expect("Could not build connection pool") } +fn test_db_migration(conn: &mut Connection) { + let res = custom_arrays::run_db_migration(conn); + //dbg!(&result); + assert!(res.is_ok()); +} + #[test] fn test_service() { let pool = postgres_connection_pool(); let conn = &mut pool.get().unwrap(); + println!("Test DB migration"); + test_db_migration(conn); + println!("Test create!"); test_create_service(conn); @@ -105,8 +114,7 @@ fn test_create_service(conn: &mut Connection) { let result = service::Service::create(conn, &service); - dbg!(&result); - + // dbg!(&result); assert!(result.is_ok()); let service = result.unwrap(); @@ -127,30 +135,35 @@ fn test_create_service(conn: &mut Connection) { fn test_count_service(conn: &mut Connection) { let result = service::Service::count(conn); + //dbg!(&result); assert!(result.is_ok()); assert_eq!(result.unwrap(), 1); } fn test_check_if_service_id_exists(conn: &mut Connection) { let result = service::Service::check_if_service_id_exists(conn, 1); + //dbg!(&result); assert!(result.is_ok()); assert!(result.unwrap()); } fn test_check_if_service_id_online(conn: &mut Connection) { let result = service::Service::check_if_service_id_online(conn, 1); + //dbg!(&result); assert!(result.is_ok()); assert!(result.unwrap()); } fn test_get_all_online_services(conn: &mut Connection) { let result = service::Service::get_all_online_services(conn); + //dbg!(&result); assert!(result.is_ok()); assert!(!result.unwrap().is_empty()); } fn test_get_all_offline_services(conn: &mut Connection) { let result = service::Service::get_all_offline_services(conn); + //dbg!(&result); assert!(result.is_ok()); assert_eq!(result.unwrap().len(), 0); } @@ -159,6 +172,7 @@ fn test_get_all_service_dependencies(conn: &mut Connection) { let service_id = 1; let result = service::Service::get_all_service_dependencies(conn, service_id); + //dbg!(&result); assert!(result.is_ok()); assert_eq!(result.unwrap().len(), 1); } @@ -167,6 +181,7 @@ fn test_get_all_service_endpoints(conn: &mut Connection) { let service_id = 1; let result = service::Service::get_all_service_endpoints(conn, service_id); + //dbg!(&result); assert!(result.is_ok()); assert_eq!(result.unwrap().len(), 2); } @@ -175,6 +190,7 @@ fn test_service_read(conn: &mut Connection) { let service_id = 1; let result = service::Service::read(conn, service_id); + //dbg!(&result); assert!(result.is_ok()); let service = result.unwrap(); @@ -191,6 +207,7 @@ fn test_service_read(conn: &mut Connection) { fn test_service_read_all(conn: &mut Connection) { let result = service::Service::read_all(conn); + //dbg!(&result); assert!(result.is_ok()); let services = result.unwrap(); @@ -201,9 +218,11 @@ fn test_set_service_online(conn: &mut Connection) { let service_id = 1; let result = service::Service::set_service_online(conn, service_id); + //dbg!(&result); assert!(result.is_ok()); let result = service::Service::check_if_service_id_online(conn, service_id); + //dbg!(&result); 
assert!(result.is_ok()); assert!(result.unwrap()); } @@ -212,9 +231,11 @@ fn test_set_service_offline(conn: &mut Connection) { let service_id = 1; let result = service::Service::set_service_offline(conn, service_id); + //dbg!(&result); assert!(result.is_ok()); let result = service::Service::check_if_service_id_online(conn, service_id); + //dbg!(&result); assert!(result.is_ok()); assert!(!result.unwrap()); } @@ -222,6 +243,7 @@ fn test_set_service_offline(conn: &mut Connection) { fn test_service_update(conn: &mut Connection) { // check if service_id exists so we can update the service let result = service::Service::check_if_service_id_exists(conn, 1); + //dbg!(&result); assert!(result.is_ok()); assert!(result.unwrap()); @@ -237,6 +259,7 @@ fn test_service_update(conn: &mut Connection) { ); let result = service::Service::update(conn, 1, &update); + //dbg!(&result); assert!(result.is_ok()); let service = result.unwrap(); @@ -254,15 +277,19 @@ fn test_service_update(conn: &mut Connection) { fn test_service_delete(conn: &mut Connection) { let result = service::Service::read(conn, 1); + //dbg!(&result); assert!(result.is_ok()); let result = service::Service::delete(conn, 1); + //dbg!(&result); assert!(result.is_ok()); let result = service::Service::read(conn, 1); + //dbg!(&result); assert!(result.is_err()); let result = service::Service::count(conn); + //dbg!(&result); assert!(result.is_ok()); assert_eq!(result.unwrap(), 0); } From 3b8dfb8333250731c52e5181df0016797dc5b2bc Mon Sep 17 00:00:00 2001 From: Marvin Hansen Date: Fri, 16 Aug 2024 19:37:39 +0800 Subject: [PATCH 09/73] Update examples/postgres/custom_arrays/README.md Co-authored-by: Georg Semmler --- examples/postgres/custom_arrays/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/postgres/custom_arrays/README.md b/examples/postgres/custom_arrays/README.md index 4d85beb3cb99..85d3355039b8 100644 --- a/examples/postgres/custom_arrays/README.md +++ b/examples/postgres/custom_arrays/README.md @@ -423,7 +423,7 @@ impl FromSql for ProtocolType { } ``` -In toSql, it is important to end the implementation with Ok(IsNull::No). In the from_sql, it is important to add a catch +In ToSql, it is important to end the implementation with Ok(IsNull::No). In the from_sql, it is important to add a catch all case that returns an UnknownProtocol instance. In practice, the catch all rarely ever gets triggered, but in those few corner cases when it does, it keeps the application running. 
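+
+As a quick illustration — this sketch is not part of the example crate, and it assumes a working
+`PgConnection` named `conn` plus the `ProtocolType` and `PgProtocolType` types from this example —
+a round-trip through Postgres exercises exactly these ToSql/FromSql implementations:
+
+```rust
+use diesel::dsl::sql;
+use diesel::RunQueryDsl;
+
+// Send a known label through Postgres and decode it again via FromSql.
+let proto: ProtocolType = diesel::select(sql::<PgProtocolType>("'GRPC'::protocol_type"))
+    .get_result(conn)
+    .expect("enum round-trip failed");
+assert_eq!(proto, ProtocolType::GRPC);
+```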
From 08c3341ce6768a21abc133cd477404372c10632c Mon Sep 17 00:00:00 2001
From: Marvin Hansen
Date: Sun, 18 Aug 2024 11:35:37 +0800
Subject: [PATCH 10/73] Update
 examples/postgres/custom_arrays/migrations/2024-08-15-070500_services/down.sql
 [no ci]

Style fix in down.sql

Co-authored-by: Georg Semmler
---
 .../migrations/2024-08-15-070500_services/down.sql | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/examples/postgres/custom_arrays/migrations/2024-08-15-070500_services/down.sql b/examples/postgres/custom_arrays/migrations/2024-08-15-070500_services/down.sql
index 11dba6558335..8a00c31272cc 100644
--- a/examples/postgres/custom_arrays/migrations/2024-08-15-070500_services/down.sql
+++ b/examples/postgres/custom_arrays/migrations/2024-08-15-070500_services/down.sql
@@ -2,4 +2,4 @@
 DROP TABLE IF EXISTS smdb.service;
 DROP TYPE IF EXISTS service_endpoint CASCADE;
 DROP TYPE IF EXISTS protocol_type CASCADE;
-DROP schema IF EXISTS smdb;
\ No newline at end of file
+DROP SCHEMA IF EXISTS smdb;
\ No newline at end of file

From 26d773560680f8b92aa2407c645fd540f36419d1 Mon Sep 17 00:00:00 2001
From: Marvin Hansen
Date: Sun, 18 Aug 2024 11:37:25 +0800
Subject: [PATCH 11/73] Update
 examples/postgres/custom_arrays/src/model/service/service_impl.rs [skip ci]

Co-authored-by: Georg Semmler
---
 .../custom_arrays/src/model/service/service_impl.rs | 6 +-----
 1 file changed, 1 insertion(+), 5 deletions(-)

diff --git a/examples/postgres/custom_arrays/src/model/service/service_impl.rs b/examples/postgres/custom_arrays/src/model/service/service_impl.rs
index e2137702581e..150258639727 100644
--- a/examples/postgres/custom_arrays/src/model/service/service_impl.rs
+++ b/examples/postgres/custom_arrays/src/model/service/service_impl.rs
@@ -53,14 +53,10 @@ impl Service {
         db: &mut Connection,
         param_service_id: i32,
     ) -> QueryResult<bool> {
-        match service
+        service
             .filter(service_id.eq(param_service_id))
             .select(online)
             .first::<bool>(db)
-        {
-            Ok(res) => Ok(res),
-            Err(e) => Err(e),
-        }
     }
 
     pub fn get_all_online_services(db: &mut Connection) -> QueryResult<Vec<Service>> {

From 3160ba86b211eb830812ae79bdce09f7d406393f Mon Sep 17 00:00:00 2001
From: Marvin Hansen
Date: Sun, 18 Aug 2024 11:38:42 +0800
Subject: [PATCH 12/73] Update
 examples/postgres/custom_arrays/src/model/service/service_impl.rs [skip ci]

Updated check if service exists to return false on error, as suggested

Co-authored-by: Georg Semmler
---
 .../postgres/custom_arrays/src/model/service/service_impl.rs | 5 +----
 1 file changed, 1 insertion(+), 4 deletions(-)

diff --git a/examples/postgres/custom_arrays/src/model/service/service_impl.rs b/examples/postgres/custom_arrays/src/model/service/service_impl.rs
index 150258639727..b4408cac15f0 100644
--- a/examples/postgres/custom_arrays/src/model/service/service_impl.rs
+++ b/examples/postgres/custom_arrays/src/model/service/service_impl.rs
@@ -25,10 +25,7 @@ impl Service {
         db: &mut Connection,
         param_service_id: i32,
     ) -> QueryResult<bool> {
-        match service.find(param_service_id).first::<Service>(db) {
-            Ok(_) => Ok(true),
-            Err(_) => Ok(false),
-        }
+        service.find(param_service_id).first::<Service>(db).optional().map(Option::is_some)
     }
 
     pub fn check_all_services_online(

From 3ab39fac00196d6270dd2be8b275a895227febe5 Mon Sep 17 00:00:00 2001
From: Marvin Hansen
Date: Sun, 18 Aug 2024 11:39:15 +0800
Subject: [PATCH 13/73] Update .gitignore

---
 .gitignore | 6 ------
 1 file changed, 6 deletions(-)

diff --git a/.gitignore b/.gitignore
index 11408af749e3..ec44e67ff3ec 100644
--- a/.gitignore
+++ b/.gitignore
@@ -3,9 +3,3 @@ Cargo.lock
 !diesel_cli/Cargo.lock
 .env
 *.snap.new
-
-./.idea/*
-/.idea/.gitignore
-/.idea/diesel.iml
-/.idea/modules.xml
-/.idea/vcs.xml

From 52008d43e987bdca183a6c31fb32d2df91f91750 Mon Sep 17 00:00:00 2001
From: Marvin Hansen
Date: Sun, 18 Aug 2024 11:43:44 +0800
Subject: [PATCH 14/73] Apply suggestions from code review [no ci]

Several minor updates in the Readme

Co-authored-by: Georg Semmler
---
 examples/postgres/custom_arrays/README.md | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/examples/postgres/custom_arrays/README.md b/examples/postgres/custom_arrays/README.md
index 85d3355039b8..4410ec2092cc 100644
--- a/examples/postgres/custom_arrays/README.md
+++ b/examples/postgres/custom_arrays/README.md
@@ -122,7 +122,7 @@ DROP schema IF EXISTS smdb;
 
 The company only uses three types of API endpoints, gRPC, http, or UDP. However, because data entry or transmission
 errors happen, an UnknownProtocol has been added to catch everything else that may go wrong to ensure no serialization /
-deserialization bugs crash the system. Instead, every once a while a Chron job runs over the database and searches for
+deserialization bugs crash the system. Instead, every once in a while a Cron job runs over the database and searches for
 those UnknownProtocol entries, reports them to the admin who may fix the incorrect entries one day. Therefore the
 Postgres ENUM in your up.sql looks like this:
 
@@ -393,7 +393,7 @@ message, make sure to check:
 
 If all those checks pass and you still see errors, it's most likely a serialization error.
 
-To serialize and deserialize a custom Enum, you write a custom toSql and fromSql implementation. Luckily, this is
+To serialize and deserialize a custom Enum, you write a custom ToSql and FromSql implementation. Luckily, this is
 straightforward.
 
 ```rust
@@ -468,7 +468,7 @@ impl ToSql for Endpoint {
 ```
 I cannot stress enough that it is paramount that the tuple type signature must match exactly the Postgres type
 signature defined in your up.sql. Ideally, you want to use the split view function in your IDE to have the up.sql
-in one pane and the the toSql implementation in another pane, both side by side, to double check
+in one pane and the ToSql implementation in another pane, both side by side, to double check
 that the number and types match. If the type or number of types mismatch, you will get a compiler error telling you
 that somehow either the number of fields don't match or that the type of the fields don't match.
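+
+One way to keep that correspondence visible — shown here as a sketch, assuming `PgProtocolType` is in
+scope; the example crate itself spells the tuple out inline — is to write the field-by-field mapping
+next to a named alias for the tuple type:
+
+```rust
+use diesel::sql_types::{Integer, Text};
+
+// Postgres composite (up.sql)   ->  Diesel SQL type tuple
+//   "name"     Text             ->  Text
+//   "version"  INTEGER          ->  Integer
+//   "base_uri" Text             ->  Text
+//   "port"     INTEGER          ->  Integer
+//   "protocol" protocol_type    ->  PgProtocolType
+type ServiceEndpointRecord = (Text, Integer, Text, Integer, PgProtocolType);
+```
+
+The alias name `ServiceEndpointRecord` is hypothetical; the point is that the tuple must mirror the
+composite type exactly, field by field and in order.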
From 7e85c579b08d18893d090e0d16ef4aa69d56f39b Mon Sep 17 00:00:00 2001 From: Marvin Hansen Date: Sun, 18 Aug 2024 11:44:26 +0800 Subject: [PATCH 15/73] Update examples/postgres/custom_arrays/src/model/service/service_impl.rs [no ci] updated service impl Co-authored-by: Georg Semmler --- .../custom_arrays/src/model/service/service_impl.rs | 11 +++-------- 1 file changed, 3 insertions(+), 8 deletions(-) diff --git a/examples/postgres/custom_arrays/src/model/service/service_impl.rs b/examples/postgres/custom_arrays/src/model/service/service_impl.rs index b4408cac15f0..4d9545138927 100644 --- a/examples/postgres/custom_arrays/src/model/service/service_impl.rs +++ b/examples/postgres/custom_arrays/src/model/service/service_impl.rs @@ -33,14 +33,9 @@ impl Service { services: &[i32], ) -> QueryResult<(bool, Option)> { for id in services { - match Service::check_if_service_id_online(db, *id) { - Ok(res) => { - if !res { - return Ok((false, Some(format!("Service {} is offline", id)))); - } - } - Err(e) => return Err(e), - }; + if !Service::check_if_service_id_online(db, *id)? { + return Ok((false, Some(format!("Service {} is offline", id)))); + } } Ok((true, None)) From 7cc2d915e59d9503ae9e29ca82ce0d936294ad93 Mon Sep 17 00:00:00 2001 From: Marvin Hansen Date: Sun, 18 Aug 2024 11:45:42 +0800 Subject: [PATCH 16/73] [no ci] Update examples/postgres/custom_arrays/src/model/service/service_impl.rs Updated update service method as to remove unneeded data loading. Co-authored-by: Georg Semmler --- .../custom_arrays/src/model/service/service_impl.rs | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/examples/postgres/custom_arrays/src/model/service/service_impl.rs b/examples/postgres/custom_arrays/src/model/service/service_impl.rs index 4d9545138927..d76e3b1cbd81 100644 --- a/examples/postgres/custom_arrays/src/model/service/service_impl.rs +++ b/examples/postgres/custom_arrays/src/model/service/service_impl.rs @@ -108,14 +108,10 @@ impl Service { param_service_id: i32, param_online: bool, ) -> QueryResult<()> { - match diesel::update(service.filter(service_id.eq(param_service_id))) + diesel::update(service.filter(service_id.eq(param_service_id))) .set(online.eq(param_online)) - .returning(Service::as_returning()) - .get_result(db) - { - Ok(_) => Ok(()), - Err(e) => Err(e), - } + .execute(db)?; + Ok(()) } pub fn update( From 829ac272bd476991e0dade12323152ce015d22ae Mon Sep 17 00:00:00 2001 From: Marvin Hansen Date: Sun, 18 Aug 2024 12:07:41 +0800 Subject: [PATCH 17/73] changed dotenv to dotenvy Signed-off-by: Marvin Hansen --- .../postgres/custom_arrays/src/model/service/service_impl.rs | 4 ++-- examples/postgres/custom_arrays/tests/service_tests.rs | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/examples/postgres/custom_arrays/src/model/service/service_impl.rs b/examples/postgres/custom_arrays/src/model/service/service_impl.rs index d76e3b1cbd81..2d7772ee29c6 100644 --- a/examples/postgres/custom_arrays/src/model/service/service_impl.rs +++ b/examples/postgres/custom_arrays/src/model/service/service_impl.rs @@ -3,7 +3,7 @@ use crate::model::service::{CreateService, Service, UpdateService}; use crate::model::Connection; use crate::schema::smdb::service::dsl::*; use diesel::{ - insert_into, ExpressionMethods, QueryDsl, QueryResult, RunQueryDsl, SelectableHelper, + insert_into, ExpressionMethods, OptionalExtension, QueryDsl, QueryResult, RunQueryDsl, SelectableHelper, }; impl Service { @@ -25,7 +25,7 @@ impl Service { db: &mut Connection, param_service_id: i32, 
) -> QueryResult { - service.find(param_service_id).first::(db).optional().map(Option::is_some) + service.find(param_service_id).first::(db).optional().map(|arg0: Option| Option::is_some(&arg0)) } pub fn check_all_services_online( diff --git a/examples/postgres/custom_arrays/tests/service_tests.rs b/examples/postgres/custom_arrays/tests/service_tests.rs index e840716d9656..6962e027e532 100644 --- a/examples/postgres/custom_arrays/tests/service_tests.rs +++ b/examples/postgres/custom_arrays/tests/service_tests.rs @@ -5,7 +5,7 @@ use custom_arrays::model::service::{CreateService, UpdateService}; use custom_arrays::Connection; use diesel::r2d2::{ConnectionManager, Pool}; use diesel::PgConnection; -use dotenv::dotenv; +use dotenvy::dotenv; use std::env; fn postgres_connection_pool() -> Pool> { From 9e8c7ad5fcf3f2ebf9c8bdf166e4ddac5f773cec Mon Sep 17 00:00:00 2001 From: Marvin Hansen Date: Sun, 18 Aug 2024 12:10:13 +0800 Subject: [PATCH 18/73] Moved Endpoint impl into type definition file. (See [comment](https://github.com/diesel-rs/diesel/pull/4169#discussion_r1719596303)) Signed-off-by: Marvin Hansen --- examples/postgres/custom_arrays/Cargo.toml | 4 +- .../model/endpoint_type/endpoint_type_impl.rs | 37 ----------------- .../src/model/endpoint_type/mod.rs | 40 +++++++++++++++++-- 3 files changed, 39 insertions(+), 42 deletions(-) delete mode 100644 examples/postgres/custom_arrays/src/model/endpoint_type/endpoint_type_impl.rs diff --git a/examples/postgres/custom_arrays/Cargo.toml b/examples/postgres/custom_arrays/Cargo.toml index 156d7749b536..c6c1782861ca 100644 --- a/examples/postgres/custom_arrays/Cargo.toml +++ b/examples/postgres/custom_arrays/Cargo.toml @@ -8,5 +8,5 @@ publish = false diesel = { path = "../../../diesel", version = "2.2.0", default-features = false, features = ["postgres", "r2d2"] } diesel_migrations = { path = "../../../diesel_migrations", version = "2.2.0", default-features = false, features = ["postgres"] } - -dotenv = "0.15.0" +[dev-dependencies] +dotenvy = "0.15" diff --git a/examples/postgres/custom_arrays/src/model/endpoint_type/endpoint_type_impl.rs b/examples/postgres/custom_arrays/src/model/endpoint_type/endpoint_type_impl.rs deleted file mode 100644 index 066d3227f78f..000000000000 --- a/examples/postgres/custom_arrays/src/model/endpoint_type/endpoint_type_impl.rs +++ /dev/null @@ -1,37 +0,0 @@ -use crate::model::endpoint_type::Endpoint; -use crate::model::protocol_type::PgProtocolType; -use diesel::deserialize::FromSql; -use diesel::pg::{Pg, PgValue}; -use diesel::serialize::{Output, ToSql}; -use diesel::sql_types::{Integer, Record, Text}; -use diesel::{deserialize, serialize}; - -impl ToSql for Endpoint { - fn to_sql<'b>(&'b self, out: &mut Output<'b, '_, Pg>) -> serialize::Result { - serialize::WriteTuple::<(Text, Integer, Text, Integer, PgProtocolType)>::write_tuple( - &( - self.name.to_owned(), - self.version.to_owned(), - self.base_uri.to_owned(), - self.port.to_owned(), - self.protocol.to_owned(), - ), - &mut out.reborrow(), - ) - } -} - -impl FromSql for Endpoint { - fn from_sql(bytes: PgValue<'_>) -> deserialize::Result { - let (name, version, base_uri, port, protocol) = - FromSql::, Pg>::from_sql(bytes)?; - - Ok(Endpoint { - name, - version, - base_uri, - port, - protocol, - }) - } -} diff --git a/examples/postgres/custom_arrays/src/model/endpoint_type/mod.rs b/examples/postgres/custom_arrays/src/model/endpoint_type/mod.rs index 09c1b41cbab9..de0d1c4cd8e8 100644 --- a/examples/postgres/custom_arrays/src/model/endpoint_type/mod.rs +++ 
@@ -1,8 +1,11 @@
-use crate::model::protocol_type::ProtocolType;
-use diesel::deserialize::FromSqlRow;
+use crate::model::protocol_type::{PgProtocolType, ProtocolType};
+use diesel::deserialize::{FromSql, FromSqlRow};
 use diesel::expression::AsExpression;
+use diesel::pg::{Pg, PgValue};
+use diesel::{deserialize, serialize};
+use diesel::serialize::{Output, ToSql};
+use diesel::sql_types::{Integer, Record, Text};
-mod endpoint_type_impl;

 #[derive(Debug, Clone, FromSqlRow, AsExpression, PartialEq, Eq)]
 #[diesel(sql_type=crate::schema::smdb::sql_types::ServiceEndpoint)]
@@ -31,3 +34,34 @@ impl Endpoint {
     }
 }
+
+
+impl ToSql<crate::schema::smdb::sql_types::ServiceEndpoint, Pg> for Endpoint {
+    fn to_sql<'b>(&'b self, out: &mut Output<'b, '_, Pg>) -> serialize::Result {
+        serialize::WriteTuple::<(Text, Integer, Text, Integer, PgProtocolType)>::write_tuple(
+            &(
+                self.name.to_owned(),
+                self.version.to_owned(),
+                self.base_uri.to_owned(),
+                self.port.to_owned(),
+                self.protocol.to_owned(),
+            ),
+            &mut out.reborrow(),
+        )
+    }
+}
+
+impl FromSql<crate::schema::smdb::sql_types::ServiceEndpoint, Pg> for Endpoint {
+    fn from_sql(bytes: PgValue<'_>) -> deserialize::Result<Self> {
+        let (name, version, base_uri, port, protocol) =
+            FromSql::<Record<(Text, Integer, Text, Integer, PgProtocolType)>, Pg>::from_sql(bytes)?;
+
+        Ok(Endpoint {
+            name,
+            version,
+            base_uri,
+            port,
+            protocol,
+        })
+    }
+}

From 942d3f62433769497b15486268bb20cab5dbb8b3 Mon Sep 17 00:00:00 2001
From: Marvin Hansen
Date: Sun, 18 Aug 2024 12:25:22 +0800
Subject: [PATCH 19/73] [skip ci] Switched example code and tests from
 PGConnectionPool to single PGConnection. (See
 [comment](https://github.com/diesel-rs/diesel/pull/4169#discussion_r1719587567))

Signed-off-by: Marvin Hansen
---
 examples/postgres/custom_arrays/src/lib.rs | 5 +++--
 .../src/model/endpoint_type/mod.rs | 4 +---
 .../postgres/custom_arrays/src/model/mod.rs | 3 ---
 .../custom_arrays/src/model/service/mod.rs | 2 +-
 .../src/model/service/service_impl.rs | 13 ++++++++----
 .../custom_arrays/tests/service_tests.rs | 21 +++++++------------
 6 files changed, 21 insertions(+), 27 deletions(-)

diff --git a/examples/postgres/custom_arrays/src/lib.rs b/examples/postgres/custom_arrays/src/lib.rs
index d352381d2147..a6246bb7cad8 100644
--- a/examples/postgres/custom_arrays/src/lib.rs
+++ b/examples/postgres/custom_arrays/src/lib.rs
@@ -1,12 +1,13 @@
 use diesel::r2d2::R2D2Connection;
+use diesel::PgConnection;
 use diesel_migrations::{embed_migrations, EmbeddedMigrations, MigrationHarness};
 use std::error::Error;

 pub mod model;
 mod schema;

-pub type Connection =
-    diesel::r2d2::PooledConnection<diesel::r2d2::ConnectionManager<PgConnection>>;
+// pub type Connection = diesel::r2d2::PooledConnection<diesel::r2d2::ConnectionManager<PgConnection>>;
+pub type Connection = PgConnection;

 pub const MIGRATIONS: EmbeddedMigrations = embed_migrations!("migrations");

diff --git a/examples/postgres/custom_arrays/src/model/endpoint_type/mod.rs b/examples/postgres/custom_arrays/src/model/endpoint_type/mod.rs
index de0d1c4cd8e8..6374b9bc6b8e 100644
--- a/examples/postgres/custom_arrays/src/model/endpoint_type/mod.rs
+++ b/examples/postgres/custom_arrays/src/model/endpoint_type/mod.rs
@@ -2,10 +2,9 @@ use crate::model::protocol_type::{PgProtocolType, ProtocolType};
 use diesel::deserialize::{FromSql, FromSqlRow};
 use diesel::expression::AsExpression;
 use diesel::pg::{Pg, PgValue};
-use diesel::{deserialize, serialize};
 use diesel::serialize::{Output, ToSql};
 use diesel::sql_types::{Integer, Record, Text};
-
+use diesel::{deserialize, serialize};

 #[derive(Debug, Clone, FromSqlRow, AsExpression, PartialEq, Eq)]
 #[diesel(sql_type=crate::schema::smdb::sql_types::ServiceEndpoint)]
@@ -35,7 +34,6 @@ impl Endpoint {
     }
 }
-
 impl ToSql<crate::schema::smdb::sql_types::ServiceEndpoint, Pg> for Endpoint {
     fn to_sql<'b>(&'b self, out: &mut Output<'b, '_, Pg>) -> serialize::Result {
         serialize::WriteTuple::<(Text, Integer, Text, Integer, PgProtocolType)>::write_tuple(

diff --git a/examples/postgres/custom_arrays/src/model/mod.rs b/examples/postgres/custom_arrays/src/model/mod.rs
index c9cf7fa59aca..6ee5a86732bb 100644
--- a/examples/postgres/custom_arrays/src/model/mod.rs
+++ b/examples/postgres/custom_arrays/src/model/mod.rs
@@ -2,6 +2,3 @@ pub mod endpoint_type;
 pub mod service;
 pub mod protocol_type;
-
-pub type Connection =
-    diesel::r2d2::PooledConnection<diesel::r2d2::ConnectionManager<PgConnection>>;

diff --git a/examples/postgres/custom_arrays/src/model/service/mod.rs b/examples/postgres/custom_arrays/src/model/service/mod.rs
index d58cca7b6040..02f402149801 100644
--- a/examples/postgres/custom_arrays/src/model/service/mod.rs
+++ b/examples/postgres/custom_arrays/src/model/service/mod.rs
@@ -59,7 +59,7 @@ impl CreateService {
 }

 #[derive(Debug, Clone, Queryable, Insertable, AsChangeset)]
-#[diesel(table_name= crate::schema::smdb::service)]
+#[diesel(table_name=crate::schema::smdb::service)]
 pub struct UpdateService {
     pub name: Option<String>,
     pub version: Option<i32>,

diff --git a/examples/postgres/custom_arrays/src/model/service/service_impl.rs b/examples/postgres/custom_arrays/src/model/service/service_impl.rs
index 2d7772ee29c6..82e6cf0788e1 100644
--- a/examples/postgres/custom_arrays/src/model/service/service_impl.rs
+++ b/examples/postgres/custom_arrays/src/model/service/service_impl.rs
@@ -1,9 +1,10 @@
 use crate::model::endpoint_type::Endpoint;
 use crate::model::service::{CreateService, Service, UpdateService};
-use crate::model::Connection;
 use crate::schema::smdb::service::dsl::*;
+use crate::Connection;
 use diesel::{
-    insert_into, ExpressionMethods, OptionalExtension, QueryDsl, QueryResult, RunQueryDsl, SelectableHelper,
+    insert_into, ExpressionMethods, OptionalExtension, QueryDsl, QueryResult, RunQueryDsl,
+    SelectableHelper,
 };

 impl Service {
@@ -25,7 +26,7 @@
         db: &mut Connection,
         param_service_id: i32,
     ) -> QueryResult<bool> {
-        service.find(param_service_id).first::<Service>(db).optional().map(|arg0: Option<Service>| Option::is_some(&arg0))
+        service
+            .find(param_service_id)
+            .first::<Service>(db)
+            .optional()
+            .map(|arg0: Option<Service>| Option::is_some(&arg0))
     }

     pub fn check_all_services_online(
@@ -34,7 +39,7 @@
     ) -> QueryResult<(bool, Option<String>)> {
         for id in services {
             if !Service::check_if_service_id_online(db, *id)? {
-                return Ok((false, Some(format!("Service {} is offline", id))));
+                return Ok((false, Some(format!("Service {} is offline", id))));
             }
         }

diff --git a/examples/postgres/custom_arrays/tests/service_tests.rs b/examples/postgres/custom_arrays/tests/service_tests.rs
index 6962e027e532..a264ee515bcf 100644
--- a/examples/postgres/custom_arrays/tests/service_tests.rs
+++ b/examples/postgres/custom_arrays/tests/service_tests.rs
@@ -3,23 +3,16 @@ use custom_arrays::model::protocol_type::ProtocolType;
 use custom_arrays::model::service;
 use custom_arrays::model::service::{CreateService, UpdateService};
 use custom_arrays::Connection;
-use diesel::r2d2::{ConnectionManager, Pool};
-use diesel::PgConnection;
+use diesel::{Connection as DieselConnection, PgConnection};
 use dotenvy::dotenv;
 use std::env;

-fn postgres_connection_pool() -> Pool<ConnectionManager<PgConnection>> {
+fn postgres_connection() -> PgConnection {
     dotenv().ok();

-    let database_url = env::var("DATABASE_URL")
-        .or_else(|_| env::var("POSTGRES_DATABASE_URL"))
-        .expect("DATABASE_URL must be set");
-
-    let manager = ConnectionManager::<PgConnection>::new(database_url);
-    Pool::builder()
-        .test_on_check_out(true)
-        .build(manager)
-        .expect("Could not build connection pool")
+    let database_url = env::var("DATABASE_URL").expect("DATABASE_URL must be set");
+    PgConnection::establish(&database_url)
+        .unwrap_or_else(|_| panic!("Error connecting to {}", database_url))
 }

 fn test_db_migration(conn: &mut Connection) {
@@ -30,8 +23,8 @@
 #[test]
 fn test_service() {
-    let pool = postgres_connection_pool();
-    let conn = &mut pool.get().unwrap();
+    let mut connection = postgres_connection();
+    let conn = &mut connection;

     println!("Test DB migration");
     test_db_migration(conn);

From 2889d4415fdff0b15f9d92274fc41f54945ff85e Mon Sep 17 00:00:00 2001
From: Marvin Hansen
Date: Sun, 18 Aug 2024 12:49:57 +0800
Subject: [PATCH 20/73] [skip ci] Moved all custom types into custom schema
 smdb. See
 [comment](https://github.com/diesel-rs/diesel/pull/4169#discussion_r1719613646)

Added a DatabaseError to PgProtocolType in the from_sql implementation to
convey that the DB contains something that isn't expected. See
[comment](https://github.com/diesel-rs/diesel/pull/4169#discussion_r1719595463).
Signed-off-by: Marvin Hansen
---
 .../migrations/2024-08-15-070500_services/down.sql | 4 ++--
 .../migrations/2024-08-15-070500_services/up.sql | 8 ++++----
 .../custom_arrays/src/model/protocol_type/mod.rs | 13 +++++++++++--
 examples/postgres/custom_arrays/src/schema.rs | 2 +-
 4 files changed, 18 insertions(+), 9 deletions(-)

diff --git a/examples/postgres/custom_arrays/migrations/2024-08-15-070500_services/down.sql b/examples/postgres/custom_arrays/migrations/2024-08-15-070500_services/down.sql
index 8a00c31272cc..50cbfd11a8cf 100644
--- a/examples/postgres/custom_arrays/migrations/2024-08-15-070500_services/down.sql
+++ b/examples/postgres/custom_arrays/migrations/2024-08-15-070500_services/down.sql
@@ -1,5 +1,5 @@
 -- This file should undo anything in `up.sql`
 DROP TABLE IF EXISTS smdb.service;
-DROP TYPE IF EXISTS service_endpoint CASCADE;
-DROP TYPE IF EXISTS protocol_type CASCADE;
+DROP TYPE IF EXISTS smdb.service_endpoint CASCADE;
+DROP TYPE IF EXISTS smdb.protocol_type CASCADE;
 DROP SCHEMA IF EXISTS smdb;
\ No newline at end of file

diff --git a/examples/postgres/custom_arrays/migrations/2024-08-15-070500_services/up.sql b/examples/postgres/custom_arrays/migrations/2024-08-15-070500_services/up.sql
index 9b473e75fdcd..9a0c021b7c1a 100644
--- a/examples/postgres/custom_arrays/migrations/2024-08-15-070500_services/up.sql
+++ b/examples/postgres/custom_arrays/migrations/2024-08-15-070500_services/up.sql
@@ -1,19 +1,19 @@
 -- Your SQL goes here
 CREATE SCHEMA IF NOT EXISTS smdb;

-CREATE TYPE protocol_type AS ENUM (
+CREATE TYPE smdb.protocol_type AS ENUM (
     'UnknownProtocol',
     'GRPC',
     'HTTP',
     'UDP'
 );

-CREATE TYPE service_endpoint AS (
+CREATE TYPE smdb.service_endpoint AS (
     "name" Text,
     "version" INTEGER,
     "base_uri" Text,
     "port" INTEGER,
-    "protocol" protocol_type
+    "protocol" smdb.protocol_type
 );

 CREATE TABLE smdb.service(
@@ -25,5 +25,5 @@ CREATE TABLE smdb.service(
     "health_check_uri" Text NOT NULL,
     "base_uri" Text NOT NULL,
     "dependencies" INTEGER[] NOT NULL,
-    "endpoints" service_endpoint[] NOT NULL
+    "endpoints" smdb.service_endpoint[] NOT NULL
 );
\ No newline at end of file

diff --git a/examples/postgres/custom_arrays/src/model/protocol_type/mod.rs b/examples/postgres/custom_arrays/src/model/protocol_type/mod.rs
index 5af43e599b65..ff464578f2c4 100644
--- a/examples/postgres/custom_arrays/src/model/protocol_type/mod.rs
+++ b/examples/postgres/custom_arrays/src/model/protocol_type/mod.rs
@@ -1,6 +1,8 @@
 use diesel::deserialize::{FromSql, FromSqlRow};
 use diesel::expression::AsExpression;
 use diesel::pg::{Pg, PgValue};
+use diesel::result::DatabaseErrorKind;
+use diesel::result::Error::DatabaseError;
 use diesel::serialize::{IsNull, Output, ToSql};
 use diesel::sql_types::SqlType;
 use diesel::{deserialize, serialize};
@@ -9,7 +11,7 @@ use std::io::Write;

 // Diesel type mapping requires a struct to derive a SqlType for custom ToSql and FromSql implementations
 #[derive(SqlType)]
 #[diesel(sql_type = protocol_type)]
-#[diesel(postgres_type(name = "protocol_type"))]
+#[diesel(postgres_type(name = "protocol_type", schema = "smdb"))]
 pub struct PgProtocolType;

 #[derive(Debug, Clone, FromSqlRow, AsExpression, PartialEq, Eq)]
@@ -41,7 +43,14 @@ impl FromSql<PgProtocolType, Pg> for ProtocolType {
         b"GRPC" => Ok(ProtocolType::GRPC),
         b"HTTP" => Ok(ProtocolType::HTTP),
         b"UDP" => Ok(ProtocolType::UDP),
-        _ => Ok(ProtocolType::UnknownProtocol),
+        _ => Err(DatabaseError(
+            DatabaseErrorKind::SerializationFailure,
+            Box::new(format!(
+                "Unrecognized enum variant: {:?}",
+                String::from_utf8_lossy(bytes.as_bytes())
+            )),
+        )
+        .into()),
     }
 }

diff --git a/examples/postgres/custom_arrays/src/schema.rs b/examples/postgres/custom_arrays/src/schema.rs
index 8b6e84a6af49..91378b5ced2e 100644
--- a/examples/postgres/custom_arrays/src/schema.rs
+++ b/examples/postgres/custom_arrays/src/schema.rs
@@ -3,7 +3,7 @@ pub mod smdb {
     pub mod sql_types {
         #[derive(diesel::query_builder::QueryId, Clone, diesel::sql_types::SqlType)]
-        #[diesel(postgres_type(name = "service_endpoint"))]
+        #[diesel(postgres_type(name = "service_endpoint", schema = "smdb"))]
         pub struct ServiceEndpoint;
     }

From ea5c37fc96eaf7367530e5f43a0f476ca1125d0f Mon Sep 17 00:00:00 2001
From: Marvin Hansen
Date: Sun, 18 Aug 2024 16:34:31 +0800
Subject: [PATCH 21/73] Updated Readme and tests to match previous code
 changes.

Signed-off-by: Marvin Hansen
---
 examples/postgres/custom_arrays/README.md | 407 ++++++++++--------
 examples/postgres/custom_arrays/src/lib.rs | 3 +
 .../custom_arrays/tests/service_tests.rs | 2 +-
 3 files changed, 239 insertions(+), 173 deletions(-)

diff --git a/examples/postgres/custom_arrays/README.md b/examples/postgres/custom_arrays/README.md
index 4410ec2092cc..33121873ec18 100644
--- a/examples/postgres/custom_arrays/README.md
+++ b/examples/postgres/custom_arrays/README.md
@@ -1,6 +1,7 @@
 # Custom Array and Data Types in Postgres

 Table of Contents:
+
 1. [Concepts](#concepts)
 2. [The project](#the-project)
 3. [Getting started](#getting-started)
 9. [Rust Types](#rust-types)
 10. [Additional methods](#additional-methods)
 11. [Applied Best Practices](#applied-best-practices)
-12. [Testing](#testing)
+12. [Testing](#testing)

 In this guide, you learn more about the concepts listed below and illustrate the actual usage in Diesel with a sample
 project.

@@ -86,14 +87,12 @@ a good security practice. To create a new schema with Diesel, you follow three s
 1) Declare the schema name in the diesel.toml file
 2) Declare the schema in the migration up/down.sql files
-3) Prefix all table names in the migration up/down.sql files with the schema
-
-Postgres uses internally a schema as something like a search path for a table and, by default,
-searches in the public schema for a table. If that fails, Postgres returns an error that the table does not exists (in the default schema).
-Because of the search path mechanism, you only have to declare the schema name and consistently prefix thetables in the schema with the schema name and that’s it.
+3) Add the schema to the SQL type as annotations

+Internally, Postgres uses a schema as something like a search path for a table and, by default, searches in the public
+schema for a table. If that fails, Postgres returns an error that the table does not exist (in the default schema).
+Because of the search path mechanism, you only have to declare the schema name and consistently prefix the tables in the
+schema with the schema name and that’s it.

-It is important to know that a custom schema only applies to tables. Custom types and enum still
-reside in the default public schema.

 In your diesel.toml file, add the following entry:

 ```toml
 [print_schema]
 file = "src/schema.rs"
 custom_type_derives = ["diesel::query_builder::QueryId", "Clone"]

 schema = "smdb"
 ```

 Specifically, in the created migration folder, add the following to your up.sql file

 ```sql
 -- Your SQL goes here
 CREATE SCHEMA IF NOT EXISTS smdb;
 ```

 Also, add the corresponding drop operation in the down.sql file.

 ```sql
-DROP schema IF EXISTS smdb;
+DROP SCHEMA IF EXISTS smdb;
 ```

 ## Postgres Enum

 The company only uses three types of API endpoints, gRPC, http, or UDP.
However, because data entry or transmission
-errors happen, an UnknownProtocol has been added to catch everything else that may go wrong to ensure no serialization /
-deserialization bugs crash the system. Instead, every once a while a Cron job runs over the database and searches for
-those UnknownProtocol entries, reports it to the admin who may fixes the incorrect entries one day. Therefore the
+errors happen, an UnknownProtocol has been added to catch everything else that may go wrong to ensure that a potential
+serialization or deserialization bug does not crash the system.
+Instead, every once in a while a Cron job runs over the database, searches for
+those UnknownProtocol entries, and reports them to the admin, who can fix the incorrect entries. Therefore the
 Postgres ENUM in your up.sql looks like this:

 ```sql
 -- Your SQL goes here
 CREATE SCHEMA IF NOT EXISTS smdb;

-CREATE TYPE protocol_type AS ENUM (
+CREATE TYPE smdb.protocol_type AS ENUM (
     'UnknownProtocol',
     'GRPC',
     'HTTP',
@@ -138,44 +138,51 @@
 );
 ```

+Notice the schema prefix before the Enum name.
 Add the corresponding drop type operation to your down.sql file:

 ```sql
-DROP TYPE IF EXISTS protocol_type CASCADE;
+DROP TYPE IF EXISTS smdb.protocol_type CASCADE;
 DROP schema IF EXISTS smdb;
 ```

 ## Postgres custom type

 The service endpoint in this case is modelled as a custom type.
-There are few cases when you want to choose a custom type over a table.
-
-1) The data have no relation to data in other tables
-2) You want to group a bunch of fields i.e. to store a configuration file with nested fields
-3) You specifically don’t want to model relations
-
-In the case of a service endpoint, it really is as simple as every service has one or more endpoints, but these are
-certainly not worth storing in a separate table since then you would have to deal with resolving relations during query
-time. Rather, when using the custom type, you just access a field that is basically a tuple. With that out of the way,
+There are a few cases when you want to choose a custom type over a table:
+
+1) The data have no relation to data in other tables.
+2) You want to group a set of fields i.e. to store a complex configuration file with nested fields.
+3) You specifically don’t want relations because of specific requirements.
+
+The service endpoint is an example of the first two cases.
+For one, the endpoints have no relation to any other data; therefore, a separate table makes little sense. Secondly, the
+entire service metadata database really is more of a configuration store and, in a way, the service table really is just
+one complex configuration with nested fields.
+Also, worth mentioning, if you were to store endpoints in a separate table, then you would have to deal with resolving
+relations during query time and that is probably a bit too much for just loading a configuration.
+Rather, when using the custom type, you just access a field that is basically a tuple.
+With that out of the way,
 your endpoint type looks like this in your up.sql:

 ```sql
 -- Your SQL goes here

-CREATE TYPE service_endpoint AS (
+CREATE TYPE smdb.service_endpoint AS (
     "name" Text,
     "version" INTEGER,
     "base_uri" Text,
     "port" INTEGER,
-    "protocol" protocol_type
+    "protocol" smdb.protocol_type
 );
 ```

+Again, all custom types are prefixed with the custom schema.
 And the matching drop operation in the down.sql file:

 ```sql
-DROP TYPE IF EXISTS service_endpoint CASCADE;
-DROP TYPE IF EXISTS protocol_type CASCADE;
+DROP TYPE IF EXISTS smdb.service_endpoint CASCADE;
+DROP TYPE IF EXISTS smdb.protocol_type CASCADE;
 DROP schema IF EXISTS smdb;
 ```

@@ -189,8 +196,8 @@ Let’s think this through:

 ### Internal primary key: SERIAL type

 When you only need a primary key as an index, you just use the SERIAL and Postgres gives your data an automatically
-incrementing integer primary key. You usually can return the primary key value after inserting so if you need to know
-the key, you do get it after insert.
+incrementing integer primary key.
+You usually can return the primary key value after inserting so if you need to know the key, you do get it after insert.

 ### External primary key: INTEGER type

@@ -218,28 +225,25 @@ CREATE TABLE smdb.service(
     "health_check_uri" Text NOT NULL,
     "base_uri" Text NOT NULL,
     "dependencies" INTEGER[] NOT NULL,
-    "endpoints" service_endpoint[] NOT NULL
+    "endpoints" smdb.service_endpoint[] NOT NULL
 );
 ```

-As mentioned earlier, the custom schema applies to tables therefore the service table is
-prefixed with the schema name whereas the endpoint type is not as it resides in the public schema.
-
 Add the matching drop statement to your down.sql file:

 ```sql
 -- This file should undo anything in `up.sql`
 DROP TABLE IF EXISTS smdb.service;
-DROP TYPE IF EXISTS service_endpoint CASCADE;
-DROP TYPE IF EXISTS protocol_type CASCADE;
-DROP schema IF EXISTS smdb;
+DROP TYPE IF EXISTS smdb.service_endpoint CASCADE;
+DROP TYPE IF EXISTS smdb.protocol_type CASCADE;
+DROP SCHEMA IF EXISTS smdb;
 ```

 A few notes on the service table:

-* Notice the schema prefix appears only in the table name.
-* The custom type and custom Enum do not have any prefix because they are in the default schema.
-* The array type follows the usual convention type[].
+* All types and tables are prefixed with the custom schema to give Postgres a
+  hint where to find these types and tables.
+* The array type follows the usual convention schema.type[].
 * The NOT NULL attribute means that the array itself must be set; values inside the array still might be null.

 The Diesel team decided that array values are nullable by default because there are so many things that may go wrong when
 dealing with array values thus making them nullable by default prevents a few potential crashes. Conversely, it also
 means you have to unwrap option types whenever you access data deserialized from an array.

 In total, the up.sql looks as below:

 ```sql
 -- Your SQL goes here
 CREATE SCHEMA IF NOT EXISTS smdb;

 CREATE TYPE smdb.protocol_type AS ENUM (
     'UnknownProtocol',
     'GRPC',
     'HTTP',
     'UDP'
 );

 CREATE TYPE smdb.service_endpoint AS (
     "name" Text,
     "version" INTEGER,
     "base_uri" Text,
     "port" INTEGER,
     "protocol" smdb.protocol_type
 );

 CREATE TABLE smdb.service(
@@ -276,7 +280,7 @@
     "health_check_uri" Text NOT NULL,
     "base_uri" Text NOT NULL,
     "dependencies" INTEGER[] NOT NULL,
-    "endpoints" service_endpoint[] NOT NULL
+    "endpoints" smdb.service_endpoint[] NOT NULL
 );
 ```

 Now, it’s time to run the Diesel migration:

 ```bash
 diesel migration run
 ```

-This generates a schema.rs file in the src root:
+You may use a database console to double check if all types and tables have been created inside the custom schema. If a
+type somehow appears in the public default schema, double check if the type definition in up.sql has been prefixed with
+the schema name.
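If you would rather run this check from code than from a console, a small catalog query does the same job. The following is only a minimal sketch: the helper name `list_smdb_types` and the row struct `PgTypeName` are illustrative and not part of the example crate.

```rust
use diesel::prelude::*;
use diesel::sql_types::Text;

// One row per type found in the `smdb` schema.
#[derive(QueryableByName, Debug)]
struct PgTypeName {
    #[diesel(sql_type = Text)]
    typname: String,
}

// Query the Postgres catalog for all types that live in the custom schema.
fn list_smdb_types(conn: &mut PgConnection) -> QueryResult<Vec<PgTypeName>> {
    diesel::sql_query(
        "SELECT t.typname FROM pg_type t \
         JOIN pg_namespace n ON n.oid = t.typnamespace \
         WHERE n.nspname = 'smdb'",
    )
    .load(conn)
}
```

If `protocol_type` or `service_endpoint` is missing from the result, the type ended up in another schema, most likely the public one.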
+
+The Diesel migration generates the following schema.rs file in the src root:

 ```rust
 // @generated automatically by Diesel CLI.
@@ -329,8 +337,11 @@ showing related warnings

 Next, you want to create a folder model with a mod file in it attached to your lib file. This is your place to store
 all Rust type implementations matching all the generated Postgres types and tables.

-Lastly, you also want to add a type alias for a pooled connection in the lib file because that connection definition is
-a long and convoluted type in need of a shorthand.
+Lastly, you also want to add a type alias for a Postgres database connection in the lib file because that gives you the
+freedom to swap between a normal single connection or a connection pool without updating your type implementation or
+tests.
+The connection pool type alias has been commented out, but the type signature of the pooled connection makes it obvious
+why a type alias is a good idea.

 At this point, your lib file looks as shown below:

@@ -338,7 +349,12 @@ mod schema;
 pub mod model;

-pub type Connection = diesel::r2d2::PooledConnection<diesel::r2d2::ConnectionManager<PgConnection>>;
+// Alias for a pooled connection.
+// pub type Connection = diesel::r2d2::PooledConnection<diesel::r2d2::ConnectionManager<PgConnection>>;
+
+// Alias for a normal, single, connection.
+pub type Connection = PgConnection;
+
 ```

 ## Rust Types

@@ -356,11 +372,13 @@
 Diesel needs a wrapper struct to store a custom Enum in Postgres, so let’s add one:

 ```rust
 #[derive(SqlType)]
 #[diesel(sql_type = protocol_type)]
-#[diesel(postgres_type(name = "protocol_type"))]
+#[diesel(postgres_type(name = "protocol_type", schema = "smdb"))]
 pub struct PgProtocolType;
 ```

 It is important that you add both, sql_type and postgres_type, otherwise insert will fail.
+Furthermore, it is important to add the database schema ("smdb"); otherwise Postgres fails to find the type and aborts
+any operation on that type.

 Next, let’s define the actual Enum. Bear in mind to use the wrapper struct as sql_type on the Enum:

@@ -392,12 +410,16 @@ message, make sure to check:

 5) Does my Rust type refer to the wrapper struct type?

 If all those checks pass and you still see errors, it’s most likely a serialization error.
+To serialize and deserialize a custom Enum, you write a custom ToSql and FromSql implementation.

-To serialize and deserialize a custom Enum, you write a custom ToSql and FromSql implementation. Luckily, this is
-straightforward.
+`ToSql` describes how to serialize a given Rust type (Self) as the sql side type (first generic argument) for a specific
+database backend (second generic argument).
+It needs to translate the type into the relevant wire protocol in that case.
+For postgres/mysql enums that's just the enum value as an ascii string,
+but in general it depends on the database and type. Also, it is important
+to end the implementation with Ok(IsNull::No).

 ```rust
-
 impl ToSql<PgProtocolType, Pg> for ProtocolType {
     fn to_sql<'b>(&'b self, out: &mut Output<'b, '_, Pg>) -> serialize::Result {
         match *self {
@@ -409,7 +431,15 @@ impl ToSql<PgProtocolType, Pg> for ProtocolType {
             Ok(IsNull::No)
     }
 }
+```
+
+`FromSql` describes how to deserialize a given Rust type (Self) from the sql side type (first generic argument) for a
+specific database backend (second generic argument).
+It needs to translate from the relevant wire protocol to the Rust type.
+For postgres/mysql enums that just means matching on the ascii string,
+but in general it depends on the database and type.
+```rust
 impl FromSql<PgProtocolType, Pg> for ProtocolType {
     fn from_sql(bytes: PgValue<'_>) -> deserialize::Result<Self> {
         match bytes.as_bytes() {
@@ -417,15 +447,21 @@
             b"GRPC" => Ok(ProtocolType::GRPC),
             b"HTTP" => Ok(ProtocolType::HTTP),
             b"UDP" => Ok(ProtocolType::UDP),
-            _ => Ok(ProtocolType::UnknownProtocol),
+            _ => Err(DatabaseError(
+                DatabaseErrorKind::SerializationFailure,
+                Box::new(format!(
+                    "Unrecognized enum variant: {:?}",
+                    String::from_utf8_lossy(bytes.as_bytes())
+                )),
+            )
+            .into()),
         }
     }
 }
 ```

-In ToSql, it is important to end the implementation with Ok(IsNull::No). In the from_sql, it is important to add a catch
-all case that returns an UnknownProtocol instance. In practice, the catch all rarely ever gets triggered, but in those
-few corner cases when it does, it keeps the application running.
+In the from_sql, it is important to add error handling for unknown Enum variants
+to catch errors that may result from incorrect database updates.

 ### Rust Struct Endpoint

@@ -441,14 +477,16 @@ pub struct Endpoint {
 }
 ```

-It is worth mentioning is that the sql_type refers to the wrapper struct generated by Diesel and stored in the schema.rs
-file. Keep this in mind if you ever refactor the schema.rs file into a different folder because the macro annotation
-checks the path during compilation and throws an error if it cannot find the type in the provided path. The serialize
-and deserialize implementation of a custom type is not as obvious as the Enum because, internally, Postgres represent a
-custom type as an anonymous typed tuple. Therefore, you have to map a struct to a typed tuple and back.
+It is worth mentioning that the sql_type refers to the sql side type generated by Diesel and stored in the schema.rs
+file.
+Keep this in mind if you ever refactor the schema.rs file into a different folder because the macro annotation checks
+the path during compilation and throws an error if it cannot find the type in the provided path.
+The serialize and deserialize implementation of a custom type is not as obvious as the Enum because, internally,
+Postgres represents a custom type as an anonymous typed tuple. Therefore, you have to map a struct to a typed tuple and
+back.

-Let’s start with the toSql implementation to store the Rust Endpoint struct as typed tuple. Luckily, Diesel provides a
-helper util to do just that.
+Let’s start with the toSql implementation to store the Rust Endpoint struct as a typed tuple.
+Luckily, Diesel provides a helper util to do just that.

 ```rust
 impl ToSql<crate::schema::smdb::sql_types::ServiceEndpoint, Pg> for Endpoint {
     fn to_sql<'b>(&'b self, out: &mut Output<'b, '_, Pg>) -> serialize::Result {
         serialize::WriteTuple::<(Text, Integer, Text, Integer, PgProtocolType)>::write_tuple(
             &(
                 self.name.to_owned(),
                 self.version.to_owned(),
                 self.base_uri.to_owned(),
                 self.port.to_owned(),
                 self.protocol.to_owned(),
             ),
             &mut out.reborrow(),
         )
     }
 }
 ```

-I cannot stress enough that it is paramount that the tuple type signature must match exactly the Postgres type signature defined in your up.sql. Ideally, you want to use the split view function in your IDE to have the up.sql
-in one pane and the the ToSql implementation in another pane, both side by side, to double check
-that the number and types match.
+I cannot stress enough that it is paramount that the tuple type signature must match exactly the Postgres type signature
+defined in your up.sql.
+Ideally, you want to use the split view function in your IDE to have the up.sql in one pane and the ToSql
+implementation in another pane, both side by side, to double check
+that the number and types match.
+If the type or number of types mismatch, you will get a compiler error telling you that somehow either
+the number of fields doesn’t match or that the type of the fields doesn’t match.
 Also, because the write_tuple expects values, you have to call either to_owned() or clone() on any referenced data.

-The from_sql reverses the process by converting a Postgres typed tuple back into a Rust struct.
+The from_sql reverses the process by converting a Postgres typed tuple back into a Rust struct.
 Again, Diesel provides a convenient helper to do so:

 ```rust
@@ -494,11 +534,12 @@ impl FromSql<crate::schema::smdb::sql_types::ServiceEndpoint, Pg> for Endpoint {
     }
 }

-Similar to the serialization process, it is paramount that the type annotation match exactly the one used in toSql and
-the type definition in up.sql.
+Similar to the serialization process, it is paramount that the type annotation
+match exactly the one used in toSql and the type definition in up.sql.

-Debugging serialization issues in Diesel is relatively straight forward since the compiler usually points out the
-location of the issue and, often, the issue is a type mismatch that is relatively easy to fix.
+Debugging serialization issues in Diesel is relatively straightforward
+since the compiler usually points out the location of the issue and, often, the issue is a type mismatch that is
+relatively easy to fix.

 It’s worth repeating that you are dealing with three types in total:

@@ -514,7 +555,7 @@ Make sure all of those types match correctly.

 The service struct gets its serialization and deserialization implementation generated by Diesel so that saves some
 typing. On the other hand, it is a good practice to implement database operations on the actual type itself.
 The wisdom here is twofold. For once, you cannot separate the database operation from the type,
-therefore it makes sense to implement the operations on the type itself.
+therefore it makes sense to implement the operations on the type itself.
 This also hides implementation details in encapsulation,
 as [discussed in the book](https://doc.rust-lang.org/book/ch17-01-what-is-oo.html#encapsulation-that-hides-implementation-details).

@@ -530,9 +571,17 @@ in total, we create 3 service types:
 2) CreateService: For create operations
 3) UpdateService: For update operations

-The Service and CreateService have identical fields. Also, both types require a primary key annotation in addition to
-the table name. The UpdateService, however, has each field wrapped into an option type. When the option type is Some,
-Diesel knows that this field needs updating. If the field is set to None, Diesel ignores it.
+The Service and CreateService have identical fields.
+Also, both types require a primary key annotation in addition to
+the table name. For more details about inserts, refer to the
+official [all about inserts guide](https://diesel.rs/guides/all-about-inserts.html).
+
+The UpdateService, however, has each field wrapped into an option type. When the option type is Some, Diesel knows that
+this field needs updating. If the field is set to None, Diesel ignores it.
+For more details about updates, refer to the
+official [all about updates guide](https://diesel.rs/guides/all-about-updates.html).
+
+The relevant type declarations are:

 ```rust
 #[derive(Debug, Clone, Queryable, Selectable)]
@@ -572,7 +621,7 @@ pub struct UpdateService {
 ```

 Next, let’s implement the CRUD operations on the service type.
-Remember the handy connection type alias defined earlier?
+Remember the handy connection type alias defined earlier?
 This service implementation is the place to use it.

 ### **Create**

@@ -591,7 +640,7 @@ impl Service {

 The insert into function needs the table as target and the value to insert. It is a common convention to return the
 inserted value so that the callsite can verify that the insert completed correctly.

-### **Read**
+### **Read**

 ```rust
     pub fn read(db: &mut Connection, param_service_id: i32) -> QueryResult<Service> {
@@ -653,12 +702,13 @@ With the CRUD methods implemented, it’s time to look at the more interesting p

 ## Additional methods

-In the requirement, it was stated that:
+In the requirement, it was stated that:

 “... a service depends on other services, and it would be great to test before deploying
 a new service, if all of its dependencies are online.”

-To do so we have to implement a method that sets a service online and another one to set it offline. By experience, a few
+To do so we have to implement a method that sets a service online and another one to set it offline. From experience, a
+few
 non-trivial errors are caused by incorrectly set Boolean flags and whenever you can, hide Boolean flags in your public
 API and use a dedicated method instead to set them.

@@ -853,10 +903,10 @@ and follows three best practices:

 ### Database connection as a parameter

-The first one refers to the idea that no type should hold application state, and a database connection definitely counts as application state. Instead, you would write a database connection manager
-that manages a Postgres connection pool, and then calls into the database methods implemented
-in the service type giving it a pooled connection as parameter.
-
+The first one refers to the idea that no type should hold application state, and a database connection definitely counts
+as application state. Instead, you would write a database connection manager
+that manages a Postgres connection pool, and then calls into the database methods implemented
+in the service type, giving it a pooled connection as a parameter.

 ### Prefer the generated DSL over custom queries

@@ -896,86 +946,92 @@ we have to update the return type of the method as well.
     }
 ```

-Run the compile and see that it works. Now, if you were to remove the type annotation from get_result, you get a
-compile error saying that trait FromSql is not implemented.
+Run the compile and see that it works. Now, if you were to remove the type annotation from get_result,
+you get a compile error saying that trait FromSql is not implemented.
+The type annotation is required because there can be more than one FromSql impl for the same Rust type and there can be
+more than one FromSql type for the same sql type. That gives a lot of flexibility in how you actually map data between
+your database and your Rust code, but it requires an explicit type annotation.

 If you write a database layer for an existing system, this technique comes in handy as you can seamlessly convert
-between Diesel DB types and your target types while leaving your target types as it. And because you never know when you have to do this, it’s generally recommended to add type annotations to DB return types.
+between Diesel DB types and your target types while leaving your target types as is. And because you never know when you
+have to do this, it’s generally recommended to add type annotations to DB return types.

 ## Testing

 Diesel enables you to test your database schema and migration early on in the development process.
 To do so, you only need:

 * A util that creates a DB connection
-* A util that runs the DB migration
+* A util that runs the DB migration
 * And your DB integration tests

-### DB Connection Types
+### DB Connection Types

-Broadly speaking, there are two ways to handle database connection. One way is to create one connection per application
-and use it until the application shuts down. Another way is to create a pool of connection and,
-whenever a database connection is needed for an DB operation, a connection is taken from the pool
-and after the DB operation has been completed, the connection is returned to the pool.
-The first approach is quite common in small application whereas the second one is commonly used in server application
-that expected consistent database usage.
+Broadly speaking, there are two ways to handle database connections. One way is to create one connection per application
+and use it until the application shuts down. Another way is to create a pool of connections and,
+whenever a database connection is needed for a DB operation, a connection is taken from the pool
+and after the DB operation has been completed, the connection is returned to the pool.
+The first approach is quite common in small applications whereas the second one is commonly used in server applications
+that expect consistent database usage.

-**Important**
+**Important**

-Using database connection pool in Diesel requires you to enable the `r2d2` feature in your cargo.toml file.
+Using a database connection pool in Diesel requires you to enable the `r2d2` feature in your cargo.toml file.

-In Diesel, you can handle connections either way, but the only noticeable difference is the actual connection type
-used as type parameter. For that reason, a type alias for the DB connection was declared early on because
-that would allow you to switch between a single connection and a pooled connection without refactoring.
-However, because connection pooling in Diesel is so well supported, I suggest to use it by default.
+In Diesel, you can handle connections either way, but the only noticeable difference is the actual connection type used
+as a type parameter. For that reason, a type alias for the DB connection was declared in the lib.rs file because
+that would allow you to switch between a single connection and a pooled connection without refactoring.

-### DB Connection Test Util
+### DB Connection Test Util

-Let's start with a small util that returns a DB connection. First you need to get a database URI from somewhere,
-then construct a connection pool, and lastly return the connection pool.
+Let's start with a small util that returns a simple DB connection. First you need to get a database URI from somewhere,
+then construct a connection, and lastly return the connection.
 Remember, this is just a test util so there is no need to add anything more than necessary.
 ```rust
-fn postgres_connection_pool() -> Pool<ConnectionManager<PgConnection>> {
-    dotenv().ok();
+use dotenvy::dotenv;

-    let database_url = env::var("DATABASE_URL")
-        .or_else(|_| env::var("POSTGRES_DATABASE_URL"))
-        .expect("DATABASE_URL must be set");
+fn postgres_connection() -> PgConnection {
+    dotenv().ok();

-    let manager = ConnectionManager::<PgConnection>::new(database_url);
-    Pool::builder()
-        .test_on_check_out(true)
-        .build(manager)
-        .expect("Could not build connection pool")
+    let database_url = env::var("POSTGRES_DATABASE_URL")
+        .expect("POSTGRES_DATABASE_URL must be set");
+
+    PgConnection::establish(&database_url)
+        .unwrap_or_else(|_| panic!("Error connecting to {}", database_url))
 }
 ```

-Here we use the dotenv crate to test if there is an environment variable of either DATABASE_URL or POSTGRES_DATABASE_URL
-and if so, parse the string and use it to build a connection pool. Therefore, make sure you have one of
-those variables set in your environment. Notice, the test_on_check_out flag is set to true,
-which means the database will be pinged to check if the database is actually reachable and the connection is working correctly.
+Here we use the dotenvy crate to test if there is a POSTGRES_DATABASE_URL environment variable
+and if so, parse the string and use it to establish a connection. Therefore, make sure the POSTGRES_DATABASE_URL is set
+correctly.

+### DB Migration Util

-Diesel can run a database migration in one of two ways. First, you can use the Diesel CLI in your terminal
-to generate, run, or revert migrations manually. This is ideal for development when you frequently
-change the database schema. The second way is programmatically via the embedded migration macro,
-which is ideal to build a single binary with all migrations compiled into it so that
-you don't have to install and run the Diesel CLI on the target machine.
-This simplifies deployment of your application and streamlines continuous integration testing.
+Diesel can run a database migration in one of two ways.
+First, you can use the Diesel CLI in your terminal
+to generate, run, or revert migrations manually. This is ideal for development when you frequently change the database
+schema.
+
+The second way is programmatically via the embedded migration macro,
+which is ideal to build a single binary with all migrations compiled into it so that
+you don't have to install and run the Diesel CLI on the target machine.
+This simplifies deployment of your application and streamlines continuous integration testing.

 **Important**

-You must add the crate `diesel_migrations` to your cargo.toml and set the target database as feature flag
-to enable the embedded migration macro.
+You must add the crate `diesel_migrations` to your cargo.toml and set the target database as a feature flag to enable the
+embedded migration macro.

-To serve both purposes, deployment and CI testing, let's add a new function `run_db_migration` to the lib.rs file
-of the crate that takes a connection as parameter, checks if the DB connection is valid,
-checks if there are any pending migrations, and if so, runs all pending migrations.
+To serve both purposes, deployment and CI testing, let's add a new function `run_db_migration` to the lib.rs file of the
+crate that takes a connection as parameter, checks if the DB connection is valid, checks if there are any pending
+migrations, and if so, runs all pending migrations.
 The implementation is straightforward, as you can see below:

 ```rust
+pub type Connection = PgConnection;
+
+pub const MIGRATIONS: EmbeddedMigrations = embed_migrations!("migrations");
+
 pub fn run_db_migration(
     conn: &mut Connection,
 ) -> Result<(), Box<dyn Error>> {
@@ -1016,54 +1072,56 @@ pub fn run_db_migration(
 }
 ```

-The rational to check for all potential errors is twofold. For once, because the database connection is given as a parameter,
-you just don't know if the creating pool has checked the connection, therefore you better check it to catch a dead connection before using it.
-Second, even if you have a correct database connection, this does not guarantee that the migration will succeed.
-There might be some types left from a previously aborted drop operations or
-a random error might happened at any stage of the migration, therefore you have to handle the error where it occurs.
-Also, because you run the db migration during application startup or before testing,
-ensure you have clear error messages to speed up diagnostic and debugging.
-
+The rationale for checking for all potential errors is twofold. For one, because the database connection is given as a
+parameter,
+you just don't know if the component that created the connection has checked the connection, therefore you better check
+it to catch a dead connection.
+Second, even if you have a correct database connection, this does not guarantee that the migration will succeed.
+There might be some types left from a previously aborted drop operation or
+a random error might happen at any stage of the migration, therefore you have to handle the error where it occurs.
+Also, because you run the db migration during application startup or before testing,
+ensure you have clear error messages to speed up diagnostics and debugging.

 ### DB Integration Tests

-Database integration tests become flaky when executed in parallel usually because of conflicting read / write operations.
-While modern database systems can handle concurrent data access, test tools with assertions not so much.
-That means, test assertions start to fail seemingly randomly when executed concurrently.
+Database integration tests become flaky when executed in parallel, usually because of conflicting read / write
+operations.
+While modern database systems can handle concurrent data access, test tools with assertions not so much.
+That means test assertions start to fail seemingly randomly when executed concurrently.

 There are only very few viable options to deal with this reality:

 * Don't use parallel test execution
 * Only parallelize tests per isolated access
 * Do synchronization and test the actual database state before each test and run tests as atomic transactions

-Out of the three options, the last one will almost certainly win you an over-engineering award in the unlikely case
-your colleagues appreciate the resulting test complexity. If not, good luck.
+Out of the three options, the last one will almost certainly win you an over-engineering award in the unlikely case
+your colleagues appreciate the resulting test complexity. If not, good luck.
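For reference, here is a minimal sketch of what the second option, isolated access per group, can look like in plain Rust. This pattern is not used in this example, which opts for one sequential test instead, and the lock name is illustrative: every test touching the same group of tables takes the same process-wide lock, so tests within the group run in sequence while unrelated groups still run in parallel.

```rust
use std::sync::Mutex;

// One lock per group of tables that must not be accessed concurrently.
static SERVICE_TABLE_LOCK: Mutex<()> = Mutex::new(());

#[test]
fn test_service_create() {
    // Take the group lock for the duration of the test. `unwrap_or_else`
    // recovers the guard even if another test panicked while holding it
    // (a poisoned mutex), so one failure does not cascade.
    let _guard = SERVICE_TABLE_LOCK
        .lock()
        .unwrap_or_else(|poisoned| poisoned.into_inner());

    // ... run the actual assertions against the service table here ...
}
```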
+In practice, not using parallel test execution is often not possible either because of the larger number
+of integration tests that run on a CI server. To be clear, strictly sequential tests are a great option
+for small projects with low complexity; it just doesn't scale as the project grows in size and complexity.

-And that leaves us only with the middle-ground of grouping tables into isolated access.
-Suppose your database has 25 tables you are tasked to test.
-Some of them are clearly unrelated, others only require read access to some tables,
 and then you have those where you have to test for multi-table inserts and updates.
-Say, you can form 7 groups of tables that are clearly independent of each other,
-then you can run those 7 test groups in parallel, but for all practical purpose within each group,
-all tests are run in sequence unless you are testing specifically for concurrent read write access.
-You want to put those test into a dedicated test group anyways as they capture errors that are more complex
-than errors from your basic integration tests.
-And it makes sense to stage integration tests into simple functional tests, complex workflow tests,
-and chaos tests that triggers read / write conflicts randomly to test for blind spots.
+And that leaves us only with the middle-ground of grouping tables into isolated access.
+Suppose your database has 25 tables you are tasked to test.
+Some of them are clearly unrelated, others only require read access to some tables,
 and then you have those where you have to test for multi-table inserts and updates.
+Say, you can form 7 groups of tables that are clearly independent of each other,
+then you can run those 7 test groups in parallel, but for all practical purposes within each group,
+all tests are run in sequence unless you are testing specifically for concurrent read write access.
+You want to put those tests into a dedicated test group anyway as they capture errors that are more complex
+than errors from your basic integration tests.
+And it makes sense to stage integration tests into simple functional tests, complex workflow tests,
+and chaos tests that trigger read / write conflicts randomly to test for blind spots.

 In any case, the test suite for the service example follows the sequential execution pattern so that it can be
-executed in parallel along other test groups without causing randomly failing tests. Specifically,
+executed in parallel alongside other test groups without causing randomly failing tests. Specifically,
 the test structure looks as shown below. However, the full test suite is in the test folder.

 ```rust
 #[test]
 fn test_service() {
-    let pool = postgres_connection_pool();
-    let conn = &mut pool.get().unwrap();
+    let mut connection = postgres_connection();
+    let conn = &mut connection;

     println!("Test DB migration");
     test_db_migration(conn);
@@ -1084,16 +1142,21 @@ fn test_db_migration(conn: &mut Connection) {
 }
 ```
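As a quick aside before walking through this structure: when an assertion on a QueryResult fails, unwrapping or debug-printing the result first surfaces the underlying Diesel error instead of an opaque assertion failure. A hypothetical fragment from inside such a test:

```rust
// Hypothetical fragment: read a service and inspect the result before asserting.
let result = service::Service::read(conn, 1);
// dbg!(&result); // uncomment while debugging a failing assertion
assert!(result.is_ok(), "read failed: {:?}", result);
```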
+The idea behind this test structure is simple yet powerful:
+There is just one Rust test so regardless of whether you test with Cargo,
+Nextest or any other test util, this test will run everything within it in sequence.
+The print statements are usually only shown if a test fails.
+However, there is one important detail worth mentioning:
+the assert macro often obfuscates the error message and if you have ever seen a complex stack trace full of fnOnce
+invocations, you know that already.
+To get a more meaningful error message, just uncomment the dbg!
 statement that unwraps the result before the assertion and you will see a helpful error message in most cases.

-You may have noticed that the DB migration util checks if there are pending migrations and if there is nothing,
-it does nothing and just returns. The wisdom behind this decision is that, there are certain corner cases
-that only occur when you run a database tet multiple times and you really want to run the DB migration
-just once to simulate that scenario as realistic as possible. When you test locally, the same logic applies
-and you really only want to run a database migration when the schema has changed.
+You may have noticed that the DB migration util checks if there are pending migrations and if there is nothing, it does
+nothing and just returns.
+The wisdom behind this decision is that there are certain corner cases
+that only occur when you run a database test multiple times and you really want to run the DB migration just once to
+simulate that scenario as realistically as possible.
+When you test locally, the same logic applies and you really only want to run a database migration when the schema has
+changed.

diff --git a/examples/postgres/custom_arrays/src/lib.rs b/examples/postgres/custom_arrays/src/lib.rs
index a6246bb7cad8..b115b8cbc580 100644
--- a/examples/postgres/custom_arrays/src/lib.rs
+++ b/examples/postgres/custom_arrays/src/lib.rs
@@ -6,7 +6,10 @@ use std::error::Error;
 pub mod model;
 mod schema;

+// Alias for a pooled connection.
 // pub type Connection = diesel::r2d2::PooledConnection<diesel::r2d2::ConnectionManager<PgConnection>>;
+
+// Alias for a normal, single, connection.
 pub type Connection = PgConnection;

 pub const MIGRATIONS: EmbeddedMigrations = embed_migrations!("migrations");

diff --git a/examples/postgres/custom_arrays/tests/service_tests.rs b/examples/postgres/custom_arrays/tests/service_tests.rs
index a264ee515bcf..7815aa0a89eb 100644
--- a/examples/postgres/custom_arrays/tests/service_tests.rs
+++ b/examples/postgres/custom_arrays/tests/service_tests.rs
@@ -10,7 +10,7 @@
 fn postgres_connection() -> PgConnection {
     dotenv().ok();

-    let database_url = env::var("DATABASE_URL").expect("DATABASE_URL must be set");
+    let database_url = env::var("POSTGRES_DATABASE_URL").expect("POSTGRES_DATABASE_URL must be set");
     PgConnection::establish(&database_url)
         .unwrap_or_else(|_| panic!("Error connecting to {}", database_url))
 }

From 817238fe012e40e44bb45b8adf13282240379de3 Mon Sep 17 00:00:00 2001
From: Marvin Hansen
Date: Thu, 15 Aug 2024 16:52:34 +0800
Subject: [PATCH 22/73] Added custom array example with documentation.
Signed-off-by: Marvin Hansen --- .gitignore | 6 + Cargo.toml | 1 + examples/postgres/custom_arrays/.gitignore | 2 + examples/postgres/custom_arrays/Cargo.toml | 9 + examples/postgres/custom_arrays/README.md | 886 ++++++++++++++++++ examples/postgres/custom_arrays/diesel.toml | 11 + .../postgres/custom_arrays/migrations/.keep | 0 .../down.sql | 6 + .../up.sql | 36 + .../2024-08-15-070500_services/down.sql | 5 + .../2024-08-15-070500_services/up.sql | 29 + examples/postgres/custom_arrays/src/lib.rs | 5 + .../model/endpoint_type/endpoint_type_impl.rs | 37 + .../src/model/endpoint_type/mod.rs | 33 + .../postgres/custom_arrays/src/model/mod.rs | 7 + .../src/model/protocol_type/mod.rs | 47 + .../custom_arrays/src/model/service/mod.rs | 97 ++ .../src/model/service/service_impl.rs | 147 +++ examples/postgres/custom_arrays/src/schema.rs | 26 + examples/postgres/custom_arrays/tests/mod.rs | 2 + .../custom_arrays/tests/service_tests.rs | 268 ++++++ 21 files changed, 1660 insertions(+) create mode 100644 examples/postgres/custom_arrays/.gitignore create mode 100644 examples/postgres/custom_arrays/Cargo.toml create mode 100644 examples/postgres/custom_arrays/README.md create mode 100644 examples/postgres/custom_arrays/diesel.toml create mode 100644 examples/postgres/custom_arrays/migrations/.keep create mode 100644 examples/postgres/custom_arrays/migrations/00000000000000_diesel_initial_setup/down.sql create mode 100644 examples/postgres/custom_arrays/migrations/00000000000000_diesel_initial_setup/up.sql create mode 100644 examples/postgres/custom_arrays/migrations/2024-08-15-070500_services/down.sql create mode 100644 examples/postgres/custom_arrays/migrations/2024-08-15-070500_services/up.sql create mode 100644 examples/postgres/custom_arrays/src/lib.rs create mode 100644 examples/postgres/custom_arrays/src/model/endpoint_type/endpoint_type_impl.rs create mode 100644 examples/postgres/custom_arrays/src/model/endpoint_type/mod.rs create mode 100644 examples/postgres/custom_arrays/src/model/mod.rs create mode 100644 examples/postgres/custom_arrays/src/model/protocol_type/mod.rs create mode 100644 examples/postgres/custom_arrays/src/model/service/mod.rs create mode 100644 examples/postgres/custom_arrays/src/model/service/service_impl.rs create mode 100644 examples/postgres/custom_arrays/src/schema.rs create mode 100644 examples/postgres/custom_arrays/tests/mod.rs create mode 100644 examples/postgres/custom_arrays/tests/service_tests.rs diff --git a/.gitignore b/.gitignore index ec44e67ff3ec..11408af749e3 100644 --- a/.gitignore +++ b/.gitignore @@ -3,3 +3,9 @@ Cargo.lock !diesel_cli/Cargo.lock .env *.snap.new + +./.idea/* +/.idea/.gitignore +/.idea/diesel.iml +/.idea/modules.xml +/.idea/vcs.xml diff --git a/Cargo.toml b/Cargo.toml index 67082d2f400b..c7da4e669941 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -21,6 +21,7 @@ members = [ "examples/postgres/getting_started_step_1", "examples/postgres/getting_started_step_2", "examples/postgres/getting_started_step_3", + "examples/postgres/custom_arrays", "examples/postgres/custom_types", "examples/postgres/composite_types", "examples/postgres/relations", diff --git a/examples/postgres/custom_arrays/.gitignore b/examples/postgres/custom_arrays/.gitignore new file mode 100644 index 000000000000..4f8380647089 --- /dev/null +++ b/examples/postgres/custom_arrays/.gitignore @@ -0,0 +1,2 @@ +/target +/.env diff --git a/examples/postgres/custom_arrays/Cargo.toml b/examples/postgres/custom_arrays/Cargo.toml new file mode 100644 index 000000000000..b953ab0a5f24 --- 
/dev/null
+++ b/examples/postgres/custom_arrays/Cargo.toml
@@ -0,0 +1,9 @@
+[package]
+name = "custom_arrays"
+version = "0.1.0"
+edition.workspace = true
+publish = false
+
+[dependencies]
+diesel = { path = "../../../diesel", version = "2.2.0", features = ["postgres", "r2d2"] }
+dotenv = "0.15.0"
diff --git a/examples/postgres/custom_arrays/README.md b/examples/postgres/custom_arrays/README.md
new file mode 100644
index 000000000000..340907276e99
--- /dev/null
+++ b/examples/postgres/custom_arrays/README.md
@@ -0,0 +1,886 @@
+# Custom array and custom data in Postgres
+
+In this guide, you learn more about the concepts listed below and illustrate the actual usage in Diesel with a sample
+project.
+
+**Concepts**:
+
+* Custom Enum
+* Custom type that uses a custom Enum
+* Array of custom types
+* Serialization / Deserialization of custom type array
+* Implementation of DB operations on a Rust Type
+
+## The project:
+
+Your company decided to create a database of all microservices in its system; the high-level requirements are:
+
+* CRUD: Create, read, update, and delete a service in the database
+* List all offline services and all services that are online
+* Retrieve all API endpoints for a service
+
+Upon closer inspection, you realize a few missing details in the requirements:
+
+* There are many different endpoint types, some are http, some are gRPC, and some are even UDP (i.e. a message bus), but
+  the number of endpoint types is limited to just three that haven’t changed in years.
+* While each service may have one or more endpoints, no two services can share the same API endpoints, which means
+  normalizing API endpoints in a separate table makes little sense in this case.
+* Unmentioned in the requirements, a service depends on other services, and it would be great to test, before deploying
+  a new service, if all of its dependencies are online.
+* Since this part of the database contains only meta-data, it’s best to store it in a separate schema to separate the
+  metadata from all other data.
+
+Thinking further, you realize a few things:
+
+1) Endpoint type can be expressed as an Enum in Postgres and in Rust
+2) Endpoint can be a custom type in Postgres that matches a Rust type
+3) Service contains an array of custom type Enum
+4) When each service gets a unique ID that also serves as primary key, then service dependencies are basically just a
+   collection of those unique service IDs. Thus, you just add an integer array in Postgres.
+5) Checking all dependencies of a service is then as simple as loading the array of IDs, iterating over it, checking each
+   dependency to see if it’s online, and you are basically done with the core requirement. As the old saying goes, if you can
+   solve a problem with data structures, by all means, just do it.
+
+## Getting started
+
+Let’s create a new crate, called custom_arrays:
+
+`
+cargo new custom_arrays --lib
+`
+
+Next, run the Diesel setup:
+
+```bash
+diesel setup
+```
+
+And then generate a new Diesel migration called services:
+
+```bash
+diesel migration generate services
+```
+
+This creates a new folder within the migration folder containing an empty up.sql and down.sql file.
+
+## Postgres schema
+
+Since the service management operates independently from the rest of the system, it is sensible to store all data in a
+dedicated schema. A schema in Postgres is like a namespace as it ensures unique table and type names within the schema.
+More importantly, you can create a new user that has access to only that particular schema, which is always
+a good security practice. To create a new schema with Diesel, you follow three steps:
+
+1) Declare the schema name in the diesel.toml file
+2) Declare the schema in the migration up/down.sql files
+3) Prefix all table names in the migration up/down.sql files with the schema
+
+Internally, Postgres uses the schema as part of the search path for a table and, by default, searches the public
+schema for a table. If that fails, Postgres returns an error that the table does not exist (in the default schema).
+Because of the search path mechanism, you only have to declare the schema name and consistently prefix the tables in the
+schema with the schema name, and that’s it.
+
+In your diesel.toml file, add the following entry:
+
+```toml
+[print_schema]
+file = "src/schema.rs"
+custom_type_derives = ["diesel::query_builder::QueryId", "Clone"]
+
+schema = "smdb"
+```
+
+Specifically, in the created migration folder, add the following to your up.sql file:
+
+```sql
+-- Your SQL goes here
+CREATE SCHEMA IF NOT EXISTS smdb;
+```
+
+Also, add the corresponding drop operation to the down.sql file:
+
+```sql
+DROP schema IF EXISTS smdb;
+```
+
+## Postgres Enum
+
+The company only uses three types of API endpoints: gRPC, HTTP, or UDP. However, because data entry and transmission
+errors happen, an UnknownProtocol variant has been added as a catch-all to ensure that no serialization /
+deserialization bug crashes the system. Instead, every once in a while a cron job runs over the database, searches for
+those UnknownProtocol entries, and reports them to an admin who can fix the incorrect entries. Therefore, the
+Postgres ENUM in your up.sql looks like this:
+
+```sql
+-- Your SQL goes here
+CREATE SCHEMA IF NOT EXISTS smdb;
+
+CREATE TYPE smdb.protocol_type AS ENUM (
+    'UnknownProtocol',
+    'GRPC',
+    'HTTP',
+    'UDP'
+);
+```
+
+Notice that the type name is prefixed by the DB schema to ensure Postgres finds it. Also, you add the corresponding drop
+type operation to your down.sql file:
+
+```sql
+DROP TYPE IF EXISTS smdb.protocol_type CASCADE;
+DROP schema IF EXISTS smdb;
+```
+
+## Postgres custom type
+
+The service endpoint in this case is modelled as a custom type.
+There are a few cases when you want to choose a custom type over a table:
+
+1) The data have no relation to data in other tables
+2) You want to group a bunch of fields, e.g. to store a configuration file with nested fields
+3) You specifically don’t want to model relations
+
+In the case of a service endpoint, it really is as simple as every service has one or more endpoints, but these are
+certainly not worth storing in a separate table, since then you would have to resolve relations at query
+time. Rather, when using the custom type, you just access a field that is basically a tuple. With that out of the way,
+your endpoint type looks like this in your up.sql:
+
+```sql
+-- Your SQL goes here
+
+CREATE TYPE smdb.service_endpoint AS (
+    "name" Text,
+    "version" INTEGER,
+    "base_uri" Text,
+    "port" INTEGER,
+    "protocol" smdb.protocol_type
+);
+```
+
+And the matching drop operation in the down.sql file:
+
+```sql
+DROP TYPE IF EXISTS smdb.service_endpoint CASCADE;
+DROP TYPE IF EXISTS smdb.protocol_type CASCADE;
+DROP schema IF EXISTS smdb;
+```
+
+## Postgres SERIAL vs INTEGER primary key
+
+There is an important detail you must decide upfront: internal or external primary key?
+
+External primary key refers to the idea that you designate primary keys outside of Postgres.
+Let’s think this through:
+
+### Internal primary key: SERIAL type
+
+When you only need a primary key as an index, you just use the SERIAL type and Postgres gives your data an automatically
+incrementing integer primary key. You can usually return the primary key value after inserting, so if you need to know
+the key, you do get it after the insert.
+
+### External primary key: INTEGER type
+
+In case you need to know the specific value of the primary key before inserting the data, you
+have to assign unique primary keys yourself and use the INTEGER type (or any of the many integer
+variations) to convey to Postgres that you set the primary key yourself. Notice, you still have to set the NOT NULL and
+PRIMARY KEY attributes to ensure data consistency.
+
+In the case of the microservice database, you have to use external primary keys because, remember, a service
+depends on other services. In order to insert a service that depends on any other service, regardless of whether it has
+already been inserted or not, you have to know the service ID upfront. With that out of the way, let’s define the
+service table.
+
+## Postgres array with a custom type
+
+To define the service table, add the following to your up.sql file:
+
+```sql
+CREATE TABLE smdb.service(
+    "service_id" INTEGER NOT NULL PRIMARY KEY,
+    "name" Text NOT NULL,
+    "version" INTEGER NOT NULL,
+    "online" BOOLEAN NOT NULL,
+    "description" Text NOT NULL,
+    "health_check_uri" Text NOT NULL,
+    "base_uri" Text NOT NULL,
+    "dependencies" INTEGER[] NOT NULL,
+    "endpoints" smdb.service_endpoint[] NOT NULL
+);
+```
+
+And add the matching drop statement to your down.sql file:
+
+```sql
+-- This file should undo anything in `up.sql`
+DROP TABLE IF EXISTS smdb.service;
+DROP TYPE IF EXISTS smdb.service_endpoint CASCADE;
+DROP TYPE IF EXISTS smdb.protocol_type CASCADE;
+DROP schema IF EXISTS smdb;
+```
+
+A few notes on the service table:
+
+* Notice that the schema prefix appears in both the table name and the referenced types
+* The array type follows the usual convention type[]
+* The NOT NULL attribute means that the array itself must be set; values inside the array may still be null
+
+The Diesel team decided that array values are nullable by default because there are so many things that may go wrong when
+dealing with array values, and making them nullable by default prevents a few potential crashes. Conversely, it also
+means you have to unwrap option types whenever you access data deserialized from an array.
+
+In total, the up.sql looks as below:
+
+```sql
+-- Your SQL goes here
+CREATE SCHEMA IF NOT EXISTS smdb;
+
+CREATE TYPE smdb.protocol_type AS ENUM (
+    'UnknownProtocol',
+    'GRPC',
+    'HTTP',
+    'UDP'
+);
+
+CREATE TYPE smdb.service_endpoint AS (
+    "name" Text,
+    "version" INTEGER,
+    "base_uri" Text,
+    "port" INTEGER,
+    "protocol" smdb.protocol_type
+);
+
+CREATE TABLE smdb.service(
+    "service_id" INTEGER NOT NULL PRIMARY KEY,
+    "name" Text NOT NULL,
+    "version" INTEGER NOT NULL,
+    "online" BOOLEAN NOT NULL,
+    "description" Text NOT NULL,
+    "health_check_uri" Text NOT NULL,
+    "base_uri" Text NOT NULL,
+    "dependencies" INTEGER[] NOT NULL,
+    "endpoints" smdb.service_endpoint[] NOT NULL
+);
+```
+
+Now, it’s time to run the Diesel migration:
+
+```bash
+diesel migration run
+```
+
+This generates a schema.rs file in the src root:
+
+```rust
+// @generated automatically by Diesel CLI.
+
+pub mod smdb {
+    pub mod sql_types {
+        #[derive(diesel::query_builder::QueryId, Clone, diesel::sql_types::SqlType)]
+        #[diesel(postgres_type(name = "service_endpoint", schema = "smdb"))]
+        pub struct ServiceEndpoint;
+    }
+
+    diesel::table! {
+        use diesel::sql_types::*;
+        use super::sql_types::ServiceEndpoint;
+
+        smdb.service (service_id) {
+            service_id -> Int4,
+            name -> Text,
+            version -> Int4,
+            online -> Bool,
+            description -> Text,
+            health_check_uri -> Text,
+            base_uri -> Text,
+            dependencies -> Array<Nullable<Int4>>,
+            endpoints -> Array<Nullable<ServiceEndpoint>>,
+        }
+    }
+}
+```
+
+Notice that the custom Postgres types are stored in Postgres, therefore you are not seeing their definitions in the
+generated schema; only tables show up in the generated schema. Furthermore, you will need a wrapper struct to map from
+Rust to the Postgres type.
+For the ServiceEndpoint, Diesel already generated a matching wrapper struct with the correct annotations. The service
+table then uses that wrapper struct “ServiceEndpoint” instead of the native Postgres type to reference the custom type
+in the endpoints array.
+
+You want to attach the generated schema.rs to your lib file to access it from within your crate and stop your IDE from
+showing related warnings.
+
+Next, you want to create a model folder with a mod file in it, attached to your lib file. This is your place to store all
+Rust type implementations matching all the generated Postgres types and tables.
+
+Lastly, you also want to add a type alias for a pooled connection in the lib file because that connection definition is
+a long and convoluted type in need of a shorthand.
+
+At this point, your lib file looks as shown below:
+
+```rust
+mod schema;
+pub mod model;
+
+pub type Connection =
+    diesel::r2d2::PooledConnection<diesel::r2d2::ConnectionManager<diesel::PgConnection>>;
+```
+
+## Rust Types
+
+In total, you implement three Rust types:
+
+* Enum: ProtocolType
+* Struct: Endpoint
+* Struct: Service
+
+### Rust Enum: ProtocolType
+
+Diesel needs a wrapper struct to store a custom Enum in Postgres, so let’s add one:
+
+```rust
+#[derive(SqlType)]
+#[diesel(sql_type = protocol_type)]
+#[diesel(postgres_type(name = "protocol_type"))]
+pub struct PgProtocolType;
+```
+
+It is important that you add both sql_type and postgres_type; otherwise inserts will fail.
+
+Next, let’s define the actual Enum. Bear in mind to use the wrapper struct as sql_type on the Enum:
+
+```rust
+#[derive(Debug, Clone, FromSqlRow, AsExpression, PartialEq, Eq)]
+#[diesel(sql_type = PgProtocolType)]
+pub enum ProtocolType {
+    UnknownProtocol,
+    GRPC,
+    HTTP,
+    UDP,
+}
+```
+
+It’s worth pointing out that you are dealing with three types in total:
+
+1) protocol_type: The Postgres Enum type
+2) PgProtocolType: The wrapper struct that refers to the Postgres Enum type
+3) ProtocolType: The Rust Enum that refers to the wrapper struct PgProtocolType
+
+Mixing up any of those three types will result in a complicated Diesel trait error. However, these error messages are just
+a convoluted way to say that the database type mismatches the Rust type. When you encounter such a trait error
+message, make sure to check:
+
+1) Do I have a wrapper struct?
+2) Does my wrapper struct derive SqlType?
+3) Does my wrapper type have both sql_type and postgres_type declared?
+4) Are sql_type and postgres_type both referring to the correct Postgres type?
+5) Does my Rust type refer to the wrapper struct type?
+
+If all those checks pass and you still see errors, it’s most likely a serialization error.
+
+To serialize and deserialize a custom Enum, you write custom ToSql and FromSql implementations. Luckily, this is
+straightforward.
+
+```rust
+
+impl ToSql<PgProtocolType, Pg> for ProtocolType {
+    fn to_sql<'b>(&'b self, out: &mut Output<'b, '_, Pg>) -> serialize::Result {
+        match *self {
+            ProtocolType::UnknownProtocol => out.write_all(b"UnknownProtocol")?,
+            ProtocolType::GRPC => out.write_all(b"GRPC")?,
+            ProtocolType::HTTP => out.write_all(b"HTTP")?,
+            ProtocolType::UDP => out.write_all(b"UDP")?,
+        }
+        Ok(IsNull::No)
+    }
+}
+
+impl FromSql<PgProtocolType, Pg> for ProtocolType {
+    fn from_sql(bytes: PgValue<'_>) -> deserialize::Result<Self> {
+        match bytes.as_bytes() {
+            b"UnknownProtocol" => Ok(ProtocolType::UnknownProtocol),
+            b"GRPC" => Ok(ProtocolType::GRPC),
+            b"HTTP" => Ok(ProtocolType::HTTP),
+            b"UDP" => Ok(ProtocolType::UDP),
+            _ => Ok(ProtocolType::UnknownProtocol),
+        }
+    }
+}
+```
+
+In to_sql, it is important to end the implementation with Ok(IsNull::No). In from_sql, it is important to add a catch-all
+case that returns an UnknownProtocol instance. In practice, the catch-all rarely ever gets triggered, but in those
+few corner cases when it does, it keeps the application running.
+
+### Rust Struct Endpoint
+
+```rust
+#[derive(Debug, Clone, FromSqlRow, AsExpression, PartialEq, Eq)]
+#[diesel(sql_type=crate::schema::smdb::sql_types::ServiceEndpoint)]
+pub struct Endpoint {
+    pub name: String,
+    pub version: i32,
+    pub base_uri: String,
+    pub port: i32,
+    pub protocol: ProtocolType,
+}
+```
+
+It is worth mentioning that the sql_type refers to the wrapper struct generated by Diesel and stored in the schema.rs
+file. Keep this in mind if you ever refactor the schema.rs file into a different folder, because the macro annotation
+checks the path during compilation and throws an error if it cannot find the type in the provided path. The serialize
+and deserialize implementation of a custom type is not as obvious as the Enum because, internally, Postgres represents a
+custom type as an anonymous typed tuple. Therefore, you have to map a struct to a typed tuple and back.
+
+Let’s start with the ToSql implementation to store the Rust Endpoint struct as a typed tuple. Luckily, Diesel provides a
+helper util to do just that.
+
+```rust
+impl ToSql<crate::schema::smdb::sql_types::ServiceEndpoint, Pg> for Endpoint {
+    fn to_sql<'b>(&'b self, out: &mut Output<'b, '_, Pg>) -> serialize::Result {
+        serialize::WriteTuple::<(Text, Integer, Text, Integer, PgProtocolType)>::write_tuple(
+            &(
+                self.name.to_owned(),
+                self.version.to_owned(),
+                self.base_uri.to_owned(),
+                self.port.to_owned(),
+                self.protocol.to_owned(),
+            ),
+            &mut out.reborrow(),
+        )
+    }
+}
+```
+
+I cannot stress enough that the tuple type signature must match exactly the Postgres type signature defined in your
+up.sql. Ideally, you want to use the split view function in your IDE to have the up.sql
+in one pane and the ToSql implementation in another pane, both side by side, to double check
+that the number and types of the fields match.
+If they mismatch, you will get a compiler error telling you that either
+the number of fields or the types of the fields don’t match.
+Also, because write_tuple expects values, you have to call either to_owned() or clone() on any referenced data.
+
+The from_sql reverses the process by converting a Postgres typed tuple back into a Rust struct.
+Again, Diesel provides a convenient helper to do so:
+
+```rust
+impl FromSql<crate::schema::smdb::sql_types::ServiceEndpoint, Pg> for Endpoint {
+    fn from_sql(bytes: PgValue<'_>) -> deserialize::Result<Self> {
+        let (name, version, base_uri, port, protocol) =
+            FromSql::<Record<(Text, Integer, Text, Integer, PgProtocolType)>, Pg>::from_sql(bytes)?;
+
+        Ok(Endpoint {
+            name,
+            version,
+            base_uri,
+            port,
+            protocol,
+        })
+    }
+}
+```
+
+Similar to the serialization process, it is paramount that the type annotation matches exactly the one used in ToSql and
+the type definition in up.sql.
+
+Debugging serialization issues in Diesel is relatively straightforward since the compiler usually points out the
+location of the issue and, often, the issue is a type mismatch that is relatively easy to fix.
+
+It’s worth repeating that you are dealing with three types in total:
+
+1) service_endpoint: The Postgres custom type
+2) ServiceEndpoint: The wrapper struct generated by Diesel that refers to the Postgres type service_endpoint. Also, only
+   the wrapper struct carries the postgres_type annotation in addition to an sql_type annotation.
+3) Endpoint: The Rust struct with an sql_type annotation referring to the wrapper struct ServiceEndpoint
+
+Make sure all of those types match correctly.
+
+### Rust Struct Service
+
+The service struct gets its serialization and deserialization implementation generated by Diesel, so that saves some
+typing.
+On the other hand, it is a good practice to implement database operations on the actual type itself.
+The wisdom here is twofold. For one, you cannot separate the database operation from the type,
+therefore it makes sense to implement the operations on the type itself. This also hides implementation details
+through encapsulation,
+as [discussed in the book](https://doc.rust-lang.org/book/ch17-01-what-is-oo.html#encapsulation-that-hides-implementation-details).
+
+Second, you gain a single point of maintenance because, in case your table definition changes, you have to update the
+Rust type anyway, and because Diesel is statically checked, the compiler will immediately point out where your
+operations need to be corrected to match the new type definition.
+
+The requirements stated that we want CRUD operations, and type-based programming suggests creating a different type per
+operation. Because we use external primary keys, we don’t need an additional type for the delete operation; therefore,
+in total we create three service types:
+
+1) Service: For read operations
+2) CreateService: For create operations
+3) UpdateService: For update operations
+
+The Service and CreateService have identical fields. Also, both types require a primary key annotation in addition to
+the table name. The UpdateService, however, has each field wrapped in an Option type. When the Option type is Some,
+Diesel knows that this field needs updating. If the field is set to None, Diesel ignores it.
+
+```rust
+#[derive(Debug, Clone, Queryable, Selectable)]
+#[diesel(table_name= crate::schema::smdb::service, primary_key(service_id))]
+pub struct Service {
+    pub service_id: i32,
+    pub name: String,
+    pub version: i32,
+    pub online: bool,
+    pub description: String,
+    pub health_check_uri: String,
+    pub base_uri: String,
+    pub dependencies: Vec<Option<i32>>,
+    pub endpoints: Vec<Option<Endpoint>>,
+}
+
+#[derive(Debug, Clone, Queryable, Insertable)]
+#[diesel(table_name= crate::schema::smdb::service, primary_key(service_id))]
+pub struct CreateService {
+    pub service_id: i32,
+    // ... skipped
+    pub endpoints: Vec<Option<Endpoint>>,
+}
+
+#[derive(Debug, Clone, Queryable, Insertable, AsChangeset)]
+#[diesel(table_name= crate::schema::smdb::service)]
+pub struct UpdateService {
+    pub name: Option<String>,
+    pub version: Option<i32>,
+    pub online: Option<bool>,
+    pub description: Option<String>,
+    pub health_check_uri: Option<String>,
+    pub base_uri: Option<String>,
+    pub dependencies: Option<Vec<Option<i32>>>,
+    pub endpoints: Option<Vec<Option<Endpoint>>>,
+}
+```
+
+Next, let’s implement the CRUD operations on the service type.
+Remember the handy connection type alias defined earlier?
+This service implementation is the place to use it.
+
+### **Create**
+
+```rust
+impl Service {
+    pub fn create(db: &mut Connection, item: &CreateService) -> QueryResult<Service> {
+        insert_into(crate::schema::smdb::service::table)
+            .values(item)
+            .get_result::<Service>(db)
+    }
+}
+```
+
+The insert_into function needs the table as a target and the value to insert. It is a common convention to return the
+inserted value so that the call site can verify that the insert completed correctly.
+
+### **Read**
+
+```rust
+    pub fn read(db: &mut Connection, param_service_id: i32) -> QueryResult<Service> {
+        service
+            .filter(service_id.eq(param_service_id))
+            .first::<Service>(db)
+    }
+```
+
+Each service has one unique ID and therefore querying for a service ID returns either exactly one result or an error.
+That means the read operation completes by capturing and returning the first result. In case you have data that may
+return multiple entries for a search key, you must change the return type of the read method to QueryResult<Vec<Service>>
+and use load instead of first, just as shown in the read_all method.
+
+### **Read All**
+
+```rust
+    pub fn read_all(db: &mut Connection) -> QueryResult<Vec<Service>> {
+        service.load::<Service>(db)
+    }
+```
+
+Here, we just load everything from the service table and return the entire result as a vector. Note, it is expected that
+the service table only has relatively few entries. For tables that hold a large amount of data, you can implement
+pagination, add a limit clause, or use some kind of filtering to reduce the number of returned values.
+
+### **Update**
+
+```rust
+    pub fn update(
+        db: &mut Connection,
+        param_service_id: i32,
+        item: &UpdateService,
+    ) -> QueryResult<Service> {
+        diesel::update(service.filter(service_id.eq(param_service_id)))
+            .set(item)
+            .returning(Service::as_returning())
+            .get_result(db)
+    }
+```
+
+The update method, similar to insert, requires the target table and the update values as arguments and returns the updated
+service as a result. Notice, the parameter is of type UpdateService to ensure compiler verification.
+
+### **Delete**
+
+```rust
+    pub fn delete(db: &mut Connection, param_service_id: i32) -> QueryResult<usize> {
+        diesel::delete(service.filter(service_id.eq(param_service_id))).execute(db)
+    }
+```
+
+Delete is a standard function provided by Diesel. Notice, the filter searches over the primary key to ensure only one
+service with the matching ID gets deleted. However, if you were to filter over a non-unique attribute, you may end up
+deleting more data than you thought you would. In that case, always run the corresponding SQL query in a SQL console to
+double check that your filter criterion returns exactly what you want it to return.
+
+With the CRUD methods implemented, it’s time to look at the more interesting part of the requirements.
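+
+Before moving on, here is a minimal usage sketch of the CRUD operations above. It is not part of the example crate;
+it assumes you already hold a pooled connection, and the service values are made up for illustration:
+
+```rust
+fn crud_example(db: &mut Connection) -> QueryResult<()> {
+    // Create: with external primary keys, we pick the ID ourselves.
+    let new_service = CreateService::new(
+        42,
+        "billing".to_string(),
+        1,
+        true,
+        "example billing service".to_string(),
+        "http://localhost:8080/health".to_string(),
+        "http://localhost:8080".to_string(),
+        vec![],
+        vec![],
+    );
+    let created = Service::create(db, &new_service)?;
+    assert_eq!(created.service_id, 42);
+
+    // Read the service back by its primary key.
+    let _read_back = Service::read(db, 42)?;
+
+    // Update: only fields set to Some are written; None fields are ignored.
+    let update = UpdateService::new(
+        Some("billing_v2".to_string()),
+        Some(2),
+        None,
+        None,
+        None,
+        None,
+        None,
+        None,
+    );
+    let updated = Service::update(db, 42, &update)?;
+    assert_eq!(updated.version, 2);
+
+    // Delete returns the number of affected rows.
+    let deleted_rows = Service::delete(db, 42)?;
+    assert_eq!(deleted_rows, 1);
+
+    Ok(())
+}
+```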
+
+## Additional methods
+
+“Unmentioned in the requirements, a service depends on other services, and it would be great to test, before deploying a
+new service, if all of its dependencies are online.”
+
+Here, we have to implement a method to set a service online and another one to set it offline. In my experience, a few
+non-trivial errors are caused by incorrectly set Boolean flags, so whenever you can, hide Boolean flags in your public
+API and use a dedicated method instead to set them.
+
+Next, we need a method that returns true if a service is online. At this point, we also want to add a method that checks
+if a service actually is in the database. Imagine you want to test if you can deploy a new service with, say, 5
+dependencies, and somehow the database returns no, you can’t. The obvious question is: why? There might be some services
+that are offline, fair enough, just start them, but it might also be possible that somehow a service isn’t currently in
+the database, and that is where you need another check method.
+
+Then, we want another method that takes a collection of service IDs, checks all of them, and returns either true if all
+services are online, or an error message that states which service is offline.
+
+And because we are already here, let’s add another method that returns all services that are online and another one that
+returns all services that are currently offline. The latter is handy to quickly identify any deployment blocker.
+
+### Set Service Online / Offline
+
+Let’s start with a private helper method that sets the Boolean online flag on a service in the database.
+
+```rust
+    fn set_svc_online(
+        db: &mut Connection,
+        param_service_id: i32,
+        param_online: bool,
+    ) -> QueryResult<()> {
+        match diesel::update(service.filter(service_id.eq(param_service_id)))
+            .set(online.eq(param_online))
+            .returning(Service::as_returning())
+            .get_result(db)
+        {
+            Ok(_) => Ok(()),
+            Err(e) => Err(e),
+        }
+    }
+```
+
+Here the generated DSL really shines, as you can easily select a field and set it to a new value. Notice, the return type
+is unit in case of success. Realize that if you were to return the updated Boolean flag, you would cause a non-trivial
+ambiguity at the call site. For setting the Boolean flag to true, the result would be true, which is correct, but there is
+also a risk of the return value being interpreted as the operation having succeeded. And that matters, because if you
+set the flag to false, the same method returns false, which is also correct, but could be misunderstood as the operation
+not having completed because false was returned. A good API is mostly free of ambiguity, and therefore you just
+return Ok(()) in case of success because there is no way to misunderstand a returned Ok.
+
+Now, let’s add two public wrapper methods that set the flag to either true or false.
+
+```rust
+    pub fn set_service_online(db: &mut Connection, param_service_id: i32) -> QueryResult<()> {
+        Self::set_svc_online(db, param_service_id, true)
+    }
+
+    pub fn set_service_offline(db: &mut Connection, param_service_id: i32) -> QueryResult<()> {
+        Self::set_svc_online(db, param_service_id, false)
+    }
+```
+
+### Check Service Exists
+
+Next up, we add a method to check if a service is in the database; the easiest way to implement this is to test whether
+find returns a result or an error.
+
+```rust
+    pub fn check_if_service_id_exists(
+        db: &mut Connection,
+        param_service_id: i32,
+    ) -> QueryResult<bool> {
+        match service.find(param_service_id).first::<Service>(db) {
+            Ok(_) => Ok(true),
+            Err(_) => Ok(false),
+        }
+    }
+```
+
+### Check Service Online
+
+To check if a service is online, we again lean on the generated DSL and select just the online column:
+
+```rust
+    pub fn check_if_service_id_online(
+        db: &mut Connection,
+        param_service_id: i32,
+    ) -> QueryResult<bool> {
+        match service
+            .filter(service_id.eq(param_service_id))
+            .select(online)
+            .first::<bool>(db)
+        {
+            Ok(res) => Ok(res),
+            Err(e) => Err(e),
+        }
+    }
+```
+
+### Get Service Dependencies
+
+Notice, if we select dependencies instead of online, we return all the dependencies of a service. Let’s add another
+method to do just that:
+
+```rust
+    pub fn get_all_service_dependencies(
+        db: &mut Connection,
+        param_service_id: i32,
+    ) -> QueryResult<Vec<Option<i32>>> {
+        service
+            .filter(service_id.eq(param_service_id))
+            .select(dependencies)
+            .first::<Vec<Option<i32>>>(db)
+    }
+```
+
+### Get Service Endpoints
+
+And likewise, if we select endpoints, we get all API endpoints of a service:
+
+```rust
+    pub fn get_all_service_endpoints(
+        db: &mut Connection,
+        param_service_id: i32,
+    ) -> QueryResult<Vec<Option<Endpoint>>> {
+        service
+            .filter(service_id.eq(param_service_id))
+            .select(endpoints)
+            .first::<Vec<Option<Endpoint>>>(db)
+    }
+```
+
+As an observation, when we replace the filter criterion with a test of whether a service is online, we get all services
+with online set to true, and that is very easy to implement:
+
+### Get All Online Services
+
+```rust
+    pub fn get_all_online_services(db: &mut Connection) -> QueryResult<Vec<Service>> {
+        service
+            .filter(online.eq(true))
+            .select(Service::as_returning())
+            .load::<Service>(db)
+    }
+```
+
+### Get All Offline Services
+
+Likewise, we use the same filter to
+find and return all services with online set to false.
+
+```rust
+    pub fn get_all_offline_services(db: &mut Connection) -> QueryResult<Vec<Service>> {
+        service
+            .filter(online.eq(false))
+            .select(Service::as_returning())
+            .load::<Service>(db)
+    }
+```
+
+### Check All Services Online
+
+Finally, we can implement the last method that takes a collection of service IDs, checks all of them, and returns
+either true, or the service ID that is offline.
+
+```rust
+    pub fn check_all_services_online(
+        db: &mut Connection,
+        services: &[i32],
+    ) -> QueryResult<(bool, Option<String>)> {
+        for id in services {
+            match Service::check_if_service_id_online(db, *id) {
+                Ok(res) => {
+                    if !res {
+                        return Ok((false, Some(format!("Service {} is offline", id))));
+                    }
+                }
+                Err(e) => return Err(e),
+            };
+        }
+
+        Ok((true, None))
+    }
+```
+
+Here, it is a matter of taste to return a Boolean flag with an optional string or to encode the case of an offline
+service as a custom error. The rationale for the Boolean flag is that any service might be offline for any reason, so that
+doesn’t count as a database error, whereas if the query itself failed, that would amount to a database error. That said,
+the decision depends largely on the design requirements, and if you already have custom errors defined, then adding a
+ServiceDependencyOfflineError variant should be straightforward.
+
+At this point, the Service type implementation is complete. I have skipped the testing procedure, but in a nutshell, you
+just create a util that connects to a running Postgres server and then returns a connection to run all tests. You find
+all tests in the tests folder.
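+
+For reference, here is a condensed sketch of such a test util, based on the test setup shipped with this example: it
+builds an r2d2 pool from the DATABASE_URL environment variable and hands out pooled connections to the individual test
+functions.
+
+```rust
+use custom_arrays::Connection;
+use diesel::r2d2::{ConnectionManager, Pool};
+use diesel::PgConnection;
+
+// Builds a connection pool against the database named in DATABASE_URL.
+fn postgres_connection_pool() -> Pool<ConnectionManager<PgConnection>> {
+    dotenv::dotenv().ok();
+    let database_url = std::env::var("DATABASE_URL").expect("DATABASE_URL must be set");
+    let manager = ConnectionManager::<PgConnection>::new(database_url);
+    Pool::builder()
+        // Validate each connection when it is checked out of the pool.
+        .test_on_check_out(true)
+        .build(manager)
+        .expect("Could not build connection pool")
+}
+
+#[test]
+fn test_service() {
+    let pool = postgres_connection_pool();
+    let conn: &mut Connection = &mut pool.get().unwrap();
+    // ... run the individual test functions with `conn` ...
+}
+```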
+
+## Applied Best Practices
+
+Implementing the database operations on the Service type is simple and straightforward,
+and follows three best practices:
+
+1) Take the database connection as a parameter
+2) Prefer the generated DSL over custom queries whenever possible
+3) Add type annotations on database return types
+
+### Database connection as a parameter
+
+The first one refers to the idea that no type should hold application state, and a database connection definitely counts
+as application state. Instead, you would write a database connection manager
+that manages a Postgres connection pool and then calls into the database methods implemented
+on the service type, giving each of them a pooled connection as a parameter (see the sketch at the end of this README).
+
+### Prefer the generated DSL over custom queries
+
+The second one refers to the idea that non-complex queries are usually easier and more composable when expressed in the
+generated DSL than in custom SQL queries, and therefore the DSL is preferred in that case. However, if you have to write
+complex transactions, there is a chance the DSL isn’t cutting it, so in that scenario you may end up writing custom SQL.
+That said, the DSL gets you a long way before you have to write custom SQL.
+
+### Add type annotations
+
+The last one, type annotations on database return types, isn’t that obvious because Rust can infer the database return
+type from the method return type, so why bother? Why would you write get_result::<i64>(db) instead of just
+get_result(db), you may wonder? Here is the big secret: type inference in Rust occasionally needs a hint. When you have
+annotated the database return type and then want to daisy-chain another operation via map or and_then, without the type
+annotation you get a compile error. If you have the type annotation in place, you can add more operations whenever you
+want and things compile right away.
+To illustrate the last point, the count operator in Postgres only returns an i64 integer. Suppose your application
+expects a u64 from the persistence API. Wouldn’t it be nice to make the conversion on the fly?
+
+Let’s take a closer look. Conventionally, you write the count operation like so:
+
+```rust
+    pub fn count(db: &mut Connection) -> QueryResult<i64> {
+        service.count().get_result::<i64>(db)
+    }
+```
+
+Notice, the get_result has a type annotation for i64. If you remove it, the code still compiles. But leave it there for
+a moment. Let’s add a map operation that takes the i64 returned from the count query and converts it into a u64. Notice,
+we have to update the return type of the method as well.
+
+```rust
+    pub fn count_u64(db: &mut Connection) -> QueryResult<u64> {
+        service.count()
+            .get_result::<i64>(db)
+            .map(|c| c as u64)
+    }
+```
+
+Run the compiler and see that it works. Now, if you were to remove the type annotation from get_result, you would get a
+compile error saying that the trait FromSql is not implemented.
+
+If you write a database layer for an existing system, this technique comes in handy as you can seamlessly convert
+between Diesel DB types and your target types while leaving your target types as they are. And because you never know
+when you have to do this, it’s generally recommended to add type annotations to DB return types.
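+
+To close the loop on the first best practice, here is a hedged sketch of what such a connection manager could look
+like. The ServiceManager name and its API are made up for illustration; only the pool construction and the call into
+Service::read mirror the example crate:
+
+```rust
+use custom_arrays::model::service::Service;
+use custom_arrays::Connection;
+use diesel::r2d2::{ConnectionManager, Pool};
+use diesel::{PgConnection, QueryResult};
+
+// Holds the application state (the pool); the Service type itself stays stateless.
+pub struct ServiceManager {
+    pool: Pool<ConnectionManager<PgConnection>>,
+}
+
+impl ServiceManager {
+    pub fn new(database_url: &str) -> Self {
+        let manager = ConnectionManager::<PgConnection>::new(database_url);
+        let pool = Pool::builder()
+            .build(manager)
+            .expect("Could not build connection pool");
+        Self { pool }
+    }
+
+    // Checks out a pooled connection and passes it to the stateless DB method.
+    pub fn read_service(&self, service_id: i32) -> QueryResult<Service> {
+        let mut conn: Connection = self.pool.get().expect("Failed to get a pooled connection");
+        Service::read(&mut conn, service_id)
+    }
+}
+```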
+ diff --git a/examples/postgres/custom_arrays/diesel.toml b/examples/postgres/custom_arrays/diesel.toml new file mode 100644 index 000000000000..334408479f8f --- /dev/null +++ b/examples/postgres/custom_arrays/diesel.toml @@ -0,0 +1,11 @@ +# For documentation on how to configure this file, +# see https://diesel.rs/guides/configuring-diesel-cli + +[print_schema] +file = "src/schema.rs" +custom_type_derives = ["diesel::query_builder::QueryId", "Clone"] + +schema = "smdb" + +[migrations_directory] +dir = "migrations" \ No newline at end of file diff --git a/examples/postgres/custom_arrays/migrations/.keep b/examples/postgres/custom_arrays/migrations/.keep new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/examples/postgres/custom_arrays/migrations/00000000000000_diesel_initial_setup/down.sql b/examples/postgres/custom_arrays/migrations/00000000000000_diesel_initial_setup/down.sql new file mode 100644 index 000000000000..a9f526091194 --- /dev/null +++ b/examples/postgres/custom_arrays/migrations/00000000000000_diesel_initial_setup/down.sql @@ -0,0 +1,6 @@ +-- This file was automatically created by Diesel to setup helper functions +-- and other internal bookkeeping. This file is safe to edit, any future +-- changes will be added to existing projects as new migrations. + +DROP FUNCTION IF EXISTS diesel_manage_updated_at(_tbl regclass); +DROP FUNCTION IF EXISTS diesel_set_updated_at(); diff --git a/examples/postgres/custom_arrays/migrations/00000000000000_diesel_initial_setup/up.sql b/examples/postgres/custom_arrays/migrations/00000000000000_diesel_initial_setup/up.sql new file mode 100644 index 000000000000..d68895b1a7b7 --- /dev/null +++ b/examples/postgres/custom_arrays/migrations/00000000000000_diesel_initial_setup/up.sql @@ -0,0 +1,36 @@ +-- This file was automatically created by Diesel to setup helper functions +-- and other internal bookkeeping. This file is safe to edit, any future +-- changes will be added to existing projects as new migrations. 
+
+
+
+
+-- Sets up a trigger for the given table to automatically set a column called
+-- `updated_at` whenever the row is modified (unless `updated_at` was included
+-- in the modified columns)
+--
+-- # Example
+--
+-- ```sql
+-- CREATE TABLE users (id SERIAL PRIMARY KEY, updated_at TIMESTAMP NOT NULL DEFAULT NOW());
+--
+-- SELECT diesel_manage_updated_at('users');
+-- ```
+CREATE OR REPLACE FUNCTION diesel_manage_updated_at(_tbl regclass) RETURNS VOID AS $$
+BEGIN
+    EXECUTE format('CREATE TRIGGER set_updated_at BEFORE UPDATE ON %s
+                    FOR EACH ROW EXECUTE PROCEDURE diesel_set_updated_at()', _tbl);
+END;
+$$ LANGUAGE plpgsql;
+
+CREATE OR REPLACE FUNCTION diesel_set_updated_at() RETURNS trigger AS $$
+BEGIN
+    IF (
+        NEW IS DISTINCT FROM OLD AND
+        NEW.updated_at IS NOT DISTINCT FROM OLD.updated_at
+    ) THEN
+        NEW.updated_at := current_timestamp;
+    END IF;
+    RETURN NEW;
+END;
+$$ LANGUAGE plpgsql;
diff --git a/examples/postgres/custom_arrays/migrations/2024-08-15-070500_services/down.sql b/examples/postgres/custom_arrays/migrations/2024-08-15-070500_services/down.sql
new file mode 100644
index 000000000000..2068b001f400
--- /dev/null
+++ b/examples/postgres/custom_arrays/migrations/2024-08-15-070500_services/down.sql
@@ -0,0 +1,5 @@
+-- This file should undo anything in `up.sql`
+DROP TABLE IF EXISTS smdb.service;
+DROP TYPE IF EXISTS smdb.service_endpoint CASCADE;
+DROP TYPE IF EXISTS smdb.protocol_type CASCADE;
+DROP schema IF EXISTS smdb;
\ No newline at end of file
diff --git a/examples/postgres/custom_arrays/migrations/2024-08-15-070500_services/up.sql b/examples/postgres/custom_arrays/migrations/2024-08-15-070500_services/up.sql
new file mode 100644
index 000000000000..9a0c021b7c1a
--- /dev/null
+++ b/examples/postgres/custom_arrays/migrations/2024-08-15-070500_services/up.sql
@@ -0,0 +1,29 @@
+-- Your SQL goes here
+CREATE SCHEMA IF NOT EXISTS smdb;
+
+CREATE TYPE smdb.protocol_type AS ENUM (
+    'UnknownProtocol',
+    'GRPC',
+    'HTTP',
+    'UDP'
+);
+
+CREATE TYPE smdb.service_endpoint AS (
+    "name" Text,
+    "version" INTEGER,
+    "base_uri" Text,
+    "port" INTEGER,
+    "protocol" smdb.protocol_type
+);
+
+CREATE TABLE smdb.service(
+    "service_id" INTEGER NOT NULL PRIMARY KEY,
+    "name" Text NOT NULL,
+    "version" INTEGER NOT NULL,
+    "online" BOOLEAN NOT NULL,
+    "description" Text NOT NULL,
+    "health_check_uri" Text NOT NULL,
+    "base_uri" Text NOT NULL,
+    "dependencies" INTEGER[] NOT NULL,
+    "endpoints" smdb.service_endpoint[] NOT NULL
+);
\ No newline at end of file
diff --git a/examples/postgres/custom_arrays/src/lib.rs b/examples/postgres/custom_arrays/src/lib.rs
new file mode 100644
index 000000000000..5845dc0f98fa
--- /dev/null
+++ b/examples/postgres/custom_arrays/src/lib.rs
@@ -0,0 +1,5 @@
+pub mod model;
+mod schema;
+
+pub type Connection =
+    diesel::r2d2::PooledConnection<diesel::r2d2::ConnectionManager<diesel::PgConnection>>;
diff --git a/examples/postgres/custom_arrays/src/model/endpoint_type/endpoint_type_impl.rs b/examples/postgres/custom_arrays/src/model/endpoint_type/endpoint_type_impl.rs
new file mode 100644
index 000000000000..066d3227f78f
--- /dev/null
+++ b/examples/postgres/custom_arrays/src/model/endpoint_type/endpoint_type_impl.rs
@@ -0,0 +1,37 @@
+use crate::model::endpoint_type::Endpoint;
+use crate::model::protocol_type::PgProtocolType;
+use diesel::deserialize::FromSql;
+use diesel::pg::{Pg, PgValue};
+use diesel::serialize::{Output, ToSql};
+use diesel::sql_types::{Integer, Record, Text};
+use diesel::{deserialize, serialize};
+
+impl ToSql<crate::schema::smdb::sql_types::ServiceEndpoint, Pg> for Endpoint {
+    fn to_sql<'b>(&'b self, out: &mut Output<'b, '_, Pg>) -> serialize::Result {
+        serialize::WriteTuple::<(Text, Integer, Text, Integer, PgProtocolType)>::write_tuple(
+            &(
+                self.name.to_owned(),
+                self.version.to_owned(),
+                self.base_uri.to_owned(),
+                self.port.to_owned(),
+                self.protocol.to_owned(),
+            ),
+            &mut out.reborrow(),
+        )
+    }
+}
+
+impl FromSql<crate::schema::smdb::sql_types::ServiceEndpoint, Pg> for Endpoint {
+    fn from_sql(bytes: PgValue<'_>) -> deserialize::Result<Self> {
+        let (name, version, base_uri, port, protocol) =
+            FromSql::<Record<(Text, Integer, Text, Integer, PgProtocolType)>, Pg>::from_sql(bytes)?;
+
+        Ok(Endpoint {
+            name,
+            version,
+            base_uri,
+            port,
+            protocol,
+        })
+    }
+}
diff --git a/examples/postgres/custom_arrays/src/model/endpoint_type/mod.rs b/examples/postgres/custom_arrays/src/model/endpoint_type/mod.rs
new file mode 100644
index 000000000000..09c1b41cbab9
--- /dev/null
+++ b/examples/postgres/custom_arrays/src/model/endpoint_type/mod.rs
@@ -0,0 +1,33 @@
+use crate::model::protocol_type::ProtocolType;
+use diesel::deserialize::FromSqlRow;
+use diesel::expression::AsExpression;
+
+mod endpoint_type_impl;
+
+#[derive(Debug, Clone, FromSqlRow, AsExpression, PartialEq, Eq)]
+#[diesel(sql_type=crate::schema::smdb::sql_types::ServiceEndpoint)]
+pub struct Endpoint {
+    pub name: String,
+    pub version: i32,
+    pub base_uri: String,
+    pub port: i32,
+    pub protocol: ProtocolType,
+}
+
+impl Endpoint {
+    pub fn new(
+        name: String,
+        version: i32,
+        base_uri: String,
+        port: i32,
+        protocol: ProtocolType,
+    ) -> Self {
+        Self {
+            name,
+            version,
+            base_uri,
+            port,
+            protocol,
+        }
+    }
+}
diff --git a/examples/postgres/custom_arrays/src/model/mod.rs b/examples/postgres/custom_arrays/src/model/mod.rs
new file mode 100644
index 000000000000..c9cf7fa59aca
--- /dev/null
+++ b/examples/postgres/custom_arrays/src/model/mod.rs
@@ -0,0 +1,7 @@
+pub mod endpoint_type;
+pub mod service;
+
+pub mod protocol_type;
+
+pub type Connection =
+    diesel::r2d2::PooledConnection<diesel::r2d2::ConnectionManager<diesel::PgConnection>>;
diff --git a/examples/postgres/custom_arrays/src/model/protocol_type/mod.rs b/examples/postgres/custom_arrays/src/model/protocol_type/mod.rs
new file mode 100644
index 000000000000..5af43e599b65
--- /dev/null
+++ b/examples/postgres/custom_arrays/src/model/protocol_type/mod.rs
@@ -0,0 +1,47 @@
+use diesel::deserialize::{FromSql, FromSqlRow};
+use diesel::expression::AsExpression;
+use diesel::pg::{Pg, PgValue};
+use diesel::serialize::{IsNull, Output, ToSql};
+use diesel::sql_types::SqlType;
+use diesel::{deserialize, serialize};
+use std::io::Write;
+
+// Diesel type mapping requires a struct to derive a SqlType for custom ToSql and FromSql implementations
+#[derive(SqlType)]
+#[diesel(sql_type = protocol_type)]
+#[diesel(postgres_type(name = "protocol_type"))]
+pub struct PgProtocolType;
+
+#[derive(Debug, Clone, FromSqlRow, AsExpression, PartialEq, Eq)]
+#[diesel(sql_type = PgProtocolType)]
+#[allow(clippy::upper_case_acronyms)]
+pub enum ProtocolType {
+    UnknownProtocol,
+    GRPC,
+    HTTP,
+    UDP,
+}
+
+impl ToSql<PgProtocolType, Pg> for ProtocolType {
+    fn to_sql<'b>(&'b self, out: &mut Output<'b, '_, Pg>) -> serialize::Result {
+        match *self {
+            ProtocolType::UnknownProtocol => out.write_all(b"UnknownProtocol")?,
+            ProtocolType::GRPC => out.write_all(b"GRPC")?,
+            ProtocolType::HTTP => out.write_all(b"HTTP")?,
+            ProtocolType::UDP => out.write_all(b"UDP")?,
+        }
+        Ok(IsNull::No)
+    }
+}
+
+impl FromSql<PgProtocolType, Pg> for ProtocolType {
+    fn from_sql(bytes: PgValue<'_>) -> deserialize::Result<Self> {
+        match bytes.as_bytes() {
+            b"UnknownProtocol" => Ok(ProtocolType::UnknownProtocol),
+            b"GRPC" => Ok(ProtocolType::GRPC),
+            b"HTTP" => Ok(ProtocolType::HTTP),
+            b"UDP" => Ok(ProtocolType::UDP),
+            _ => Ok(ProtocolType::UnknownProtocol),
+        }
+    }
+}
diff --git a/examples/postgres/custom_arrays/src/model/service/mod.rs b/examples/postgres/custom_arrays/src/model/service/mod.rs
new file mode 100644
index 000000000000..d58cca7b6040
--- /dev/null
+++ b/examples/postgres/custom_arrays/src/model/service/mod.rs
@@ -0,0 +1,97 @@
+use crate::model::endpoint_type::Endpoint;
+use diesel::{AsChangeset, Insertable, Queryable, Selectable};
+
+mod service_impl;
+
+#[derive(Debug, Clone, Queryable, Selectable)]
+#[diesel(table_name= crate::schema::smdb::service, primary_key(service_id))]
+pub struct Service {
+    pub service_id: i32,
+    pub name: String,
+    pub version: i32,
+    pub online: bool,
+    pub description: String,
+    pub health_check_uri: String,
+    pub base_uri: String,
+    pub dependencies: Vec<Option<i32>>,
+    pub endpoints: Vec<Option<Endpoint>>,
+}
+
+#[derive(Debug, Clone, Queryable, Insertable)]
+#[diesel(table_name= crate::schema::smdb::service, primary_key(service_id))]
+pub struct CreateService {
+    pub service_id: i32,
+    pub name: String,
+    pub version: i32,
+    pub online: bool,
+    pub description: String,
+    pub health_check_uri: String,
+    pub base_uri: String,
+    pub dependencies: Vec<Option<i32>>,
+    pub endpoints: Vec<Option<Endpoint>>,
+}
+
+impl CreateService {
+    #[allow(clippy::too_many_arguments)]
+    pub fn new(
+        service_id: i32,
+        name: String,
+        version: i32,
+        online: bool,
+        description: String,
+        health_check_uri: String,
+        base_uri: String,
+        dependencies: Vec<Option<i32>>,
+        endpoints: Vec<Option<Endpoint>>,
+    ) -> Self {
+        Self {
+            service_id,
+            name,
+            version,
+            online,
+            description,
+            health_check_uri,
+            base_uri,
+            dependencies,
+            endpoints,
+        }
+    }
+}
+
+#[derive(Debug, Clone, Queryable, Insertable, AsChangeset)]
+#[diesel(table_name= crate::schema::smdb::service)]
+pub struct UpdateService {
+    pub name: Option<String>,
+    pub version: Option<i32>,
+    pub online: Option<bool>,
+    pub description: Option<String>,
+    pub health_check_uri: Option<String>,
+    pub base_uri: Option<String>,
+    pub dependencies: Option<Vec<Option<i32>>>,
+    pub endpoints: Option<Vec<Option<Endpoint>>>,
+}
+
+impl UpdateService {
+    #[allow(clippy::too_many_arguments)]
+    pub fn new(
+        name: Option<String>,
+        version: Option<i32>,
+        online: Option<bool>,
+        description: Option<String>,
+        health_check_uri: Option<String>,
+        base_uri: Option<String>,
+        dependencies: Option<Vec<Option<i32>>>,
+        endpoints: Option<Vec<Option<Endpoint>>>,
+    ) -> Self {
+        Self {
+            name,
+            version,
+            online,
+            description,
+            health_check_uri,
+            base_uri,
+            dependencies,
+            endpoints,
+        }
+    }
+}
diff --git a/examples/postgres/custom_arrays/src/model/service/service_impl.rs b/examples/postgres/custom_arrays/src/model/service/service_impl.rs
new file mode 100644
index 000000000000..e2137702581e
--- /dev/null
+++ b/examples/postgres/custom_arrays/src/model/service/service_impl.rs
@@ -0,0 +1,147 @@
+use crate::model::endpoint_type::Endpoint;
+use crate::model::service::{CreateService, Service, UpdateService};
+use crate::model::Connection;
+use crate::schema::smdb::service::dsl::*;
+use diesel::{
+    insert_into, ExpressionMethods, QueryDsl, QueryResult, RunQueryDsl, SelectableHelper,
+};
+
+impl Service {
+    pub fn create(db: &mut Connection, item: &CreateService) -> QueryResult<Service> {
+        insert_into(crate::schema::smdb::service::table)
+            .values(item)
+            .get_result::<Service>(db)
+    }
+
+    pub fn count(db: &mut Connection) -> QueryResult<i64> {
+        service.count().get_result::<i64>(db)
+    }
+
+    pub fn count_u64(db: &mut Connection) -> QueryResult<u64> {
+        service.count().get_result::<i64>(db).map(|c| c as u64)
+    }
+
+    pub fn check_if_service_id_exists(
+        db: &mut Connection,
+        param_service_id: i32,
+    ) -> QueryResult<bool> {
+        match service.find(param_service_id).first::<Service>(db) {
+            Ok(_) => Ok(true),
+            Err(_) => Ok(false),
+        }
+    }
+
+    pub fn check_all_services_online(
+        db: &mut Connection,
+        services: &[i32],
+    ) -> QueryResult<(bool, Option<String>)> {
+        for id in services {
+            match Service::check_if_service_id_online(db, *id) {
+                Ok(res) => {
+                    if !res {
+                        return Ok((false, Some(format!("Service {} is offline", id))));
+                    }
+                }
+                Err(e) => return Err(e),
+            };
+        }
+
+        Ok((true, None))
+    }
+
+    pub fn check_if_service_id_online(
+        db: &mut Connection,
+        param_service_id: i32,
+    ) -> QueryResult<bool> {
+        match service
+            .filter(service_id.eq(param_service_id))
+            .select(online)
+            .first::<bool>(db)
+        {
+            Ok(res) => Ok(res),
+            Err(e) => Err(e),
+        }
+    }
+
+    pub fn get_all_online_services(db: &mut Connection) -> QueryResult<Vec<Service>> {
+        service
+            .filter(online.eq(true))
+            .select(Service::as_returning())
+            .load::<Service>(db)
+    }
+
+    pub fn get_all_offline_services(db: &mut Connection) -> QueryResult<Vec<Service>> {
+        service
+            .filter(online.eq(false))
+            .select(Service::as_returning())
+            .load::<Service>(db)
+    }
+
+    pub fn get_all_service_dependencies(
+        db: &mut Connection,
+        param_service_id: i32,
+    ) -> QueryResult<Vec<Option<i32>>> {
+        service
+            .filter(service_id.eq(param_service_id))
+            .select(dependencies)
+            .first::<Vec<Option<i32>>>(db)
+    }
+
+    pub fn get_all_service_endpoints(
+        db: &mut Connection,
+        param_service_id: i32,
+    ) -> QueryResult<Vec<Option<Endpoint>>> {
+        service
+            .filter(service_id.eq(param_service_id))
+            .select(endpoints)
+            .first::<Vec<Option<Endpoint>>>(db)
+    }
+
+    pub fn read(db: &mut Connection, param_service_id: i32) -> QueryResult<Service> {
+        service
+            .filter(service_id.eq(param_service_id))
+            .first::<Service>(db)
+    }
+
+    pub fn read_all(db: &mut Connection) -> QueryResult<Vec<Service>> {
+        service.load::<Service>(db)
+    }
+
+    pub fn set_service_online(db: &mut Connection, param_service_id: i32) -> QueryResult<()> {
+        Self::set_svc_online(db, param_service_id, true)
+    }
+
+    pub fn set_service_offline(db: &mut Connection, param_service_id: i32) -> QueryResult<()> {
+        Self::set_svc_online(db, param_service_id, false)
+    }
+
+    fn set_svc_online(
+        db: &mut Connection,
+        param_service_id: i32,
+        param_online: bool,
+    ) -> QueryResult<()> {
+        match diesel::update(service.filter(service_id.eq(param_service_id)))
+            .set(online.eq(param_online))
+            .returning(Service::as_returning())
+            .get_result(db)
+        {
+            Ok(_) => Ok(()),
+            Err(e) => Err(e),
+        }
+    }
+
+    pub fn update(
+        db: &mut Connection,
+        param_service_id: i32,
+        item: &UpdateService,
+    ) -> QueryResult<Service> {
+        diesel::update(service.filter(service_id.eq(param_service_id)))
+            .set(item)
+            .returning(Service::as_returning())
+            .get_result(db)
+    }
+
+    pub fn delete(db: &mut Connection, param_service_id: i32) -> QueryResult<usize> {
+        diesel::delete(service.filter(service_id.eq(param_service_id))).execute(db)
+    }
+}
diff --git a/examples/postgres/custom_arrays/src/schema.rs b/examples/postgres/custom_arrays/src/schema.rs
new file mode 100644
index 000000000000..91378b5ced2e
--- /dev/null
+++ b/examples/postgres/custom_arrays/src/schema.rs
@@ -0,0 +1,26 @@
+// @generated automatically by Diesel CLI.
+
+pub mod smdb {
+    pub mod sql_types {
+        #[derive(diesel::query_builder::QueryId, Clone, diesel::sql_types::SqlType)]
+        #[diesel(postgres_type(name = "service_endpoint", schema = "smdb"))]
+        pub struct ServiceEndpoint;
+    }
+
+    diesel::table! {
+        use diesel::sql_types::*;
+        use super::sql_types::ServiceEndpoint;
+
+        smdb.service (service_id) {
+            service_id -> Int4,
+            name -> Text,
+            version -> Int4,
+            online -> Bool,
+            description -> Text,
+            health_check_uri -> Text,
+            base_uri -> Text,
+            dependencies -> Array<Nullable<Int4>>,
+            endpoints -> Array<Nullable<ServiceEndpoint>>,
+        }
+    }
+}
diff --git a/examples/postgres/custom_arrays/tests/mod.rs b/examples/postgres/custom_arrays/tests/mod.rs
new file mode 100644
index 000000000000..67ae88e7ba49
--- /dev/null
+++ b/examples/postgres/custom_arrays/tests/mod.rs
@@ -0,0 +1,2 @@
+#[cfg(test)]
+mod service_tests;
diff --git a/examples/postgres/custom_arrays/tests/service_tests.rs b/examples/postgres/custom_arrays/tests/service_tests.rs
new file mode 100644
index 000000000000..9809478a6c63
--- /dev/null
+++ b/examples/postgres/custom_arrays/tests/service_tests.rs
@@ -0,0 +1,268 @@
+use custom_arrays::model::endpoint_type::Endpoint;
+use custom_arrays::model::protocol_type::ProtocolType;
+use custom_arrays::model::service;
+use custom_arrays::model::service::{CreateService, UpdateService};
+use custom_arrays::Connection;
+use diesel::r2d2::{ConnectionManager, Pool};
+use diesel::PgConnection;
+use dotenv::dotenv;
+use std::env;
+
+fn postgres_connection_pool() -> Pool<ConnectionManager<PgConnection>> {
+    dotenv().ok();
+
+    let database_url = env::var("DATABASE_URL")
+        .or_else(|_| env::var("POSTGRES_DATABASE_URL"))
+        .expect("DATABASE_URL must be set");
+
+    let manager = ConnectionManager::<PgConnection>::new(database_url);
+    Pool::builder()
+        .test_on_check_out(true)
+        .build(manager)
+        .expect("Could not build connection pool")
+}
+
+#[test]
+fn test_service() {
+    let pool = postgres_connection_pool();
+    let conn = &mut pool.get().unwrap();
+
+    println!("Test create!");
+    test_create_service(conn);
+
+    println!("Test count!");
+    test_count_service(conn);
+
+    println!("Test check if exists!");
+    test_check_if_service_id_exists(conn);
+
+    println!("Test check if online!");
+    test_check_if_service_id_online(conn);
+
+    println!("Test get all online services!");
+    test_get_all_online_services(conn);
+
+    println!("Test get all offline services!");
+    test_get_all_offline_services(conn);
+
+    println!("Test get all service dependencies!");
+    test_get_all_service_dependencies(conn);
+
+    println!("Test get all service endpoints!");
+    test_get_all_service_endpoints(conn);
+
+    println!("Test read!");
+    test_service_read(conn);
+
+    println!("Test read_all!");
+    test_service_read_all(conn);
+
+    println!("Test set service online!");
+    test_set_service_online(conn);
+
+    println!("Test set service offline!");
+    test_set_service_offline(conn);
+
+    println!("Test update service!");
+    test_service_update(conn);
+
+    println!("Test delete service!");
+    test_service_delete(conn);
+}
+
+fn test_create_service(conn: &mut Connection) {
+    let grpc_endpoint = Endpoint::new(
+        "test_grpc_endpoint".to_string(),
+        1,
+        "/grpc".to_string(),
+        7070,
+        ProtocolType::GRPC,
+    );
+
+    let http_endpoint = Endpoint::new(
+        "test_http_endpoint".to_string(),
+        1,
+        "/http".to_string(),
+        8080,
+        ProtocolType::HTTP,
+    );
+
+    let endpoints = vec![Some(grpc_endpoint.clone()), Some(http_endpoint.clone())];
+
+    let dependencies = vec![Some(42)];
+
+    let service = CreateService {
+        service_id: 1,
+        name: "test".to_string(),
+        version: 1,
+        online: true,
+        description: "test".to_string(),
+        health_check_uri: "http://example.com".to_string(),
+        base_uri: "http://example.com".to_string(),
+        dependencies,
+        endpoints,
+    };
+
+    let result = service::Service::create(conn, &service);
+
+    dbg!(&result);
+
assert!(result.is_ok()); + + let service = result.unwrap(); + + assert_eq!(service.service_id, 1); + assert_eq!(service.name, "test"); + assert_eq!(service.version, 1); + assert_eq!(service.online, true); + assert_eq!(service.description, "test"); + assert_eq!(service.health_check_uri, "http://example.com"); + assert_eq!(service.base_uri, "http://example.com"); + assert_eq!(service.dependencies, vec![Some(42)]); + assert_eq!( + service.endpoints, + vec![Some(grpc_endpoint.clone()), Some(http_endpoint.clone())] + ); +} + +fn test_count_service(conn: &mut Connection) { + let result = service::Service::count(conn); + assert!(result.is_ok()); + assert_eq!(result.unwrap(), 1); +} + +fn test_check_if_service_id_exists(conn: &mut Connection) { + let result = service::Service::check_if_service_id_exists(conn, 1); + assert!(result.is_ok()); + assert!(result.unwrap()); +} + +fn test_check_if_service_id_online(conn: &mut Connection) { + let result = service::Service::check_if_service_id_online(conn, 1); + assert!(result.is_ok()); + assert!(result.unwrap()); +} + +fn test_get_all_online_services(conn: &mut Connection) { + let result = service::Service::get_all_online_services(conn); + assert!(result.is_ok()); + assert!(result.unwrap().len() > 0); +} + +fn test_get_all_offline_services(conn: &mut Connection) { + let result = service::Service::get_all_offline_services(conn); + assert!(result.is_ok()); + assert_eq!(result.unwrap().len(), 0); +} + +fn test_get_all_service_dependencies(conn: &mut Connection) { + let service_id = 1; + + let result = service::Service::get_all_service_dependencies(conn, service_id); + assert!(result.is_ok()); + assert_eq!(result.unwrap().len(), 1); +} + +fn test_get_all_service_endpoints(conn: &mut Connection) { + let service_id = 1; + + let result = service::Service::get_all_service_endpoints(conn, service_id); + assert!(result.is_ok()); + assert_eq!(result.unwrap().len(), 2); +} + +fn test_service_read(conn: &mut Connection) { + let service_id = 1; + + let result = service::Service::read(conn, service_id); + assert!(result.is_ok()); + + let service = result.unwrap(); + + assert_eq!(service.service_id, 1); + assert_eq!(service.name, "test"); + assert_eq!(service.version, 1); + assert_eq!(service.online, true); + assert_eq!(service.description, "test"); + assert_eq!(service.health_check_uri, "http://example.com"); + assert_eq!(service.base_uri, "http://example.com"); + assert_eq!(service.dependencies, vec![Some(42)]); +} + +fn test_service_read_all(conn: &mut Connection) { + let result = service::Service::read_all(conn); + assert!(result.is_ok()); + + let services = result.unwrap(); + assert!(services.len() > 0); +} + +fn test_set_service_online(conn: &mut Connection) { + let service_id = 1; + + let result = service::Service::set_service_online(conn, service_id); + assert!(result.is_ok()); + + let result = service::Service::check_if_service_id_online(conn, service_id); + assert!(result.is_ok()); + assert!(result.unwrap()); +} + +fn test_set_service_offline(conn: &mut Connection) { + let service_id = 1; + + let result = service::Service::set_service_offline(conn, service_id); + assert!(result.is_ok()); + + let result = service::Service::check_if_service_id_online(conn, service_id); + assert!(result.is_ok()); + assert!(!result.unwrap()); +} + +fn test_service_update(conn: &mut Connection) { + // check if service_id exists so we can update the service + let result = service::Service::check_if_service_id_exists(conn, 1); + assert!(result.is_ok()); + assert!(result.unwrap()); + + 
let update = UpdateService::new( + Some("new_test".to_string()), + Some(2), + Some(true), + None, + None, + None, + None, + None, + ); + + let result = service::Service::update(conn, 1, &update); + assert!(result.is_ok()); + + let service = result.unwrap(); + + assert_eq!(service.service_id, 1); + assert_eq!(service.name, "new_test"); + assert_eq!(service.version, 2); + assert_eq!(service.online, true); + assert_eq!(service.description, "test"); + assert_eq!(service.health_check_uri, "http://example.com"); + assert_eq!(service.base_uri, "http://example.com"); + assert_eq!(service.dependencies.len(), 1); + assert_eq!(service.dependencies, vec![Some(42)]); +} + +fn test_service_delete(conn: &mut Connection) { + let result = service::Service::read(conn, 1); + assert!(result.is_ok()); + + let result = service::Service::delete(conn, 1); + assert!(result.is_ok()); + + let result = service::Service::read(conn, 1); + assert!(result.is_err()); + + let result = service::Service::count(conn); + assert!(result.is_ok()); + assert_eq!(result.unwrap(), 0); +} From 80edcdf0692b52309c7d2a392a80a27dfaf31226 Mon Sep 17 00:00:00 2001 From: Marvin Hansen Date: Fri, 16 Aug 2024 06:03:52 +0800 Subject: [PATCH 23/73] Fixed a bunch of typos in the custom array example Readme. Signed-off-by: Marvin Hansen --- examples/postgres/custom_arrays/README.md | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/examples/postgres/custom_arrays/README.md b/examples/postgres/custom_arrays/README.md index 340907276e99..77eaafb49e73 100644 --- a/examples/postgres/custom_arrays/README.md +++ b/examples/postgres/custom_arrays/README.md @@ -15,7 +15,7 @@ project. Your company decided to create a database of all microservices in its system; the high-level requirements are: -* CRUD: Create, read, update, and delete a service in the databse +* CRUD: Create, read, update, and delete a service in the database * List all offline services and all services that are online * Retrieve all API endpoints for a service @@ -369,7 +369,7 @@ message, make sure to check: 1) Do I have a wrapper struct? 2) Does my a wrapper struct derives SqlType? 3) Does my wrapper type has both, sql_type and postgres_type declared? -4) Are sql_type and postgres_type both refering to the correct Postgres type? +4) Are sql_type and postgres_type both referring to the correct Postgres type? 5) Does my Rust type refers to the wrapper struct type? If all those checks pass and you still see errors, it’s most likely a serialization error. @@ -486,7 +486,7 @@ It’s worth repeating that you are dealing with three types in total: 1) service_endpoint: The Postgres custom type 2) ServiceEndpoint: The wrapper struct generated by Diesel that refers to the Postgres type service_endpoint. Also, only the wrapper struct carries the postgres_type annotation in addition to the an sql_type annotation. -3) Endpoint: The Rust struct with an sql_type annotation refering to the wrapper struct ServiceEndpoint +3) Endpoint: The Rust struct with an sql_type annotation referring to the wrapper struct ServiceEndpoint Make sure all of those types match correctly. @@ -510,7 +510,7 @@ in total we crate 3 service types: 1) Service: For Read 2) CreateService: For create operations -3) UpdateServce: For update operations +3) UpdateService: For update operations The Service and CreateService have identical fields. Also, both types require a primary key annotation in addition to the table name. The UpdateServce, however, has each field wrapped into an option type. 
When the option type is Some, @@ -679,7 +679,7 @@ Here the generated DSL really shines as you can easily select a field and set it is void in case of success. Realize, if you were to return the updated Boolean flag, you would cause a non-trivial ambiguity on the call-site. For setting the Boolean flag true, the result would be true, which is correct, but there is also a risk of the return value being interpreted as the operation having succeeded. And that matters, because if you -set the flag to false, the same method return false, which is also correct, but could be mis-understood as the operation +set the flag to false, the same method return false, which is also correct, but could be misunderstood as the operation didn’t completed because false was returned. A good API is mostly free of ambiguity and therefore, therefore you just return Ok(()) in case of success because there is no way to misunderstand a returned ok. @@ -789,7 +789,7 @@ find and return all services with online set to false. } ``` -### Check All Serices Online +### Check All Services Online Finally, we can implement the last method that takes a collection of service ID’s, checks all of them, and returns either true, or the service ID that is offline. From c2aa26f6df8b7b7d5752d07dd13b91fa42afa0c0 Mon Sep 17 00:00:00 2001 From: Marvin Hansen Date: Fri, 16 Aug 2024 06:14:35 +0800 Subject: [PATCH 24/73] Updated and edited Readme in the custom array example. Signed-off-by: Marvin Hansen --- examples/postgres/custom_arrays/README.md | 23 ++++++++++++----------- 1 file changed, 12 insertions(+), 11 deletions(-) diff --git a/examples/postgres/custom_arrays/README.md b/examples/postgres/custom_arrays/README.md index 77eaafb49e73..e6a5165b8578 100644 --- a/examples/postgres/custom_arrays/README.md +++ b/examples/postgres/custom_arrays/README.md @@ -493,27 +493,26 @@ Make sure all of those types match correctly. ### Rust Struct Service The service struct gets its serialization and deserialization implementation generated by Diesel so that saves some -typing. -On the other hand, it is a good practice to implement database operations on the actual type itself. +typing. On the other hand, it is a good practice to implement database operations on the actual type itself. The wisdom here is twofold. For once, you cannot separate the database operation from the type, -therefore it makes sense to implement the operations on the type itself. This also hides implementation details -in encapsulation, +therefore it makes sense to implement the operations on the type itself. +This also hides implementation details in encapsulation, as [discussed in the book](https://doc.rust-lang.org/book/ch17-01-what-is-oo.html#encapsulation-that-hides-implementation-details). Second, you gain a single point of maintenance because, in case your table definition changes, you have to update the -Rust type anyways and because Diesel is all statically checked, the compiler will immediately point out where your +Rust type anyways and because Diesel is statically checked, the compiler will immediately point out where your operations need to be corrected to match the new type definition. The requirements stated we want CRUD operations, and type-based programming suggest creating a different type per operation. 
Because we use external primary keys, we don’t need an additional type for the delete operation, therefore,
-in total we crate 3 service types:
+in total, we create 3 service types:

1) Service: For Read
2) CreateService: For create operations
3) UpdateService: For update operations

The Service and CreateService have identical fields. Also, both types require a primary key annotation in addition to
-the table name. The UpdateServce, however, has each field wrapped into an option type. When the option type is Some,
+the table name. The UpdateService, however, has each field wrapped into an option type. When the option type is Some,
Diesel knows that this field needs updating. If the field is set to None, Diesel ignores it.

```rust
@@ -635,10 +634,12 @@ With the CRUD methods implemented, it’s time to look at the more interesting p

## Additional methods

-“Unmentioned in the requirements, a service depends on other services, and it would be great to test, before deploying a
+In the requirements, it was stated that:
+
+“... a service depends on other services, and it would be great to test before deploying a
new service, if all of its dependencies are online.”

-Here, we have to implement a method to set a service online and another one to set it offline. By experience, a few
+To do so, we have to implement a method that sets a service online and another one to set it offline. By experience, a few
non-trivial errors are caused by incorrectly set Boolean flags and whenever you can, hide Boolean flags in your public
API and use a dedicated method instead to set them.

@@ -651,8 +652,8 @@ the database and that is where you need another check method.

Then, we want another method that takes a collection of service ID’s, checks all of them and returns either true if all
services are online, or returns an error message that states which service is offline.

-And because we are already here, let’s add another method that returns all services that are online and another one that
-returns all services that are currently offline. The latter is handy to quickly identify any deployment blocker.
+Finally, let’s add another method that returns all services that are online and another one that
+returns all services that are currently offline. The latter is handy to quickly identify a deployment blocker.

### Set Service Online / Offline

From 231616429dcc5e87f2a90c907a097a86b7f94926 Mon Sep 17 00:00:00 2001
From: Marvin Hansen
Date: Fri, 16 Aug 2024 06:29:59 +0800
Subject: [PATCH 25/73] Applied multiple lints to tests in custom array example. Added custom array example to GH CI action.
Signed-off-by: Marvin Hansen --- examples/postgres/custom_arrays/tests/service_tests.rs | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/examples/postgres/custom_arrays/tests/service_tests.rs b/examples/postgres/custom_arrays/tests/service_tests.rs index 9809478a6c63..7a98312b4b04 100644 --- a/examples/postgres/custom_arrays/tests/service_tests.rs +++ b/examples/postgres/custom_arrays/tests/service_tests.rs @@ -114,7 +114,7 @@ fn test_create_service(conn: &mut Connection) { assert_eq!(service.service_id, 1); assert_eq!(service.name, "test"); assert_eq!(service.version, 1); - assert_eq!(service.online, true); + assert!(service.online); assert_eq!(service.description, "test"); assert_eq!(service.health_check_uri, "http://example.com"); assert_eq!(service.base_uri, "http://example.com"); @@ -146,7 +146,7 @@ fn test_check_if_service_id_online(conn: &mut Connection) { fn test_get_all_online_services(conn: &mut Connection) { let result = service::Service::get_all_online_services(conn); assert!(result.is_ok()); - assert!(result.unwrap().len() > 0); + assert!(!result.unwrap().is_empty()); } fn test_get_all_offline_services(conn: &mut Connection) { @@ -182,7 +182,7 @@ fn test_service_read(conn: &mut Connection) { assert_eq!(service.service_id, 1); assert_eq!(service.name, "test"); assert_eq!(service.version, 1); - assert_eq!(service.online, true); + assert!(service.online); assert_eq!(service.description, "test"); assert_eq!(service.health_check_uri, "http://example.com"); assert_eq!(service.base_uri, "http://example.com"); @@ -194,7 +194,7 @@ fn test_service_read_all(conn: &mut Connection) { assert!(result.is_ok()); let services = result.unwrap(); - assert!(services.len() > 0); + assert!(!services.is_empty()); } fn test_set_service_online(conn: &mut Connection) { @@ -244,7 +244,7 @@ fn test_service_update(conn: &mut Connection) { assert_eq!(service.service_id, 1); assert_eq!(service.name, "new_test"); assert_eq!(service.version, 2); - assert_eq!(service.online, true); + assert!(service.online); assert_eq!(service.description, "test"); assert_eq!(service.health_check_uri, "http://example.com"); assert_eq!(service.base_uri, "http://example.com"); From 5b01b942aa80d630a007f823ab8010bf00883cce Mon Sep 17 00:00:00 2001 From: Marvin Hansen Date: Fri, 16 Aug 2024 06:57:39 +0800 Subject: [PATCH 26/73] Fixed tests in custom array example. 
Signed-off-by: Marvin Hansen --- .../migrations/2024-08-15-070500_services/down.sql | 4 ++-- .../migrations/2024-08-15-070500_services/up.sql | 8 ++++---- examples/postgres/custom_arrays/src/schema.rs | 2 +- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/examples/postgres/custom_arrays/migrations/2024-08-15-070500_services/down.sql b/examples/postgres/custom_arrays/migrations/2024-08-15-070500_services/down.sql index 2068b001f400..11dba6558335 100644 --- a/examples/postgres/custom_arrays/migrations/2024-08-15-070500_services/down.sql +++ b/examples/postgres/custom_arrays/migrations/2024-08-15-070500_services/down.sql @@ -1,5 +1,5 @@ -- This file should undo anything in `up.sql` DROP TABLE IF EXISTS smdb.service; -DROP TYPE IF EXISTS smdb.service_endpoint CASCADE; -DROP TYPE IF EXISTS smdb.protocol_type CASCADE; +DROP TYPE IF EXISTS service_endpoint CASCADE; +DROP TYPE IF EXISTS protocol_type CASCADE; DROP schema IF EXISTS smdb; \ No newline at end of file diff --git a/examples/postgres/custom_arrays/migrations/2024-08-15-070500_services/up.sql b/examples/postgres/custom_arrays/migrations/2024-08-15-070500_services/up.sql index 9a0c021b7c1a..9b473e75fdcd 100644 --- a/examples/postgres/custom_arrays/migrations/2024-08-15-070500_services/up.sql +++ b/examples/postgres/custom_arrays/migrations/2024-08-15-070500_services/up.sql @@ -1,19 +1,19 @@ -- Your SQL goes here CREATE SCHEMA IF NOT EXISTS smdb; -CREATE TYPE smdb.protocol_type AS ENUM ( +CREATE TYPE protocol_type AS ENUM ( 'UnknownProtocol', 'GRPC', 'HTTP', 'UDP' ); -CREATE TYPE smdb.service_endpoint AS ( +CREATE TYPE service_endpoint AS ( "name" Text, "version" INTEGER, "base_uri" Text, "port" INTEGER, - "protocol" smdb.protocol_type + "protocol" protocol_type ); CREATE TABLE smdb.service( @@ -25,5 +25,5 @@ CREATE TABLE smdb.service( "health_check_uri" Text NOT NULL, "base_uri" Text NOT NULL, "dependencies" INTEGER[] NOT NULL, - "endpoints" smdb.service_endpoint[] NOT NULL + "endpoints" service_endpoint[] NOT NULL ); \ No newline at end of file diff --git a/examples/postgres/custom_arrays/src/schema.rs b/examples/postgres/custom_arrays/src/schema.rs index 91378b5ced2e..8b6e84a6af49 100644 --- a/examples/postgres/custom_arrays/src/schema.rs +++ b/examples/postgres/custom_arrays/src/schema.rs @@ -3,7 +3,7 @@ pub mod smdb { pub mod sql_types { #[derive(diesel::query_builder::QueryId, Clone, diesel::sql_types::SqlType)] - #[diesel(postgres_type(name = "service_endpoint", schema = "smdb"))] + #[diesel(postgres_type(name = "service_endpoint"))] pub struct ServiceEndpoint; } From a46e66060df66f7b7d49f9621166bff027767a69 Mon Sep 17 00:00:00 2001 From: Marvin Hansen Date: Fri, 16 Aug 2024 07:07:22 +0800 Subject: [PATCH 27/73] Updated Readme in custom array example to reflect correct custom schema usage. Signed-off-by: Marvin Hansen --- examples/postgres/custom_arrays/README.md | 49 +++++++++++++---------- 1 file changed, 27 insertions(+), 22 deletions(-) diff --git a/examples/postgres/custom_arrays/README.md b/examples/postgres/custom_arrays/README.md index e6a5165b8578..bba47610c815 100644 --- a/examples/postgres/custom_arrays/README.md +++ b/examples/postgres/custom_arrays/README.md @@ -74,10 +74,12 @@ a good security practice. 
To create a new schema with Diesel, you follow three s 2) Declare the schema in the migration up/down.sql files 3) Prefix all table names in the migration up/down.sql files with the schema -Postgres uses internally a schema as something like a search path for a table and, by default, searches in the public -schema for a table. If that fails, Postgres returns an error that the table does not exists (in the default schema). -Because of the search path mechanism, you only have to declare the schema name and consistently prefix the tables in the -schema with the schema name and that’s it. +Postgres uses internally a schema as something like a search path for a table and, by default, +searches in the public schema for a table. If that fails, Postgres returns an error that the table does not exists (in the default schema). +Because of the search path mechanism, you only have to declare the schema name and consistently prefix thetables in the schema with the schema name and that’s it. + +It is important to know that a custom schema only applies to tables. Custom types and enum still +reside in the default public schema. In your diesel.toml file, add the following entry: @@ -114,7 +116,7 @@ Postgres ENUM in your up.sql looks like this: -- Your SQL goes here CREATE SCHEMA IF NOT EXISTS smdb; -CREATE TYPE smdb.protocol_type AS ENUM ( +CREATE TYPE protocol_type AS ENUM ( 'UnknownProtocol', 'GRPC', 'HTTP', @@ -122,11 +124,10 @@ CREATE TYPE smdb.protocol_type AS ENUM ( ); ``` -Notice, the table name is prefixed by the DB schema to ensure Postgres find it. Also, you add the corresponding drop -type operation to your down.sql file: +Add the corresponding drop type operation to your down.sql file: ```sql -DROP TYPE IF EXISTS smdb.protocol_type CASCADE; +DROP TYPE IF EXISTS protocol_type CASCADE; DROP schema IF EXISTS smdb; ``` @@ -147,20 +148,20 @@ your endpoint type looks like this in your up.sql: ```sql -- Your SQL goes here -CREATE TYPE smdb.service_endpoint AS ( +CREATE TYPE service_endpoint AS ( "name" Text, "version" INTEGER, "base_uri" Text, "port" INTEGER, - "protocol" smdb.protocol_type + "protocol" protocol_type ); ``` And the matching drop operation in the down.sql file: ```sql -DROP TYPE IF EXISTS smdb.service_endpoint CASCADE; -DROP TYPE IF EXISTS smdb.protocol_type CASCADE; +DROP TYPE IF EXISTS service_endpoint CASCADE; +DROP TYPE IF EXISTS protocol_type CASCADE; DROP schema IF EXISTS smdb; ``` @@ -203,24 +204,28 @@ CREATE TABLE smdb.service( "health_check_uri" Text NOT NULL, "base_uri" Text NOT NULL, "dependencies" INTEGER[] NOT NULL, - "endpoints" smdb.service_endpoint[] NOT NULL + "endpoints" service_endpoint[] NOT NULL ); ``` -And add the matching drop statement to your down.sql file: +As mentioned earlier, the custom schema applies to tables therefore the service table is +prefixed with the schema name whereas the endpoint type is not as it resides in the public schema. + +Add the matching drop statement to your down.sql file: ```sql -- This file should undo anything in `up.sql` DROP TABLE IF EXISTS smdb.service; -DROP TYPE IF EXISTS smdb.service_endpoint CASCADE; -DROP TYPE IF EXISTS smdb.protocol_type CASCADE; +DROP TYPE IF EXISTS service_endpoint CASCADE; +DROP TYPE IF EXISTS protocol_type CASCADE; DROP schema IF EXISTS smdb; ``` A few notes on the service table: -* Notice the schema prefix appears in both, the table name and in referenced types -* The array type follows the usual convention type[] +* Notice the schema prefix appears only in the table name. 
+* The custom type and custom Enum do not have any prefix because they are in the default schema. +* The array type follows the usual convention type[]. * The NOT NULL attribute means that the array itself must be set; Values inside the array still might be null. The Diesel team decided that array values are nullable by default because there so many things that may go wrong when @@ -233,19 +238,19 @@ In total, the up.sql looks as below: -- Your SQL goes here CREATE SCHEMA IF NOT EXISTS smdb; -CREATE TYPE smdb.protocol_type AS ENUM ( +CREATE TYPE protocol_type AS ENUM ( 'UnknownProtocol', 'GRPC', 'HTTP', 'UDP' ); -CREATE TYPE smdb.service_endpoint AS ( +CREATE TYPE service_endpoint AS ( "name" Text, "version" INTEGER, "base_uri" Text, "port" INTEGER, - "protocol" smdb.protocol_type + "protocol" protocol_type ); CREATE TABLE smdb.service( @@ -257,7 +262,7 @@ CREATE TABLE smdb.service( "health_check_uri" Text NOT NULL, "base_uri" Text NOT NULL, "dependencies" INTEGER[] NOT NULL, - "endpoints" smdb.service_endpoint[] NOT NULL + "endpoints" service_endpoint[] NOT NULL ); ``` From e3eeecf5d5de84f96039391b92864fe2d55f5b96 Mon Sep 17 00:00:00 2001 From: Marvin Hansen Date: Fri, 16 Aug 2024 19:31:06 +0800 Subject: [PATCH 28/73] Custom Array Example: Added embedded schema migration and updated the Readme with a section on testing. Signed-off-by: Marvin Hansen --- examples/postgres/custom_arrays/Cargo.toml | 5 +- examples/postgres/custom_arrays/README.md | 219 +++++++++++++++++- examples/postgres/custom_arrays/src/lib.rs | 56 +++++ .../custom_arrays/tests/service_tests.rs | 31 ++- 4 files changed, 302 insertions(+), 9 deletions(-) diff --git a/examples/postgres/custom_arrays/Cargo.toml b/examples/postgres/custom_arrays/Cargo.toml index b953ab0a5f24..156d7749b536 100644 --- a/examples/postgres/custom_arrays/Cargo.toml +++ b/examples/postgres/custom_arrays/Cargo.toml @@ -5,5 +5,8 @@ edition.workspace = true publish = false [dependencies] -diesel = { path = "../../../diesel", version = "2.2.0", features = ["postgres", "r2d2"] } +diesel = { path = "../../../diesel", version = "2.2.0", default-features = false, features = ["postgres", "r2d2"] } +diesel_migrations = { path = "../../../diesel_migrations", version = "2.2.0", default-features = false, features = ["postgres"] } + + dotenv = "0.15.0" diff --git a/examples/postgres/custom_arrays/README.md b/examples/postgres/custom_arrays/README.md index bba47610c815..4d85beb3cb99 100644 --- a/examples/postgres/custom_arrays/README.md +++ b/examples/postgres/custom_arrays/README.md @@ -1,9 +1,23 @@ -# Custom array and custom data in Postgres +# Custom Array and Data Types in Postgres + +Table of Content: +1. [Concepts](#concepts) +2. [The project](#the-project) +3. [Getting started](#getting-started) +4. [Postgres schema](#postgres-schema) +5. [Postgres Enum](#postgres-enum) +6. [Postgres custom type](#postgres-custom-type) +7. [Postgres SERIAL vs INTEGER primary key](#postgres-serial-vs-integer-primary-key) +8. [Postgres array with custom type](#postgres-array-with-custom-type) +9. [Rust Types](#rust-types) +10. [Additional methods](#additional-methods) +11. [Applied Best Practices](#applied-best-practices) +12. [Testing](#testing) In this guide, you learn more about the concepts listed below and illustrate the actual usage in Diesel with a sample project. -**Concepts**: +## Concepts: * Custom Enum * Custom type that uses a custom Enum @@ -190,7 +204,7 @@ depends on other services. 
In order to insert a service that depends on any othe already been inserted or not, you have to know the service ID upfront. With that out of the way, let’s define the service table. -## Postgres array with a custom type +## Postgres array with custom type To define the service table, the add the following to your up.sql file @@ -826,9 +840,7 @@ doesn’t count as a database error whereas if the query would fail, that would decision depends largely on the design requirements and if you already have custom errors defined, then adding a ServiceDependencyOfflineError variant should be straight forward. -At this point, the Service type implementation is complete. I have skipped the testing procedure, but in a nutshell, you -just create a util that connects to a running Postgres server and then returns a connection to run all tests. You find -all tests in the test folder. +At this point, the Service type implementation is complete. ## Applied Best Practices @@ -890,3 +902,198 @@ compile error saying that trait FromSql is not implemented. If you write a database layer for an existing system, this technique comes in handy as you can seamlessly convert between Diesel DB types and your target types while leaving your target types as it. And because you never know when you have to do this, it’s generally recommended to add type annotations to DB return types. +## Testing + +Diesel enables you to test your database schema and migration early on in the development process. +To do so, you need only meed: + +* A util that creates a DB connection +* A util that runs the DB migration +* And your DB integration tests + +### DB Connection Types + +Broadly speaking, there are two ways to handle database connection. One way is to create one connection per application +and use it until the application shuts down. Another way is to create a pool of connection and, +whenever a database connection is needed for an DB operation, a connection is taken from the pool +and after the DB operation has been completed, the connection is returned to the pool. +The first approach is quite common in small application whereas the second one is commonly used in server application +that expected consistent database usage. + +**Important** + +Using database connection pool in Diesel requires you to enable the `r2d2` feature in your cargo.toml file. + +In Diesel, you can handle connections either way, but the only noticeable difference is the actual connection type +used as type parameter. For that reason, a type alias for the DB connection was declared early on because +that would allow you to switch between a single connection and a pooled connection without refactoring. +However, because connection pooling in Diesel is so well supported, I suggest to use it by default. + +### DB Connection Test Util + +Let's start with a small util that returns a DB connection. First you need to get a database URI from somewhere, +then construct a connection pool, and lastly return the connection pool. +Remember, this is just a test util so there is no need to add anything more than necessary. 
+
+```rust
+fn postgres_connection_pool() -> Pool<ConnectionManager<PgConnection>> {
+    dotenv().ok();
+
+    let database_url = env::var("DATABASE_URL")
+        .or_else(|_| env::var("POSTGRES_DATABASE_URL"))
+        .expect("DATABASE_URL must be set");
+
+    let manager = ConnectionManager::<PgConnection>::new(database_url);
+    Pool::builder()
+        .test_on_check_out(true)
+        .build(manager)
+        .expect("Could not build connection pool")
+}
+```
+
+Here, we use the dotenv crate to check whether an environment variable of either DATABASE_URL or POSTGRES_DATABASE_URL is set
+and, if so, parse the string and use it to build a connection pool. Therefore, make sure you have one of
+those variables set in your environment. Notice that the test_on_check_out flag is set to true,
+which means the database will be pinged to check that it is actually reachable and the connection is working correctly.
+
+### DB Migration Util
+
+Diesel can run a database migration in one of two ways. First, you can use the Diesel CLI in your terminal
+to generate, run, or revert migrations manually. This is ideal for development when you frequently
+change the database schema. The second way is programmatically via the embedded migration macro,
+which is ideal for building a single binary with all migrations compiled into it so that
+you don't have to install and run the Diesel CLI on the target machine.
+This simplifies deployment of your application and streamlines continuous integration testing.
+
+**Important**
+
+You must add the crate `diesel_migrations` to your cargo.toml and set the target database as a feature flag
+to enable the embedded migration macro.
+
+To serve both purposes, deployment and CI testing, let's add a new function `run_db_migration` to the lib.rs file
+of the crate that takes a connection as a parameter, checks if the DB connection is valid,
+checks if there are any pending migrations, and if so, runs all pending migrations.
+The implementation is straightforward, as you can see below:
+
+```rust
+pub fn run_db_migration(
+    conn: &mut Connection,
+) -> Result<(), Box<dyn Error + Send + Sync + 'static>> {
+    // Check DB connection!
+    match conn.ping() {
+        Ok(_) => {}
+        Err(e) => {
+            eprint!("[run_db_migration]: Error connecting to database: {}", e);
+            return Err(Box::new(e));
+        }
+    }
+
+    // Check if DB has pending migrations
+    let has_pending = match conn.has_pending_migration(MIGRATIONS) {
+        Ok(has_pending) => has_pending,
+        Err(e) => {
+            eprint!(
+                "[run_db_migration]: Error checking for pending database migrations: {}",
+                e
+            );
+            return Err(e);
+        }
+    };
+
+    // If so, run all pending migrations.
+    if has_pending {
+        match conn.run_pending_migrations(MIGRATIONS) {
+            Ok(_) => Ok(()),
+            Err(e) => {
+                eprint!("[run_db_migration]: Error migrating database: {}", e);
+                Err(e)
+            }
+        }
+    } else {
+        // Nothing pending, just return
+        Ok(())
+    }
+}
+```
+
+The rationale to check for all potential errors is twofold. For one, because the database connection is given as a parameter,
+you just don't know whether the pool that created it has already checked the connection, therefore you better check it
+to catch a dead connection before using it.
+Second, even if you have a correct database connection, this does not guarantee that the migration will succeed.
+There might be some types left from a previously aborted drop operation or
+a random error might happen at any stage of the migration, therefore you have to handle the error where it occurs.
+Also, because you run the DB migration during application startup or before testing,
+ensure you have clear error messages to speed up diagnostics and debugging.
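To see how this util is meant to be used, here is a minimal sketch of wiring the two helpers together at application startup. The `init_database` name is hypothetical; `postgres_connection_pool` and `run_db_migration` are the functions from above.

```rust
// Minimal sketch, assuming the crate is named `custom_arrays` and the
// helpers shown above are in scope; `init_database` is a hypothetical name.
fn init_database() {
    // Build the r2d2 connection pool as shown above.
    let pool = postgres_connection_pool();

    // Borrow one pooled connection and run all pending migrations on it.
    let conn = &mut pool.get().expect("Failed to get pooled connection");
    custom_arrays::run_db_migration(conn).expect("Failed to run DB migration");
}
```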
+
+### DB Integration Tests
+
+Database integration tests often become flaky when executed in parallel, usually because of conflicting read/write operations.
+While modern database systems can handle concurrent data access, test tools with assertions cannot.
+That means test assertions start to fail seemingly at random when executed concurrently.
+There are only very few viable options to deal with this reality:
+
+* Don't use parallel test execution
+* Only parallelize tests with isolated access
+* Do synchronization and test the actual database state before each test and run tests as atomic transactions
+
+Out of the three options, the last one will almost certainly win you an over-engineering award in the unlikely case
+your colleagues appreciate the resulting test complexity. If not, good luck.
+In practice, not using parallel test execution is often not possible either, because of the large number
+of integration tests that run on a CI server. To be clear, strictly sequential testing is a great option
+for small projects with low complexity; it just doesn't scale as the project grows in size and complexity.
+
+And that leaves us only with the middle ground of grouping tables into isolated access groups.
+Suppose your database has 25 tables you are tasked to test.
+Some of them are clearly unrelated, others only require read access to some tables,
+and then you have those where you have to test for multi-table inserts and updates.
+
+Say you can form 7 groups of tables that are clearly independent of each other;
+then you can run those 7 test groups in parallel, but for all practical purposes, within each group
+all tests run in sequence unless you are testing specifically for concurrent read/write access.
+You want to put those tests into a dedicated test group anyway, as they capture errors that are more complex
+than errors from your basic integration tests.
+And it makes sense to stage integration tests into simple functional tests, complex workflow tests,
+and chaos tests that trigger read/write conflicts randomly to test for blind spots.
+
+In any case, the test suite for the service example follows the sequential execution pattern so that it can be
+executed in parallel alongside other test groups without causing randomly failing tests. Specifically,
+the test structure looks as shown below; the full test suite is in the tests folder.
+
+```rust
+#[test]
+fn test_service() {
+    let pool = postgres_connection_pool();
+    let conn = &mut pool.get().unwrap();
+
+    println!("Test DB migration");
+    test_db_migration(conn);
+
+    println!("Test create!");
+    test_create_service(conn);
+
+    println!("Test count!");
+    test_count_service(conn);
+
+    //...
+}
+
+fn test_db_migration(conn: &mut Connection) {
+    let res = custom_arrays::run_db_migration(conn);
+    //dbg!(&res);
+    assert!(res.is_ok());
+}
+```
+
+The idea here is simple yet powerful: there is just one Rust test, so regardless of whether you test with Cargo,
+Nextest, or any other test util, this test will run everything within it in sequence.
+The print statements are usually only shown if a test fails. However, there is one important detail worth mentioning:
+the assert macro often obfuscates the error message, and if you have ever seen a complex stack trace
+full of fnOnce invocations, you know that already. To get a more meaningful error message, just uncomment the dbg!
+statement that prints the result before the assertion and you will see a helpful error message in most cases.
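To make the grouping idea above concrete, here is a minimal sketch of the one-sequential-test-per-group pattern. The `billing_group` name is hypothetical and only illustrates a second, unrelated table group.

```rust
// Sketch: one #[test] per independent table group. A test runner may
// schedule the groups in parallel, while each group runs in sequence.
#[test]
fn service_group() {
    // Run all service table tests here, in order, as shown above.
}

#[test]
fn billing_group() {
    // Hypothetical second group that touches only unrelated tables.
}
```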
+You may have noticed that the DB migration util checks whether there are pending migrations and, if there are none,
+it does nothing and just returns. The wisdom behind this decision is that there are certain corner cases
+that only occur when you run a database test multiple times, and you really want to run the DB migration
+just once to simulate that scenario as realistically as possible. When you test locally, the same logic applies
+and you really only want to run a database migration when the schema has changed.
+
diff --git a/examples/postgres/custom_arrays/src/lib.rs b/examples/postgres/custom_arrays/src/lib.rs
index 5845dc0f98fa..d352381d2147 100644
--- a/examples/postgres/custom_arrays/src/lib.rs
+++ b/examples/postgres/custom_arrays/src/lib.rs
@@ -1,5 +1,61 @@
+use diesel::r2d2::R2D2Connection;
+use diesel_migrations::{embed_migrations, EmbeddedMigrations, MigrationHarness};
+use std::error::Error;
+
pub mod model;
mod schema;

pub type Connection = diesel::r2d2::PooledConnection<diesel::r2d2::ConnectionManager<diesel::PgConnection>>;
+
+pub const MIGRATIONS: EmbeddedMigrations = embed_migrations!("migrations");
+
+/// Runs all pending database migrations.
+///
+/// Will return an error if the database connection is invalid, or if any of the
+/// migrations fail. Otherwise, it returns Ok()
+///
+/// # Errors
+///
+/// * If the database connection is invalid
+/// * If checking for pending database migrations fails
+/// * If any of the database migrations fail
+///
+pub fn run_db_migration(
+    conn: &mut Connection,
+) -> Result<(), Box<dyn Error + Send + Sync + 'static>> {
+    // Check DB connection!
+    match conn.ping() {
+        Ok(_) => {}
+        Err(e) => {
+            eprint!("[run_db_migration]: Error connecting to database: {}", e);
+            return Err(Box::new(e));
+        }
+    }
+
+    // Check if DB has pending migrations
+    let has_pending = match conn.has_pending_migration(MIGRATIONS) {
+        Ok(has_pending) => has_pending,
+        Err(e) => {
+            eprint!(
+                "[run_db_migration]: Error checking for pending database migrations: {}",
+                e
+            );
+            return Err(e);
+        }
+    };
+
+    // If so, run all pending migrations.
+ if has_pending { + match conn.run_pending_migrations(MIGRATIONS) { + Ok(_) => Ok(()), + Err(e) => { + eprint!("[run_db_migration]: Error migrating database: {}", e); + Err(e) + } + } + } else { + // Nothing pending, just return + Ok(()) + } +} diff --git a/examples/postgres/custom_arrays/tests/service_tests.rs b/examples/postgres/custom_arrays/tests/service_tests.rs index 7a98312b4b04..e840716d9656 100644 --- a/examples/postgres/custom_arrays/tests/service_tests.rs +++ b/examples/postgres/custom_arrays/tests/service_tests.rs @@ -22,11 +22,20 @@ fn postgres_connection_pool() -> Pool> { .expect("Could not build connection pool") } +fn test_db_migration(conn: &mut Connection) { + let res = custom_arrays::run_db_migration(conn); + //dbg!(&result); + assert!(res.is_ok()); +} + #[test] fn test_service() { let pool = postgres_connection_pool(); let conn = &mut pool.get().unwrap(); + println!("Test DB migration"); + test_db_migration(conn); + println!("Test create!"); test_create_service(conn); @@ -105,8 +114,7 @@ fn test_create_service(conn: &mut Connection) { let result = service::Service::create(conn, &service); - dbg!(&result); - + // dbg!(&result); assert!(result.is_ok()); let service = result.unwrap(); @@ -127,30 +135,35 @@ fn test_create_service(conn: &mut Connection) { fn test_count_service(conn: &mut Connection) { let result = service::Service::count(conn); + //dbg!(&result); assert!(result.is_ok()); assert_eq!(result.unwrap(), 1); } fn test_check_if_service_id_exists(conn: &mut Connection) { let result = service::Service::check_if_service_id_exists(conn, 1); + //dbg!(&result); assert!(result.is_ok()); assert!(result.unwrap()); } fn test_check_if_service_id_online(conn: &mut Connection) { let result = service::Service::check_if_service_id_online(conn, 1); + //dbg!(&result); assert!(result.is_ok()); assert!(result.unwrap()); } fn test_get_all_online_services(conn: &mut Connection) { let result = service::Service::get_all_online_services(conn); + //dbg!(&result); assert!(result.is_ok()); assert!(!result.unwrap().is_empty()); } fn test_get_all_offline_services(conn: &mut Connection) { let result = service::Service::get_all_offline_services(conn); + //dbg!(&result); assert!(result.is_ok()); assert_eq!(result.unwrap().len(), 0); } @@ -159,6 +172,7 @@ fn test_get_all_service_dependencies(conn: &mut Connection) { let service_id = 1; let result = service::Service::get_all_service_dependencies(conn, service_id); + //dbg!(&result); assert!(result.is_ok()); assert_eq!(result.unwrap().len(), 1); } @@ -167,6 +181,7 @@ fn test_get_all_service_endpoints(conn: &mut Connection) { let service_id = 1; let result = service::Service::get_all_service_endpoints(conn, service_id); + //dbg!(&result); assert!(result.is_ok()); assert_eq!(result.unwrap().len(), 2); } @@ -175,6 +190,7 @@ fn test_service_read(conn: &mut Connection) { let service_id = 1; let result = service::Service::read(conn, service_id); + //dbg!(&result); assert!(result.is_ok()); let service = result.unwrap(); @@ -191,6 +207,7 @@ fn test_service_read(conn: &mut Connection) { fn test_service_read_all(conn: &mut Connection) { let result = service::Service::read_all(conn); + //dbg!(&result); assert!(result.is_ok()); let services = result.unwrap(); @@ -201,9 +218,11 @@ fn test_set_service_online(conn: &mut Connection) { let service_id = 1; let result = service::Service::set_service_online(conn, service_id); + //dbg!(&result); assert!(result.is_ok()); let result = service::Service::check_if_service_id_online(conn, service_id); + //dbg!(&result); 
assert!(result.is_ok()); assert!(result.unwrap()); } @@ -212,9 +231,11 @@ fn test_set_service_offline(conn: &mut Connection) { let service_id = 1; let result = service::Service::set_service_offline(conn, service_id); + //dbg!(&result); assert!(result.is_ok()); let result = service::Service::check_if_service_id_online(conn, service_id); + //dbg!(&result); assert!(result.is_ok()); assert!(!result.unwrap()); } @@ -222,6 +243,7 @@ fn test_set_service_offline(conn: &mut Connection) { fn test_service_update(conn: &mut Connection) { // check if service_id exists so we can update the service let result = service::Service::check_if_service_id_exists(conn, 1); + //dbg!(&result); assert!(result.is_ok()); assert!(result.unwrap()); @@ -237,6 +259,7 @@ fn test_service_update(conn: &mut Connection) { ); let result = service::Service::update(conn, 1, &update); + //dbg!(&result); assert!(result.is_ok()); let service = result.unwrap(); @@ -254,15 +277,19 @@ fn test_service_update(conn: &mut Connection) { fn test_service_delete(conn: &mut Connection) { let result = service::Service::read(conn, 1); + //dbg!(&result); assert!(result.is_ok()); let result = service::Service::delete(conn, 1); + //dbg!(&result); assert!(result.is_ok()); let result = service::Service::read(conn, 1); + //dbg!(&result); assert!(result.is_err()); let result = service::Service::count(conn); + //dbg!(&result); assert!(result.is_ok()); assert_eq!(result.unwrap(), 0); } From b4ec7966f1c25052a10c5879914a6912ba4d2de5 Mon Sep 17 00:00:00 2001 From: Marvin Hansen Date: Fri, 16 Aug 2024 19:37:39 +0800 Subject: [PATCH 29/73] Update examples/postgres/custom_arrays/README.md Co-authored-by: Georg Semmler --- examples/postgres/custom_arrays/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/postgres/custom_arrays/README.md b/examples/postgres/custom_arrays/README.md index 4d85beb3cb99..85d3355039b8 100644 --- a/examples/postgres/custom_arrays/README.md +++ b/examples/postgres/custom_arrays/README.md @@ -423,7 +423,7 @@ impl FromSql for ProtocolType { } ``` -In toSql, it is important to end the implementation with Ok(IsNull::No). In the from_sql, it is important to add a catch +In ToSql, it is important to end the implementation with Ok(IsNull::No). In the from_sql, it is important to add a catch all case that returns an UnknownProtocol instance. In practice, the catch all rarely ever gets triggered, but in those few corner cases when it does, it keeps the application running. 
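For reference, a minimal sketch of what the ToSql side described above can look like; this follows the wrapper-struct pattern from the earlier sections and is not quoted verbatim from the example crate:

```rust
// Sketch: serialize the Rust enum into the Postgres enum labels.
// The labels must match the variants declared in up.sql exactly,
// and the implementation must end with Ok(IsNull::No).
impl ToSql<PgProtocolType, Pg> for ProtocolType {
    fn to_sql<'b>(&'b self, out: &mut Output<'b, '_, Pg>) -> serialize::Result {
        match *self {
            ProtocolType::UnknownProtocol => out.write_all(b"UnknownProtocol")?,
            ProtocolType::GRPC => out.write_all(b"GRPC")?,
            ProtocolType::HTTP => out.write_all(b"HTTP")?,
            ProtocolType::UDP => out.write_all(b"UDP")?,
        }
        Ok(IsNull::No)
    }
}
```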
From 574822f2ee9576144fffc057630b4932a92b3ed9 Mon Sep 17 00:00:00 2001 From: Marvin Hansen Date: Sun, 18 Aug 2024 11:35:37 +0800 Subject: [PATCH 30/73] Update examples/postgres/custom_arrays/migrations/2024-08-15-070500_services/down.sql [no ci] Syle fix in down.sql Co-authored-by: Georg Semmler --- .../migrations/2024-08-15-070500_services/down.sql | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/postgres/custom_arrays/migrations/2024-08-15-070500_services/down.sql b/examples/postgres/custom_arrays/migrations/2024-08-15-070500_services/down.sql index 11dba6558335..8a00c31272cc 100644 --- a/examples/postgres/custom_arrays/migrations/2024-08-15-070500_services/down.sql +++ b/examples/postgres/custom_arrays/migrations/2024-08-15-070500_services/down.sql @@ -2,4 +2,4 @@ DROP TABLE IF EXISTS smdb.service; DROP TYPE IF EXISTS service_endpoint CASCADE; DROP TYPE IF EXISTS protocol_type CASCADE; -DROP schema IF EXISTS smdb; \ No newline at end of file +DROP SCHEMA IF EXISTS smdb; \ No newline at end of file From 61e0f0984a9ca20e106f351d2de60e5aaad077ab Mon Sep 17 00:00:00 2001 From: Marvin Hansen Date: Sun, 18 Aug 2024 11:37:25 +0800 Subject: [PATCH 31/73] Update examples/postgres/custom_arrays/src/model/service/service_impl.rs [skip ci] Co-authored-by: Georg Semmler --- .../custom_arrays/src/model/service/service_impl.rs | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/examples/postgres/custom_arrays/src/model/service/service_impl.rs b/examples/postgres/custom_arrays/src/model/service/service_impl.rs index e2137702581e..150258639727 100644 --- a/examples/postgres/custom_arrays/src/model/service/service_impl.rs +++ b/examples/postgres/custom_arrays/src/model/service/service_impl.rs @@ -53,14 +53,10 @@ impl Service { db: &mut Connection, param_service_id: i32, ) -> QueryResult { - match service + service .filter(service_id.eq(param_service_id)) .select(online) .first::(db) - { - Ok(res) => Ok(res), - Err(e) => Err(e), - } } pub fn get_all_online_services(db: &mut Connection) -> QueryResult> { From b11dd14f60b14cc2c89bc3e5785be6ac031bc23d Mon Sep 17 00:00:00 2001 From: Marvin Hansen Date: Sun, 18 Aug 2024 11:38:42 +0800 Subject: [PATCH 32/73] Update examples/postgres/custom_arrays/src/model/service/service_impl.rs [skip ci] Updated check if service exists to return false on error, as suggested Co-authored-by: Georg Semmler --- .../postgres/custom_arrays/src/model/service/service_impl.rs | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/examples/postgres/custom_arrays/src/model/service/service_impl.rs b/examples/postgres/custom_arrays/src/model/service/service_impl.rs index 150258639727..b4408cac15f0 100644 --- a/examples/postgres/custom_arrays/src/model/service/service_impl.rs +++ b/examples/postgres/custom_arrays/src/model/service/service_impl.rs @@ -25,10 +25,7 @@ impl Service { db: &mut Connection, param_service_id: i32, ) -> QueryResult { - match service.find(param_service_id).first::(db) { - Ok(_) => Ok(true), - Err(_) => Ok(false), - } + service.find(param_service_id).first::(db).optional().map(Option::is_some) } pub fn check_all_services_online( From 5c56d640a69631bcf14de0e2bbf55cfb01fa52eb Mon Sep 17 00:00:00 2001 From: Marvin Hansen Date: Sun, 18 Aug 2024 11:39:15 +0800 Subject: [PATCH 33/73] Update .gitignore --- .gitignore | 6 ------ 1 file changed, 6 deletions(-) diff --git a/.gitignore b/.gitignore index 11408af749e3..ec44e67ff3ec 100644 --- a/.gitignore +++ b/.gitignore @@ -3,9 +3,3 @@ Cargo.lock 
!diesel_cli/Cargo.lock
.env
*.snap.new
-
-./.idea/*
-/.idea/.gitignore
-/.idea/diesel.iml
-/.idea/modules.xml
-/.idea/vcs.xml

From b6e313e49d80cf42897b7ab07e6946e0900c43c9 Mon Sep 17 00:00:00 2001
From: Marvin Hansen
Date: Sun, 18 Aug 2024 11:43:44 +0800
Subject: [PATCH 34/73] Apply suggestions from code review

[no ci] Several minor updates in the Readme

Co-authored-by: Georg Semmler
---
examples/postgres/custom_arrays/README.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/examples/postgres/custom_arrays/README.md b/examples/postgres/custom_arrays/README.md
index 85d3355039b8..4410ec2092cc 100644
--- a/examples/postgres/custom_arrays/README.md
+++ b/examples/postgres/custom_arrays/README.md
@@ -122,7 +122,7 @@ DROP schema IF EXISTS smdb;
The company only uses three types of API endpoints, gRPC, http, or UDP. However, because data entry or transmission
errors happen, an UnknownProtocol has been added to catch everything else that may go wrong to ensure no serialization /
-deserialization bugs crash the system. Instead, every once a while a Chron job runs over the database and searches for
+deserialization bugs crash the system. Instead, every once in a while a Cron job runs over the database and searches for
those UnknownProtocol entries, reports them to the admin, who may fix the incorrect entries one day. Therefore the
Postgres ENUM in your up.sql looks like this:
@@ -393,7 +393,7 @@
If all those checks pass and you still see errors, it’s most likely a serialization error.

-To serialize and deserialize a custom Enum, you write a custom toSql and fromSql implementation. Luckily, this is
+To serialize and deserialize a custom Enum, you write a custom ToSql and FromSql implementation. Luckily, this is
straightforward.
@@ -468,7 +468,7 @@ impl ToSql for Endpoint {
```
I cannot stress enough that it is paramount that the tuple type signature must match exactly the Postgres type signature defined in your up.sql. Ideally, you want to use the split view function in your IDE to have the up.sql
-in one pane and the the toSql implementation in another pane, both side by side, to double check
+in one pane and the ToSql implementation in another pane, both side by side, to double check
that the number and types match. If the type or number of fields mismatch, you will get a compiler error telling you
that either the number of fields doesn't match or that the type of the fields doesn't match.
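As a side-by-side aid for that check, the correspondence between the composite type in up.sql and the SQL tuple used by WriteTuple and Record can be sketched as follows; the `ServiceEndpointRecord` alias is purely illustrative:

```rust
use custom_arrays::model::protocol_type::PgProtocolType;
use diesel::sql_types::{Integer, Record, Text};

// Field-by-field correspondence with smdb.service_endpoint in up.sql
// (order and count must match exactly):
//   "name"     Text           <-> Text
//   "version"  INTEGER        <-> Integer
//   "base_uri" Text           <-> Text
//   "port"     INTEGER        <-> Integer
//   "protocol" protocol_type  <-> PgProtocolType
type ServiceEndpointRecord = Record<(Text, Integer, Text, Integer, PgProtocolType)>;
```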
From fc6d49d07fe58691ccc33f47678740fbbe56b3cd Mon Sep 17 00:00:00 2001 From: Marvin Hansen Date: Sun, 18 Aug 2024 11:44:26 +0800 Subject: [PATCH 35/73] Update examples/postgres/custom_arrays/src/model/service/service_impl.rs [no ci] updated service impl Co-authored-by: Georg Semmler --- .../custom_arrays/src/model/service/service_impl.rs | 11 +++-------- 1 file changed, 3 insertions(+), 8 deletions(-) diff --git a/examples/postgres/custom_arrays/src/model/service/service_impl.rs b/examples/postgres/custom_arrays/src/model/service/service_impl.rs index b4408cac15f0..4d9545138927 100644 --- a/examples/postgres/custom_arrays/src/model/service/service_impl.rs +++ b/examples/postgres/custom_arrays/src/model/service/service_impl.rs @@ -33,14 +33,9 @@ impl Service { services: &[i32], ) -> QueryResult<(bool, Option)> { for id in services { - match Service::check_if_service_id_online(db, *id) { - Ok(res) => { - if !res { - return Ok((false, Some(format!("Service {} is offline", id)))); - } - } - Err(e) => return Err(e), - }; + if !Service::check_if_service_id_online(db, *id)? { + return Ok((false, Some(format!("Service {} is offline", id)))); + } } Ok((true, None)) From b9b30d940443c6070ab265546935a3d445b86ac4 Mon Sep 17 00:00:00 2001 From: Marvin Hansen Date: Sun, 18 Aug 2024 11:45:42 +0800 Subject: [PATCH 36/73] [no ci] Update examples/postgres/custom_arrays/src/model/service/service_impl.rs Updated update service method as to remove unneeded data loading. Co-authored-by: Georg Semmler --- .../custom_arrays/src/model/service/service_impl.rs | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/examples/postgres/custom_arrays/src/model/service/service_impl.rs b/examples/postgres/custom_arrays/src/model/service/service_impl.rs index 4d9545138927..d76e3b1cbd81 100644 --- a/examples/postgres/custom_arrays/src/model/service/service_impl.rs +++ b/examples/postgres/custom_arrays/src/model/service/service_impl.rs @@ -108,14 +108,10 @@ impl Service { param_service_id: i32, param_online: bool, ) -> QueryResult<()> { - match diesel::update(service.filter(service_id.eq(param_service_id))) + diesel::update(service.filter(service_id.eq(param_service_id))) .set(online.eq(param_online)) - .returning(Service::as_returning()) - .get_result(db) - { - Ok(_) => Ok(()), - Err(e) => Err(e), - } + .execute(db)?; + Ok(()) } pub fn update( From 27e918c0d0b5c942878aa66a3582aeb518c19543 Mon Sep 17 00:00:00 2001 From: Marvin Hansen Date: Sun, 18 Aug 2024 12:07:41 +0800 Subject: [PATCH 37/73] changed dotenv to dotenvy Signed-off-by: Marvin Hansen --- .../postgres/custom_arrays/src/model/service/service_impl.rs | 4 ++-- examples/postgres/custom_arrays/tests/service_tests.rs | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/examples/postgres/custom_arrays/src/model/service/service_impl.rs b/examples/postgres/custom_arrays/src/model/service/service_impl.rs index d76e3b1cbd81..2d7772ee29c6 100644 --- a/examples/postgres/custom_arrays/src/model/service/service_impl.rs +++ b/examples/postgres/custom_arrays/src/model/service/service_impl.rs @@ -3,7 +3,7 @@ use crate::model::service::{CreateService, Service, UpdateService}; use crate::model::Connection; use crate::schema::smdb::service::dsl::*; use diesel::{ - insert_into, ExpressionMethods, QueryDsl, QueryResult, RunQueryDsl, SelectableHelper, + insert_into, ExpressionMethods, OptionalExtension, QueryDsl, QueryResult, RunQueryDsl, SelectableHelper, }; impl Service { @@ -25,7 +25,7 @@ impl Service { db: &mut Connection, param_service_id: i32, 
) -> QueryResult { - service.find(param_service_id).first::(db).optional().map(Option::is_some) + service.find(param_service_id).first::(db).optional().map(|arg0: Option| Option::is_some(&arg0)) } pub fn check_all_services_online( diff --git a/examples/postgres/custom_arrays/tests/service_tests.rs b/examples/postgres/custom_arrays/tests/service_tests.rs index e840716d9656..6962e027e532 100644 --- a/examples/postgres/custom_arrays/tests/service_tests.rs +++ b/examples/postgres/custom_arrays/tests/service_tests.rs @@ -5,7 +5,7 @@ use custom_arrays::model::service::{CreateService, UpdateService}; use custom_arrays::Connection; use diesel::r2d2::{ConnectionManager, Pool}; use diesel::PgConnection; -use dotenv::dotenv; +use dotenvy::dotenv; use std::env; fn postgres_connection_pool() -> Pool> { From 70d66adeb84858e89b13e03078dd84463d63045e Mon Sep 17 00:00:00 2001 From: Marvin Hansen Date: Sun, 18 Aug 2024 12:10:13 +0800 Subject: [PATCH 38/73] Moved Endpoint impl into type definition file. (See [comment](https://github.com/diesel-rs/diesel/pull/4169#discussion_r1719596303)) Signed-off-by: Marvin Hansen --- examples/postgres/custom_arrays/Cargo.toml | 4 +- .../model/endpoint_type/endpoint_type_impl.rs | 37 ----------------- .../src/model/endpoint_type/mod.rs | 40 +++++++++++++++++-- 3 files changed, 39 insertions(+), 42 deletions(-) delete mode 100644 examples/postgres/custom_arrays/src/model/endpoint_type/endpoint_type_impl.rs diff --git a/examples/postgres/custom_arrays/Cargo.toml b/examples/postgres/custom_arrays/Cargo.toml index 156d7749b536..c6c1782861ca 100644 --- a/examples/postgres/custom_arrays/Cargo.toml +++ b/examples/postgres/custom_arrays/Cargo.toml @@ -8,5 +8,5 @@ publish = false diesel = { path = "../../../diesel", version = "2.2.0", default-features = false, features = ["postgres", "r2d2"] } diesel_migrations = { path = "../../../diesel_migrations", version = "2.2.0", default-features = false, features = ["postgres"] } - -dotenv = "0.15.0" +[dev-dependencies] +dotenvy = "0.15" diff --git a/examples/postgres/custom_arrays/src/model/endpoint_type/endpoint_type_impl.rs b/examples/postgres/custom_arrays/src/model/endpoint_type/endpoint_type_impl.rs deleted file mode 100644 index 066d3227f78f..000000000000 --- a/examples/postgres/custom_arrays/src/model/endpoint_type/endpoint_type_impl.rs +++ /dev/null @@ -1,37 +0,0 @@ -use crate::model::endpoint_type::Endpoint; -use crate::model::protocol_type::PgProtocolType; -use diesel::deserialize::FromSql; -use diesel::pg::{Pg, PgValue}; -use diesel::serialize::{Output, ToSql}; -use diesel::sql_types::{Integer, Record, Text}; -use diesel::{deserialize, serialize}; - -impl ToSql for Endpoint { - fn to_sql<'b>(&'b self, out: &mut Output<'b, '_, Pg>) -> serialize::Result { - serialize::WriteTuple::<(Text, Integer, Text, Integer, PgProtocolType)>::write_tuple( - &( - self.name.to_owned(), - self.version.to_owned(), - self.base_uri.to_owned(), - self.port.to_owned(), - self.protocol.to_owned(), - ), - &mut out.reborrow(), - ) - } -} - -impl FromSql for Endpoint { - fn from_sql(bytes: PgValue<'_>) -> deserialize::Result { - let (name, version, base_uri, port, protocol) = - FromSql::, Pg>::from_sql(bytes)?; - - Ok(Endpoint { - name, - version, - base_uri, - port, - protocol, - }) - } -} diff --git a/examples/postgres/custom_arrays/src/model/endpoint_type/mod.rs b/examples/postgres/custom_arrays/src/model/endpoint_type/mod.rs index 09c1b41cbab9..de0d1c4cd8e8 100644 --- a/examples/postgres/custom_arrays/src/model/endpoint_type/mod.rs +++ 
b/examples/postgres/custom_arrays/src/model/endpoint_type/mod.rs @@ -1,8 +1,11 @@ -use crate::model::protocol_type::ProtocolType; -use diesel::deserialize::FromSqlRow; +use crate::model::protocol_type::{PgProtocolType, ProtocolType}; +use diesel::deserialize::{FromSql, FromSqlRow}; use diesel::expression::AsExpression; +use diesel::pg::{Pg, PgValue}; +use diesel::{deserialize, serialize}; +use diesel::serialize::{Output, ToSql}; +use diesel::sql_types::{Integer, Record, Text}; -mod endpoint_type_impl; #[derive(Debug, Clone, FromSqlRow, AsExpression, PartialEq, Eq)] #[diesel(sql_type=crate::schema::smdb::sql_types::ServiceEndpoint)] @@ -31,3 +34,34 @@ impl Endpoint { } } } + + +impl ToSql for Endpoint { + fn to_sql<'b>(&'b self, out: &mut Output<'b, '_, Pg>) -> serialize::Result { + serialize::WriteTuple::<(Text, Integer, Text, Integer, PgProtocolType)>::write_tuple( + &( + self.name.to_owned(), + self.version.to_owned(), + self.base_uri.to_owned(), + self.port.to_owned(), + self.protocol.to_owned(), + ), + &mut out.reborrow(), + ) + } +} + +impl FromSql for Endpoint { + fn from_sql(bytes: PgValue<'_>) -> deserialize::Result { + let (name, version, base_uri, port, protocol) = + FromSql::, Pg>::from_sql(bytes)?; + + Ok(Endpoint { + name, + version, + base_uri, + port, + protocol, + }) + } +} From 58b40bece40a78b3d7dcc1da91c960764eedc6b5 Mon Sep 17 00:00:00 2001 From: Marvin Hansen Date: Sun, 18 Aug 2024 12:25:22 +0800 Subject: [PATCH 39/73] [skip ci] Switched example code and tests from PGConnectionPool to single PGConnection. (See [comment](https://github.com/diesel-rs/diesel/pull/4169#discussion_r1719587567)) Signed-off-by: Marvin Hansen --- examples/postgres/custom_arrays/src/lib.rs | 5 +++-- .../src/model/endpoint_type/mod.rs | 4 +--- .../postgres/custom_arrays/src/model/mod.rs | 3 --- .../custom_arrays/src/model/service/mod.rs | 2 +- .../src/model/service/service_impl.rs | 13 ++++++++---- .../custom_arrays/tests/service_tests.rs | 21 +++++++------------ 6 files changed, 21 insertions(+), 27 deletions(-) diff --git a/examples/postgres/custom_arrays/src/lib.rs b/examples/postgres/custom_arrays/src/lib.rs index d352381d2147..a6246bb7cad8 100644 --- a/examples/postgres/custom_arrays/src/lib.rs +++ b/examples/postgres/custom_arrays/src/lib.rs @@ -1,12 +1,13 @@ use diesel::r2d2::R2D2Connection; +use diesel::PgConnection; use diesel_migrations::{embed_migrations, EmbeddedMigrations, MigrationHarness}; use std::error::Error; pub mod model; mod schema; -pub type Connection = - diesel::r2d2::PooledConnection>; +// pub type Connection = diesel::r2d2::PooledConnection>; +pub type Connection = PgConnection; pub const MIGRATIONS: EmbeddedMigrations = embed_migrations!("migrations"); diff --git a/examples/postgres/custom_arrays/src/model/endpoint_type/mod.rs b/examples/postgres/custom_arrays/src/model/endpoint_type/mod.rs index de0d1c4cd8e8..6374b9bc6b8e 100644 --- a/examples/postgres/custom_arrays/src/model/endpoint_type/mod.rs +++ b/examples/postgres/custom_arrays/src/model/endpoint_type/mod.rs @@ -2,10 +2,9 @@ use crate::model::protocol_type::{PgProtocolType, ProtocolType}; use diesel::deserialize::{FromSql, FromSqlRow}; use diesel::expression::AsExpression; use diesel::pg::{Pg, PgValue}; -use diesel::{deserialize, serialize}; use diesel::serialize::{Output, ToSql}; use diesel::sql_types::{Integer, Record, Text}; - +use diesel::{deserialize, serialize}; #[derive(Debug, Clone, FromSqlRow, AsExpression, PartialEq, Eq)] #[diesel(sql_type=crate::schema::smdb::sql_types::ServiceEndpoint)] @@ -35,7 +34,6 
@@ impl Endpoint { } } - impl ToSql for Endpoint { fn to_sql<'b>(&'b self, out: &mut Output<'b, '_, Pg>) -> serialize::Result { serialize::WriteTuple::<(Text, Integer, Text, Integer, PgProtocolType)>::write_tuple( diff --git a/examples/postgres/custom_arrays/src/model/mod.rs b/examples/postgres/custom_arrays/src/model/mod.rs index c9cf7fa59aca..6ee5a86732bb 100644 --- a/examples/postgres/custom_arrays/src/model/mod.rs +++ b/examples/postgres/custom_arrays/src/model/mod.rs @@ -2,6 +2,3 @@ pub mod endpoint_type; pub mod service; pub mod protocol_type; - -pub type Connection = - diesel::r2d2::PooledConnection>; diff --git a/examples/postgres/custom_arrays/src/model/service/mod.rs b/examples/postgres/custom_arrays/src/model/service/mod.rs index d58cca7b6040..02f402149801 100644 --- a/examples/postgres/custom_arrays/src/model/service/mod.rs +++ b/examples/postgres/custom_arrays/src/model/service/mod.rs @@ -59,7 +59,7 @@ impl CreateService { } #[derive(Debug, Clone, Queryable, Insertable, AsChangeset)] -#[diesel(table_name= crate::schema::smdb::service)] +#[diesel(table_name=crate::schema::smdb::service)] pub struct UpdateService { pub name: Option, pub version: Option, diff --git a/examples/postgres/custom_arrays/src/model/service/service_impl.rs b/examples/postgres/custom_arrays/src/model/service/service_impl.rs index 2d7772ee29c6..82e6cf0788e1 100644 --- a/examples/postgres/custom_arrays/src/model/service/service_impl.rs +++ b/examples/postgres/custom_arrays/src/model/service/service_impl.rs @@ -1,9 +1,10 @@ use crate::model::endpoint_type::Endpoint; use crate::model::service::{CreateService, Service, UpdateService}; -use crate::model::Connection; use crate::schema::smdb::service::dsl::*; +use crate::Connection; use diesel::{ - insert_into, ExpressionMethods, OptionalExtension, QueryDsl, QueryResult, RunQueryDsl, SelectableHelper, + insert_into, ExpressionMethods, OptionalExtension, QueryDsl, QueryResult, RunQueryDsl, + SelectableHelper, }; impl Service { @@ -25,7 +26,11 @@ impl Service { db: &mut Connection, param_service_id: i32, ) -> QueryResult { - service.find(param_service_id).first::(db).optional().map(|arg0: Option| Option::is_some(&arg0)) + service + .find(param_service_id) + .first::(db) + .optional() + .map(|arg0: Option| Option::is_some(&arg0)) } pub fn check_all_services_online( @@ -34,7 +39,7 @@ impl Service { ) -> QueryResult<(bool, Option)> { for id in services { if !Service::check_if_service_id_online(db, *id)? 
{ - return Ok((false, Some(format!("Service {} is offline", id)))); + return Ok((false, Some(format!("Service {} is offline", id)))); } } diff --git a/examples/postgres/custom_arrays/tests/service_tests.rs b/examples/postgres/custom_arrays/tests/service_tests.rs index 6962e027e532..a264ee515bcf 100644 --- a/examples/postgres/custom_arrays/tests/service_tests.rs +++ b/examples/postgres/custom_arrays/tests/service_tests.rs @@ -3,23 +3,16 @@ use custom_arrays::model::protocol_type::ProtocolType; use custom_arrays::model::service; use custom_arrays::model::service::{CreateService, UpdateService}; use custom_arrays::Connection; -use diesel::r2d2::{ConnectionManager, Pool}; -use diesel::PgConnection; +use diesel::{Connection as DieselConnection, PgConnection}; use dotenvy::dotenv; use std::env; -fn postgres_connection_pool() -> Pool> { +fn postgres_connection() -> PgConnection { dotenv().ok(); - let database_url = env::var("DATABASE_URL") - .or_else(|_| env::var("POSTGRES_DATABASE_URL")) - .expect("DATABASE_URL must be set"); - - let manager = ConnectionManager::::new(database_url); - Pool::builder() - .test_on_check_out(true) - .build(manager) - .expect("Could not build connection pool") + let database_url = env::var("DATABASE_URL").expect("DATABASE_URL must be set"); + PgConnection::establish(&database_url) + .unwrap_or_else(|_| panic!("Error connecting to {}", database_url)) } fn test_db_migration(conn: &mut Connection) { @@ -30,8 +23,8 @@ fn test_db_migration(conn: &mut Connection) { #[test] fn test_service() { - let pool = postgres_connection_pool(); - let conn = &mut pool.get().unwrap(); + let mut connection = postgres_connection(); + let conn = &mut connection; println!("Test DB migration"); test_db_migration(conn); From fc8041475e8372a02a75d25429d9f5005c6809d5 Mon Sep 17 00:00:00 2001 From: Marvin Hansen Date: Sun, 18 Aug 2024 12:49:57 +0800 Subject: [PATCH 40/73] [skip ci] Moved all custom types into custom schema smdb. See [comment](https://github.com/diesel-rs/diesel/pull/4169#discussion_r1719613646) Added a DatabaseError to PgProtocolType in from_sql implementation to convey that the DB contains something that isn't expected. See [comment](https://github.com/diesel-rs/diesel/pull/4169#discussion_r1719595463). 
Signed-off-by: Marvin Hansen --- .../migrations/2024-08-15-070500_services/down.sql | 4 ++-- .../migrations/2024-08-15-070500_services/up.sql | 8 ++++---- .../custom_arrays/src/model/protocol_type/mod.rs | 13 +++++++++++-- examples/postgres/custom_arrays/src/schema.rs | 2 +- 4 files changed, 18 insertions(+), 9 deletions(-) diff --git a/examples/postgres/custom_arrays/migrations/2024-08-15-070500_services/down.sql b/examples/postgres/custom_arrays/migrations/2024-08-15-070500_services/down.sql index 8a00c31272cc..50cbfd11a8cf 100644 --- a/examples/postgres/custom_arrays/migrations/2024-08-15-070500_services/down.sql +++ b/examples/postgres/custom_arrays/migrations/2024-08-15-070500_services/down.sql @@ -1,5 +1,5 @@ -- This file should undo anything in `up.sql` DROP TABLE IF EXISTS smdb.service; -DROP TYPE IF EXISTS service_endpoint CASCADE; -DROP TYPE IF EXISTS protocol_type CASCADE; +DROP TYPE IF EXISTS smdb.service_endpoint CASCADE; +DROP TYPE IF EXISTS smdb.protocol_type CASCADE; DROP SCHEMA IF EXISTS smdb; \ No newline at end of file diff --git a/examples/postgres/custom_arrays/migrations/2024-08-15-070500_services/up.sql b/examples/postgres/custom_arrays/migrations/2024-08-15-070500_services/up.sql index 9b473e75fdcd..9a0c021b7c1a 100644 --- a/examples/postgres/custom_arrays/migrations/2024-08-15-070500_services/up.sql +++ b/examples/postgres/custom_arrays/migrations/2024-08-15-070500_services/up.sql @@ -1,19 +1,19 @@ -- Your SQL goes here CREATE SCHEMA IF NOT EXISTS smdb; -CREATE TYPE protocol_type AS ENUM ( +CREATE TYPE smdb.protocol_type AS ENUM ( 'UnknownProtocol', 'GRPC', 'HTTP', 'UDP' ); -CREATE TYPE service_endpoint AS ( +CREATE TYPE smdb.service_endpoint AS ( "name" Text, "version" INTEGER, "base_uri" Text, "port" INTEGER, - "protocol" protocol_type + "protocol" smdb.protocol_type ); CREATE TABLE smdb.service( @@ -25,5 +25,5 @@ CREATE TABLE smdb.service( "health_check_uri" Text NOT NULL, "base_uri" Text NOT NULL, "dependencies" INTEGER[] NOT NULL, - "endpoints" service_endpoint[] NOT NULL + "endpoints" smdb.service_endpoint[] NOT NULL ); \ No newline at end of file diff --git a/examples/postgres/custom_arrays/src/model/protocol_type/mod.rs b/examples/postgres/custom_arrays/src/model/protocol_type/mod.rs index 5af43e599b65..ff464578f2c4 100644 --- a/examples/postgres/custom_arrays/src/model/protocol_type/mod.rs +++ b/examples/postgres/custom_arrays/src/model/protocol_type/mod.rs @@ -1,6 +1,8 @@ use diesel::deserialize::{FromSql, FromSqlRow}; use diesel::expression::AsExpression; use diesel::pg::{Pg, PgValue}; +use diesel::result::DatabaseErrorKind; +use diesel::result::Error::DatabaseError; use diesel::serialize::{IsNull, Output, ToSql}; use diesel::sql_types::SqlType; use diesel::{deserialize, serialize}; @@ -9,7 +11,7 @@ use std::io::Write; // Diesel type mapping requires a struct to derive a SqlType for custom ToSql and FromSql implementations #[derive(SqlType)] #[diesel(sql_type = protocol_type)] -#[diesel(postgres_type(name = "protocol_type"))] +#[diesel(postgres_type(name = "protocol_type", schema = "smdb"))] pub struct PgProtocolType; #[derive(Debug, Clone, FromSqlRow, AsExpression, PartialEq, Eq)] @@ -41,7 +43,14 @@ impl FromSql<PgProtocolType, Pg> for ProtocolType { b"GRPC" => Ok(ProtocolType::GRPC), b"HTTP" => Ok(ProtocolType::HTTP), b"UDP" => Ok(ProtocolType::UDP), - _ => Ok(ProtocolType::UnknownProtocol), + _ => Err(DatabaseError( + DatabaseErrorKind::SerializationFailure, + Box::new(format!( + "Unrecognized enum variant: {:?}", + String::from_utf8_lossy(bytes.as_bytes()) + )), + )
.into()), } } } diff --git a/examples/postgres/custom_arrays/src/schema.rs b/examples/postgres/custom_arrays/src/schema.rs index 8b6e84a6af49..91378b5ced2e 100644 --- a/examples/postgres/custom_arrays/src/schema.rs +++ b/examples/postgres/custom_arrays/src/schema.rs @@ -3,7 +3,7 @@ pub mod smdb { pub mod sql_types { #[derive(diesel::query_builder::QueryId, Clone, diesel::sql_types::SqlType)] - #[diesel(postgres_type(name = "service_endpoint"))] + #[diesel(postgres_type(name = "service_endpoint", schema = "smdb"))] pub struct ServiceEndpoint; } From ba72c1ff3f793c6c470748f5bca67dee7bf7ed94 Mon Sep 17 00:00:00 2001 From: Marvin Hansen Date: Sun, 18 Aug 2024 16:34:31 +0800 Subject: [PATCH 41/73] Updated Readme and tests to match previous code changes. Signed-off-by: Marvin Hansen --- examples/postgres/custom_arrays/README.md | 407 ++++++++++-------- examples/postgres/custom_arrays/src/lib.rs | 3 + .../custom_arrays/tests/service_tests.rs | 2 +- 3 files changed, 239 insertions(+), 173 deletions(-) diff --git a/examples/postgres/custom_arrays/README.md b/examples/postgres/custom_arrays/README.md index 4410ec2092cc..33121873ec18 100644 --- a/examples/postgres/custom_arrays/README.md +++ b/examples/postgres/custom_arrays/README.md @@ -1,6 +1,7 @@ # Custom Array and Data Types in Postgres Table of Content: + 1. [Concepts](#concepts) 2. [The project](#the-project) 3. [Getting started](#getting-started) @@ -12,7 +13,7 @@ Table of Content: 9. [Rust Types](#rust-types) 10. [Additional methods](#additional-methods) 11. [Applied Best Practices](#applied-best-practices) -12. [Testing](#testing) +12. [Testing](#testing) In this guide, you learn more about the concepts listed below and illustrate the actual usage in Diesel with a sample project. @@ -86,14 +87,12 @@ a good security practice. To create a new schema with Diesel, you follow three s 1) Declare the schema name in the diesel.toml file 2) Declare the schema in the migration up/down.sql files -3) Prefix all table names in the migration up/down.sql files with the schema - -Postgres uses internally a schema as something like a search path for a table and, by default, -searches in the public schema for a table. If that fails, Postgres returns an error that the table does not exists (in the default schema). -Because of the search path mechanism, you only have to declare the schema name and consistently prefix thetables in the schema with the schema name and that’s it. +3) Add the schema to the SQL type as annotations -It is important to know that a custom schema only applies to tables. Custom types and enum still -reside in the default public schema. +Postgres uses internally a schema as something like a search path for a table and, by default, searches in the public +schema for a table. If that fails, Postgres returns an error that the table does not exists (in the default schema). +Because of the search path mechanism, you only have to declare the schema name and consistently prefix the tables in the +schema with the schema name and that’s it. In your diesel.toml file, add the following entry: @@ -115,22 +114,23 @@ CREATE SCHEMA IF NOT EXISTS smdb; Also, add the corresponding drop operation in the down.sql file. ```sql -DROP schema IF EXISTS smdb; +DROP SCHEMA IF EXISTS smdb; ``` ## Postgres Enum The company only uses three types of API endpoints, gRPC, http, or UDP. 
However, because data entry or transmission -errors happen, an UnknownProtocol has been added to catch everything else that may go wrong to ensure no serialization / -deserialization bugs crash the system. Instead, every once a while a Cron job runs over the database and searches for -those UnknownProtocol entries, reports it to the admin who may fixes the incorrect entries one day. Therefore the +errors happen, an UnknownProtocol has been added to catch everything else that may go wrong to ensure that a potential +serialization or deserialization bug does not crash the system. +Instead, every once in a while a Cron job runs over the database and searches for +those UnknownProtocol entries and reports them to the admin, who may fix the incorrect entries. Therefore the Postgres ENUM in your up.sql looks like this: ```sql -- Your SQL goes here CREATE SCHEMA IF NOT EXISTS smdb; -CREATE TYPE protocol_type AS ENUM ( +CREATE TYPE smdb.protocol_type AS ENUM ( 'UnknownProtocol', 'GRPC', 'HTTP', @@ -138,44 +138,51 @@ ); ``` +Notice the schema prefix before the Enum name. Add the corresponding drop type operation to your down.sql file: ```sql -DROP TYPE IF EXISTS protocol_type CASCADE; +DROP TYPE IF EXISTS smdb.protocol_type CASCADE; DROP schema IF EXISTS smdb; ``` ## Postgres custom type The service endpoint in this case is modelled as a custom type. -There are few cases when you want to choose a custom type over a table. - -1) The data have no relation to data in other tables -2) You want to group a bunch of fields i.e. to store a configuration file with nested fields -3) You specifically don’t want to model relations - -In the case of a service endpoint, it really is as simple as every service has one or more endpoints, but these are -certainly not worth storing in a separate table since then you would have to deal with resolving relations during query -time. Rather, when using the custom type, you just access a field that is basically a tuple. With that out of the way, +There are few cases when you want to choose a custom type over a table: + +1) The data have no relation to data in other tables. +2) You want to group a set of fields i.e. to store a complex configuration file with nested fields. +3) You specifically don’t relations because of specific requirements. + +The service endpoint is an example of the first two cases. +For once, the endpoints have no relation to any other data therefore a separate table makes little sense. Secondly, the +entire service metadata database really is more of a configuration store and, in a way, the service table really is just +one complex configuration with nested fields. +Also, it is worth mentioning that if you were to store endpoints in a separate table, you would have to deal with resolving +relations during query time and that is probably a bit too much for just loading a configuration. +Rather, when using the custom type, you just access a field that is basically a tuple. +With that out of the way, your endpoint type looks like this in your up.sql: ```sql -- Your SQL goes here -CREATE TYPE service_endpoint AS ( +CREATE TYPE smdb.service_endpoint AS ( "name" Text, "version" INTEGER, "base_uri" Text, "port" INTEGER, - "protocol" protocol_type + "protocol" smdb.protocol_type ); ``` +Again, all custom types are prefixed with the custom schema.
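Once the migration has been applied (a later step), you can sanity-check the new composite type from any SQL console. The following one-liner is only a minimal sketch with made-up placeholder values; it builds a value of the type by casting a row literal:

```sql
-- Construct a value of the composite type from a row literal (placeholder values).
SELECT ROW('some_service', 1, 'http://localhost', 8080, 'GRPC')::smdb.service_endpoint;
```

If the cast succeeds, the field order, the field types, and the schema-qualified enum all line up.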
And the matching drop operation in the down.sql file: ```sql -DROP TYPE IF EXISTS service_endpoint CASCADE; -DROP TYPE IF EXISTS protocol_type CASCADE; +DROP TYPE IF EXISTS smdb.service_endpoint CASCADE; +DROP TYPE IF EXISTS smdb.protocol_type CASCADE; DROP schema IF EXISTS smdb; ``` @@ -189,8 +196,8 @@ Let’s think this through: ### Internal primary key: SERIAL type When you only need a primary key as an index, you just use the SERIAL and Postgres gives your data an automatically -incrementing integer primary key. You usually can return the primary key value after inserting so if you need to know -the key, you do get it after insert. +incrementing integer primary key. +You can usually return the primary key value after inserting, so if you need to know the key, you do get it after insert. ### External primary key: INTEGER type @@ -218,28 +225,25 @@ CREATE TABLE smdb.service( "health_check_uri" Text NOT NULL, "base_uri" Text NOT NULL, "dependencies" INTEGER[] NOT NULL, - "endpoints" service_endpoint[] NOT NULL + "endpoints" smdb.service_endpoint[] NOT NULL ); ``` -As mentioned earlier, the custom schema applies to tables therefore the service table is -prefixed with the schema name whereas the endpoint type is not as it resides in the public schema. - Add the matching drop statement to your down.sql file: ```sql -- This file should undo anything in `up.sql` DROP TABLE IF EXISTS smdb.service; -DROP TYPE IF EXISTS service_endpoint CASCADE; -DROP TYPE IF EXISTS protocol_type CASCADE; -DROP schema IF EXISTS smdb; +DROP TYPE IF EXISTS smdb.service_endpoint CASCADE; +DROP TYPE IF EXISTS smdb.protocol_type CASCADE; +DROP SCHEMA IF EXISTS smdb; ``` A few notes on the service table: -* Notice the schema prefix appears only in the table name. -* The custom type and custom Enum do not have any prefix because they are in the default schema. -* The array type follows the usual convention type[]. +* All types and tables are prefixed with the custom schema to give Postgres a + hint where to find these types and tables. +* The array type follows the usual convention schema.type[]. * The NOT NULL attribute means that the array itself must be set; Values inside the array still might be null. The Diesel team decided that array values are nullable by default because there are so many things that may go wrong when @@ -252,19 +256,19 @@ In total, the up.sql looks as below: -- Your SQL goes here CREATE SCHEMA IF NOT EXISTS smdb; -CREATE TYPE protocol_type AS ENUM ( +CREATE TYPE smdb.protocol_type AS ENUM ( 'UnknownProtocol', 'GRPC', 'HTTP', 'UDP' ); -CREATE TYPE service_endpoint AS ( +CREATE TYPE smdb.service_endpoint AS ( "name" Text, "version" INTEGER, "base_uri" Text, "port" INTEGER, - "protocol" protocol_type + "protocol" smdb.protocol_type ); CREATE TABLE smdb.service( @@ -276,7 +280,7 @@ CREATE TABLE smdb.service( "health_check_uri" Text NOT NULL, "base_uri" Text NOT NULL, "dependencies" INTEGER[] NOT NULL, - "endpoints" service_endpoint[] NOT NULL + "endpoints" smdb.service_endpoint[] NOT NULL ); ``` Now, it’s time to run the Diesel migration: ```bash diesel migration run ``` -This generates a schema.rs file in the src root: +You may use a database console to double check if all types and tables have been created inside the custom schema. If a +type somehow appears in the public default schema, double check if the type definition in up.sql has been prefixed with +the schema name.
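If you are unsure what to look for in the console, a small catalog query does the job; this sketch assumes psql or any plain SQL console (in psql, `\dT smdb.*` and `\dt smdb.*` are the shortcuts):

```sql
-- List every type the migration created in the smdb schema;
-- typtype is 'e' for enums and 'c' for composite types.
-- Postgres also auto-creates matching array types prefixed with an underscore.
SELECT t.typname, t.typtype
FROM pg_type t
JOIN pg_namespace n ON n.oid = t.typnamespace
WHERE n.nspname = 'smdb';
```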
+ +The Diesel migration generates the following schema.rs file in the src root: ```rust // @generated automatically by Diesel CLI. @@ -329,8 +337,11 @@ showing related warnings Next, you want to create a folder model with a mod file in it attached to your lib file. This is your place to store all Rust type implementations matching all the generated Postgres types and tables. -Lastly, you also want to add a type alias for a pooled connection in the lib file because that connection definition is -a long and convoluted type in need of a shorthand. +Lastly, you also want to add a type alias for a postgres database connection in the lib file because that gives you the +freedom to swap between a normal single connection or a connection pool without updating your type implementation or +tests. +The connection pool type alias has ben uncommented, but the type signature of the pooled connection makes it obvious why +a type alias is a good idea. At this point, your lib file looks as shown below: @@ -338,7 +349,12 @@ mod schema; pub mod model; -pub type Connection = diesel::r2d2::PooledConnection<ConnectionManager<PgConnection>>; +// Alias for a pooled connection. +// pub type Connection = diesel::r2d2::PooledConnection<ConnectionManager<PgConnection>>; + +// Alias for a normal, single, connection. +pub type Connection = PgConnection; + ``` ## Rust Types @@ -356,11 +372,13 @@ In total, you implement three Rust types: ### Rust Enum: ProtocolType Diesel needs a wrapper struct to store a custom Enum in Postgres, so let’s add one: ```rust #[derive(SqlType)] #[diesel(sql_type = protocol_type)] -#[diesel(postgres_type(name = "protocol_type"))] +#[diesel(postgres_type(name = "protocol_type", schema = "smdb"))] pub struct PgProtocolType; ``` It is important that you add both, sql_type and postgres_type, otherwise insert will fail. +Furthermore, it is important to add the database schema ("smdb") otherwise Postgres fails to find the type and aborts an +operation on that type. Next, let’s define the actual Enum. Bear in mind to use the wrapper struct as sql_type on the Enum: @@ -392,12 +410,16 @@ message, make sure to check: 5) Does my Rust type refers to the wrapper struct type? If all those checks pass and you still see errors, it’s most likely a serialization error. +To serialize and deserialize a custom Enum, you write a custom ToSql and FromSql implementation. -To serialize and deserialize a custom Enum, you write a custom ToSql and FromSql implementation. Luckily, this is -straightforward. +`ToSql` describes how to serialize a given rust type (Self) as sql side type (first generic argument) for a specific +database backend (second generic argument). +It needs to translate the type into the relevant wire protocol in that case. +For postgres/mysql enums that's just the enum value as ascii string, +but in general it's dependent on the database + type. Also, it is important +to end the implementation with Ok(IsNull::No). ```rust - impl ToSql<PgProtocolType, Pg> for ProtocolType { fn to_sql<'b>(&'b self, out: &mut Output<'b, '_, Pg>) -> serialize::Result { match *self { @@ -409,7 +431,15 @@ impl ToSql<PgProtocolType, Pg> for ProtocolType { Ok(IsNull::No) } } +``` + +`FromSql` describes how to deserialize a given rust type (Self) from a sql side type (first generic argument) for a specific +database backend (second generic argument). +It needs to translate from the relevant wire protocol to the rust type. +For postgres/mysql enums that just means matching on the ascii string, +but in general it's dependent on the database + type.
+```rust impl FromSql<PgProtocolType, Pg> for ProtocolType { fn from_sql(bytes: PgValue<'_>) -> deserialize::Result<Self> { match bytes.as_bytes() { @@ -417,15 +447,21 @@ impl FromSql<PgProtocolType, Pg> for ProtocolType { b"GRPC" => Ok(ProtocolType::GRPC), b"HTTP" => Ok(ProtocolType::HTTP), b"UDP" => Ok(ProtocolType::UDP), - _ => Ok(ProtocolType::UnknownProtocol), + _ => Err(DatabaseError( + DatabaseErrorKind::SerializationFailure, + Box::new(format!( + "Unrecognized enum variant: {:?}", + String::from_utf8_lossy(bytes.as_bytes()) + )), + ) + .into()), } } } ``` -In ToSql, it is important to end the implementation with Ok(IsNull::No). In the from_sql, it is important to add a catch -all case that returns an UnknownProtocol instance. In practice, the catch all rarely ever gets triggered, but in those -few corner cases when it does, it keeps the application running. +In the from_sql, it is important to add error handling for unknown Enum variants +to catch errors that may result from incorrect database updates. ### Rust Struct Endpoint @@ -441,14 +477,16 @@ pub struct Endpoint { } -It is worth mentioning is that the sql_type refers to the wrapper struct generated by Diesel and stored in the schema.rs -file. Keep this in mind if you ever refactor the schema.rs file into a different folder because the macro annotation -checks the path during compilation and throws an error if it cannot find the type in the provided path. The serialize -and deserialize implementation of a custom type is not as obvious as the Enum because, internally, Postgres represent a -custom type as an anonymous typed tuple. Therefore, you have to map a struct to a typed tuple and back. +It is worth mentioning that the sql_type refers to the sql side type generated by Diesel and stored in the schema.rs +file. +Keep this in mind if you ever refactor the schema.rs file into a different folder because the macro annotation checks +the path during compilation and throws an error if it cannot find the type in the provided path. +The serialize and deserialize implementation of a custom type is not as obvious as the Enum because, internally, +Postgres represent a custom type as an anonymous typed tuple. Therefore, you have to map a struct to a typed tuple and +back. -Let’s start with the toSql implementation to store the Rust Endpoint struct as typed tuple. Luckily, Diesel provides a -helper util to do just that. +Let’s start with the toSql implementation to store the Rust Endpoint struct as typed tuple. +Luckily, Diesel provides a helper util to do just that. ```rust impl ToSql<crate::schema::smdb::sql_types::ServiceEndpoint, Pg> for Endpoint { @@ -467,14 +505,16 @@ impl ToSql<crate::schema::smdb::sql_types::ServiceEndpoint, Pg> for Endpoint { } } -I cannot stress enough that it is paramount that the tuple type signature must match exactly the Postgres type signature defined in your up.sql. Ideally, you want to use the split view function in your IDE to have the up.sql -in one pane and the the ToSql implementation in another pane, both side by side, to double check -that the number and types match. -If the type or number of types mismatch, you will get a compiler error telling you that somehow either -the number of fields don’t match or that the type of the fields don’t match. +I cannot stress enough that it is paramount that the tuple type signature must match exactly the Postgres type signature +defined in your up.sql. +Ideally, you want to use the split view function in your IDE to have the up.sql in one pane and the ToSql +implementation in another pane, both side by side, to double check +that the number and types match.
+If the type or number of types mismatch, you will get a compiler error telling you that somehow either +the number of fields don’t match or that the type of the fields don’t match. Also, because the write_tuple expects values, you have to call either to_owned() or clone() on any referenced data. -The from_sql reverses the process by converting a Postgres typed tuple back into a Rust struct. +The from_sql reverses the process by converting a Postgres typed tuple back into a Rust struct. Again, Diesel provides a convenient helper to do so: ```rust @@ -494,11 +534,12 @@ impl FromSql<crate::schema::smdb::sql_types::ServiceEndpoint, Pg> for Endpoint { } } -Similar to the serialization process, it is paramount that the type annotation match exactly the one used in toSql and -the type definition in up.sql. +Similar to the serialization process, it is paramount that the type annotation +match exactly the one used in toSql and the type definition in up.sql. -Debugging serialization issues in Diesel is relatively straight forward since the compiler usually points out the -location of the issue and, often, the issue is a type mismatch that is relatively easy to fix. +Debugging serialization issues in Diesel is relatively straight forward +since the compiler usually points out the location of the issue and, often, the issue is a type mismatch that is +relatively easy to fix. It’s worth repeating that you are dealing with three types in total: @@ -514,7 +555,7 @@ Make sure all of those types match correctly. The service struct gets its serialization and deserialization implementation generated by Diesel so that saves some typing. On the other hand, it is a good practice to implement database operations on the actual type itself. The wisdom here is twofold. For once, you cannot separate the database operation from the type, -therefore it makes sense to implement the operations on the type itself. +therefore it makes sense to implement the operations on the type itself. This also hides implementation details in encapsulation, as [discussed in the book](https://doc.rust-lang.org/book/ch17-01-what-is-oo.html#encapsulation-that-hides-implementation-details). @@ -530,9 +571,17 @@ in total, we create 3 service types: 2) CreateService: For create operations 3) UpdateService: For update operations -The Service and CreateService have identical fields. Also, both types require a primary key annotation in addition to -the table name. The UpdateService, however, has each field wrapped into an option type. When the option type is Some, -Diesel knows that this field needs updating. If the field is set to None, Diesel ignores it. +The Service and CreateService have identical fields. +Also, both types require a primary key annotation in addition to +the table name. For more details about inserts, refer to the +official [all about inserts guide](https://diesel.rs/guides/all-about-inserts.html). + +The UpdateService, however, has each field wrapped into an option type. When the option type is Some, Diesel knows that +this field needs updating. If the field is set to None, Diesel ignores it. +For more details about updates, refer to the +official [all about updates guide](https://diesel.rs/guides/all-about-updates.html) + +The relevant type declarations are: ```rust #[derive(Debug, Clone, Queryable, Selectable)] @@ -572,7 +621,7 @@ pub struct UpdateService { ``` Next, let’s implement the CRUD operations on the service type. -Remember the handy connection type alias defined earlier? +Remember the handy connection type alias defined earlier?
This service implementation is the place to use it. ### **Create** ```rust impl Service { pub fn create(db: &mut Connection, item: &CreateService) -> QueryResult<Self> { @@ -591,7 +640,7 @@ impl Service { The insert into function needs the table as target and the value to insert. It is a common convention to return the inserted value so that the callsite can verify that the insert completed correctly. -### **Read** +### **Read** ```rust pub fn read(db: &mut Connection, param_service_id: i32) -> QueryResult<Self> { @@ -653,12 +702,13 @@ With the CRUD methods implemented, it’s time to look at the more interesting p ## Additional methods -In the requirement, it was stated that: +In the requirement, it was stated that: “... a service depends on other services, and it would be great to test before deploying a new service, if all of its dependencies are online.” -To do so we have to implement a method that sets a service online and another one to set it offline. By experience, a few +To do so we have to implement a method that sets a service online and another one to set it offline. From experience, a +few non-trivial errors are caused by incorrectly set Boolean flags and whenever you can, hide Boolean flags in your public API and use a dedicated method instead to set them. @@ -853,10 +903,10 @@ and follows three best practices: ### Database connection as a parameter -The first one refers to the idea that no type should hold application state, and a database connection definitely counts as application state. Instead, you would write a database connection manager -that manages a Postgres connection pool, and then calls into the database methods implemented -in the service type giving it a pooled connection as parameter. - +The first one refers to the idea that no type should hold application state, and a database connection definitely counts +as application state. Instead, you would write a database connection manager +that manages a Postgres connection pool, and then calls into the database methods implemented +in the service type giving it a pooled connection as parameter. ### Prefer the generated DSL over custom queries @@ -896,86 +946,92 @@ we have to update the return type of the method as well. } ``` -Run the compile and see that it works. Now, if you were to remove the type annotation from get_result, you get a -compile error saying that trait FromSql is not implemented. +Run the compiler and see that it works. Now, if you were to remove the type annotation from get_result, +you get a compile error saying that trait FromSql is not implemented. +The type annotation is required because there can be more than one FromSql impl for the same rust type and there can be +more than one FromSql type for the same sql type. That gives a lot of flexibility in how you actually map data between your +database and your rust code, but it requires explicit type annotation. If you write a database layer for an existing system, this technique comes in handy as you can seamlessly convert -between Diesel DB types and your target types while leaving your target types as it. And because you never know when you have to do this, it’s generally recommended to add type annotations to DB return types. +between Diesel DB types and your target types while leaving your target types as is. And because you never know when you +have to do this, it’s generally recommended to add type annotations to DB return types. ## Testing -Diesel enables you to test your database schema and migration early on in the development process. +Diesel enables you to test your database schema and migration early on in the development process.
To do so, you only need: * A util that creates a DB connection -* A util that runs the DB migration +* A util that runs the DB migration * And your DB integration tests -### DB Connection Types +### DB Connection Types -Broadly speaking, there are two ways to handle database connection. One way is to create one connection per application -and use it until the application shuts down. Another way is to create a pool of connection and, -whenever a database connection is needed for an DB operation, a connection is taken from the pool -and after the DB operation has been completed, the connection is returned to the pool. -The first approach is quite common in small application whereas the second one is commonly used in server application -that expected consistent database usage. +Broadly speaking, there are two ways to handle database connections. One way is to create one connection per application +and use it until the application shuts down. Another way is to create a pool of connections and, +whenever a database connection is needed for a DB operation, a connection is taken from the pool +and after the DB operation has been completed, the connection is returned to the pool. +The first approach is quite common in small applications whereas the second one is commonly used in server applications +that expect consistent database usage. -**Important** +**Important** Using a database connection pool in Diesel requires you to enable the `r2d2` feature in your cargo.toml file. -In Diesel, you can handle connections either way, but the only noticeable difference is the actual connection type -used as type parameter. For that reason, a type alias for the DB connection was declared early on because -that would allow you to switch between a single connection and a pooled connection without refactoring. -However, because connection pooling in Diesel is so well supported, I suggest to use it by default. +In Diesel, you can handle connections either way, but the only noticeable difference is the actual connection type used +as type parameter. For that reason, a type alias for the DB connection was declared in the lib.rs file because +that would allow you to switch between a single connection and a pooled connection without refactoring. -### DB Connection Test Util +### DB Connection Test Util -Let's start with a small util that returns a DB connection. First you need to get a database URI from somewhere, -then construct a connection pool, and lastly return the connection pool. +Let's start with a small util that returns a simple DB connection. First you need to get a database URI from somewhere, +then construct a connection, and lastly return the connection. Remember, this is just a test util so there is no need to add anything more than necessary.
```rust -fn postgres_connection_pool() -> Pool<ConnectionManager<PgConnection>> { - dotenv().ok(); +use dotenvy::dotenv; - let database_url = env::var("DATABASE_URL") - .or_else(|_| env::var("POSTGRES_DATABASE_URL")) - .expect("DATABASE_URL must be set"); +fn postgres_connection() -> PgConnection { + dotenv().ok(); - let manager = ConnectionManager::<PgConnection>::new(database_url); - Pool::builder() - .test_on_check_out(true) - .build(manager) - .expect("Could not build connection pool") + let database_url = env::var("POSTGRES_DATABASE_URL") + .expect("POSTGRES_DATABASE_URL must be set"); + + PgConnection::establish(&database_url) + .unwrap_or_else(|_| panic!("Error connecting to {}", database_url)) } ``` -Here we use the dotenv crate to test if there is an environment variable of either DATABASE_URL or POSTGRES_DATABASE_URL -and if so, parse the string and use it to build a connection pool. Therefore, make sure you have one of -those variables set in your environment. Notice, the test_on_check_out flag is set to true, -which means the database will be pinged to check if the database is actually reachable and the connection is working correctly. +Here we use the dotenv crate to test if there is a POSTGRES_DATABASE_URL environment variable +and if so, parse the string and use it to establish a connection. Therefore, make sure the POSTGRES_DATABASE_URL is set +correctly. + +### DB Migration Util -Diesel can run a database migration in one of two ways. First, you can use the Diesel CLI in your terminal -to generate, run, or revert migrations manually. This is ideal for development when you frequently -change the database schema. The second way is programmatically via the embedded migration macro, -which is ideal to build a single binary with all migrations compiled into it so that -you don't have to install and run the Diesel CLI on the target machine. -This simplifies deployment of your application and streamlines continuous integration testing. +Diesel can run a database migration in one of two ways. +First, you can use the Diesel CLI in your terminal +to generate, run, or revert migrations manually. This is ideal for development when you frequently change the database +schema. +The second way is programmatically via the embedded migration macro, +which is ideal to build a single binary with all migrations compiled into it so that +you don't have to install and run the Diesel CLI on the target machine. +This simplifies deployment of your application and streamlines continuous integration testing. **Important** -You must add the crate `diesel_migrations` to your cargo.toml and set the target database as feature flag -to enable the embedded migration macro. +You must add the crate `diesel_migrations` to your cargo.toml and set the target database as a feature flag to enable the +embedded migration macro. -To serve both purposes, deployment and CI testing, let's add a new function `run_db_migration` to the lib.rs file -of the crate that takes a connection as parameter, checks if the DB connection is valid, -checks if there are any pending migrations, and if so, runs all pending migrations. +To serve both purposes, deployment and CI testing, let's add a new function `run_db_migration` to the lib.rs file of the +crate that takes a connection as parameter, checks if the DB connection is valid, checks if there are any pending +migrations, and if so, runs all pending migrations.
The implementation is straight forward, as you can see below: ```rust +pub type Connection = PgConnection; + +pub const MIGRATIONS: EmbeddedMigrations = embed_migrations!("migrations"); + pub fn run_db_migration( conn: &mut Connection, ) -> Result<(), Box<dyn Error>> { @@ -1016,54 +1072,56 @@ pub fn run_db_migration( } -The rational to check for all potential errors is twofold. For once, because the database connection is given as a parameter, -you just don't know if the creating pool has checked the connection, therefore you better check it to catch a dead connection before using it. -Second, even if you have a correct database connection, this does not guarantee that the migration will succeed. -There might be some types left from a previously aborted drop operations or -a random error might happened at any stage of the migration, therefore you have to handle the error where it occurs. -Also, because you run the db migration during application startup or before testing, -ensure you have clear error messages to speed up diagnostic and debugging. - +The rationale to check for all potential errors is twofold. For one, because the database connection is given as a +parameter, +you just don't know if the component that created the connection has checked the connection, therefore you better check +it to catch a dead connection. +Second, even if you have a correct database connection, this does not guarantee that the migration will succeed. +There might be some types left from a previously aborted drop operation or +a random error might happen at any stage of the migration, therefore you have to handle the error where it occurs. +Also, because you run the db migration during application startup or before testing, +ensure you have clear error messages to speed up diagnostics and debugging. ### DB Integration Tests -Database integration tests become flaky when executed in parallel usually because of conflicting read / write operations. -While modern database systems can handle concurrent data access, test tools with assertions not so much. -That means, test assertions start to fail seemingly randomly when executed concurrently. +Database integration tests become flaky when executed in parallel usually because of conflicting read / write +operations. +While modern database systems can handle concurrent data access, test tools with assertions not so much. +That means, test assertions start to fail seemingly randomly when executed concurrently. There are only very few viable options to deal with this reality: * Don't use parallel test execution * Only parallelize tests per isolated access -* Do synchronization and test the actual database state before each test and run tests as atomic transactions +* Do synchronization and test the actual database state before each test and run tests as atomic transactions -Out of the three options, the last one will almost certainly win you an over-engineering award in the unlikely case -your colleagues appreciate the resulting test complexity. If not, good luck. -In practice, not using parallel test execution is often not possible either because of the larger number -of integration tests that run on a CI server. To be clear, strictly sequential tests is a great option -for small projects with low complexity, it just doesn't scale as the project grows in size and complexity. +Out of the three options, the last one will almost certainly win you an over-engineering award in the unlikely case +your colleagues appreciate the resulting test complexity. If not, good luck.
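As a side note, the first option needs no extra tooling: the standard libtest harness can be forced into strictly sequential execution with a single flag, for example:

```bash
# Run the whole test binary on a single thread, i.e. no parallel test execution.
cargo test -- --test-threads=1
```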
+In practice, not using parallel test execution is often not possible either because of the larger number +of integration tests that run on a CI server. To be clear, strictly sequential tests are a great option +for small projects with low complexity, it just doesn't scale as the project grows in size and complexity. -And that leaves us only with the middle-ground of grouping tables into isolated access. -Suppose your database has 25 tables you are tasked to test. -Some of them are clearly unrelated, others only require read access to some tables, +And that leaves us only with the middle-ground of grouping tables into isolated access. +Suppose your database has 25 tables you are tasked to test. +Some of them are clearly unrelated, others only require read access to some tables, and then you have those where you have to test for multi-table inserts and updates. -Say, you can form 7 groups of tables that are clearly independent of each other, -then you can run those 7 test groups in parallel, but for all practical purpose within each group, -all tests are run in sequence unless you are testing specifically for concurrent read write access. -You want to put those test into a dedicated test group anyways as they capture errors that are more complex -than errors from your basic integration tests. -And it makes sense to stage integration tests into simple functional tests, complex workflow tests, -and chaos tests that triggers read / write conflicts randomly to test for blind spots. +Say, you can form 7 groups of tables that are clearly independent of each other, +then you can run those 7 test groups in parallel, but for all practical purposes within each group, +all tests are run in sequence unless you are testing specifically for concurrent read write access. +You want to put those tests into a dedicated test group anyway as they capture errors that are more complex +than errors from your basic integration tests. +And it makes sense to stage integration tests into simple functional tests, complex workflow tests, +and chaos tests that trigger read / write conflicts randomly to test for blind spots. In any case, the test suite for the service example follows the sequential execution pattern so that it can be -executed in parallel along other test groups without causing randomly failing tests. Specifically, +executed in parallel alongside other test groups without causing randomly failing tests. Specifically, the test structure looks as shown below. However, the full test suite is in the test folder. ```rust #[test] fn test_service() { - let pool = postgres_connection_pool(); - let conn = &mut pool.get().unwrap(); + let mut connection = postgres_connection(); + let conn = &mut connection; println!("Test DB migration"); test_db_migration(conn);
+The idea here is simple yet powerful: +There is just one Rust test so regardless of whether you test with Cargo, +Nextest or any other test util, this test will run everything within it in sequence. +The print statements are usually only shown if a test fails. +However, there is one important detail worth mentioning: +the assert macro often obfuscates the error message and if you have ever seen a complex stack trace full of fnOnce +invocations, you know that already. +To get a more meaningful error message, just uncomment the dbg! statement that unwraps the result before the assertion and you will see a helpful error message in most cases. -You may have noticed that the DB migration util checks if there are pending migrations and if there is nothing, -it does nothing and just returns. The wisdom behind this decision is that, there are certain corner cases -that only occur when you run a database tet multiple times and you really want to run the DB migration -just once to simulate that scenario as realistic as possible. When you test locally, the same logic applies -and you really only want to run a database migration when the schema has changed. +You may have noticed that the DB migration util checks if there are pending migrations and if there is nothing, it does +nothing and just returns. +The wisdom behind this decision is that there are certain corner cases +that only occur when you run a database test multiple times and you really want to run the DB migration just once to +simulate that scenario as realistically as possible. +When you test locally, the same logic applies and you really only want to run a database migration when the schema has +changed. diff --git a/examples/postgres/custom_arrays/src/lib.rs b/examples/postgres/custom_arrays/src/lib.rs index a6246bb7cad8..b115b8cbc580 100644 --- a/examples/postgres/custom_arrays/src/lib.rs +++ b/examples/postgres/custom_arrays/src/lib.rs @@ -6,7 +6,10 @@ use std::error::Error; pub mod model; mod schema; +// Alias for a pooled connection. // pub type Connection = diesel::r2d2::PooledConnection<ConnectionManager<PgConnection>>; + +// Alias for a normal, single, connection.
pub type Connection = PgConnection; pub const MIGRATIONS: EmbeddedMigrations = embed_migrations!("migrations"); diff --git a/examples/postgres/custom_arrays/tests/service_tests.rs b/examples/postgres/custom_arrays/tests/service_tests.rs index a264ee515bcf..7815aa0a89eb 100644 --- a/examples/postgres/custom_arrays/tests/service_tests.rs +++ b/examples/postgres/custom_arrays/tests/service_tests.rs @@ -10,7 +10,7 @@ use std::env; fn postgres_connection() -> PgConnection { dotenv().ok(); - let database_url = env::var("DATABASE_URL").expect("DATABASE_URL must be set"); + let database_url = env::var("POSTGRES_DATABASE_URL").expect("POSTGRES_DATABASE_URL must be set"); PgConnection::establish(&database_url) .unwrap_or_else(|_| panic!("Error connecting to {}", database_url)) } From cdcc0960f682d6e37befb9eb6dfd90c8e9b168a2 Mon Sep 17 00:00:00 2001 From: Georg Semmler Date: Fri, 23 Aug 2024 14:48:47 +0200 Subject: [PATCH 42/73] Fix formating --- examples/postgres/custom_arrays/tests/service_tests.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/examples/postgres/custom_arrays/tests/service_tests.rs b/examples/postgres/custom_arrays/tests/service_tests.rs index 7815aa0a89eb..4a3701571e57 100644 --- a/examples/postgres/custom_arrays/tests/service_tests.rs +++ b/examples/postgres/custom_arrays/tests/service_tests.rs @@ -10,7 +10,8 @@ use std::env; fn postgres_connection() -> PgConnection { dotenv().ok(); - let database_url = env::var("POSTGRES_DATABASE_URL").expect("POSTGRES_DATABASE_URL must be set"); + let database_url = + env::var("POSTGRES_DATABASE_URL").expect("POSTGRES_DATABASE_URL must be set"); PgConnection::establish(&database_url) .unwrap_or_else(|_| panic!("Error connecting to {}", database_url)) } From 74cf685764fbf4ccf69bf6bf8958a11ce000a832 Mon Sep 17 00:00:00 2001 From: Marvin Hansen Date: Sat, 24 Aug 2024 00:35:00 +0800 Subject: [PATCH 43/73] Update README.md Co-authored-by: Georg Semmler --- examples/postgres/custom_arrays/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/postgres/custom_arrays/README.md b/examples/postgres/custom_arrays/README.md index 33121873ec18..d07e4e049391 100644 --- a/examples/postgres/custom_arrays/README.md +++ b/examples/postgres/custom_arrays/README.md @@ -153,7 +153,7 @@ There are few cases when you want to choose a custom type over a table: 1) The data have no relation to data in other tables. 2) You want to group a set of fields i.e. to store a complex configuration file with nested fields. -3) You specifically don’t relations because of specific requirements. +3) You specifically don’t want relations because of specific requirements. The service endpoint is an example of the first two cases. For once, the endpoints have no relation to any other data therefore a separate table makes little sense. 
Secondly, the From 0118be145f6b6a22a591af4e818b41ce7ae1248c Mon Sep 17 00:00:00 2001 From: Marvin Hansen Date: Sat, 24 Aug 2024 00:35:19 +0800 Subject: [PATCH 44/73] Update README.md Co-authored-by: Georg Semmler --- examples/postgres/custom_arrays/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/postgres/custom_arrays/README.md b/examples/postgres/custom_arrays/README.md index d07e4e049391..4acf63763044 100644 --- a/examples/postgres/custom_arrays/README.md +++ b/examples/postgres/custom_arrays/README.md @@ -202,7 +202,7 @@ You usually can return the primary key value after inserting so if you need to k ### External primary key: INTEGER type In case you need to know the specific value of the primary key and you need to know it before inserting the data, you -have to assign unique primary keys before inserting data and you have to use the INTEGE type (or any of the many integer +have to assign unique primary keys before inserting data and you have to use the INTEGER type (or any of the many integer variations) to convey to Postgres that you set the primary key yourself. Notice, you still have to set the NOT NULL and PRIMARY KEY attribute to ensure data consistency. From 4d7ca4170569dba968d14a40623233f66173142b Mon Sep 17 00:00:00 2001 From: Marvin Hansen Date: Sat, 24 Aug 2024 00:35:29 +0800 Subject: [PATCH 45/73] Update README.md Co-authored-by: Georg Semmler --- examples/postgres/custom_arrays/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/postgres/custom_arrays/README.md b/examples/postgres/custom_arrays/README.md index 4acf63763044..918b682b320d 100644 --- a/examples/postgres/custom_arrays/README.md +++ b/examples/postgres/custom_arrays/README.md @@ -340,7 +340,7 @@ Rust type implementations matching all the generated Postgres types and tables. Lastly, you also want to add a type alias for a postgres database connection in the lib file because that gives you the freedom to swap between a normal single connection or a connection pool without updating your type implementation or tests. -The connection pool type alias has ben uncommented, but the type signature of the pooled connection makes it obvious why +The connection pool type alias has been commented out, but the type signature of the pooled connection makes it obvious why a type alias is a good idea.
At this point, your lib file looks as shown below: From e1cd605006dac9c4ed685ca0d901d9d49155d480 Mon Sep 17 00:00:00 2001 From: Marvin Hansen Date: Sat, 24 Aug 2024 00:35:41 +0800 Subject: [PATCH 46/73] Update README.md Co-authored-by: Georg Semmler --- examples/postgres/custom_arrays/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/postgres/custom_arrays/README.md b/examples/postgres/custom_arrays/README.md index 918b682b320d..0ed399aee8a4 100644 --- a/examples/postgres/custom_arrays/README.md +++ b/examples/postgres/custom_arrays/README.md @@ -359,7 +359,7 @@ pub type Connection = PgConnection; ## Rust Types -In total, you implement three Rust types: +In total, you need to implement three Rust types:
You want to attach the generated schema.rs to your lib file to access it from within your crate and stop your IDE from From 6aec7f80e38400aef26c6cd5ca7c62e41258bb33 Mon Sep 17 00:00:00 2001 From: Marvin Hansen Date: Sat, 24 Aug 2024 00:36:12 +0800 Subject: [PATCH 48/73] Update README.md Co-authored-by: Georg Semmler --- examples/postgres/custom_arrays/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/postgres/custom_arrays/README.md b/examples/postgres/custom_arrays/README.md index a27dd661065a..70744dbe4578 100644 --- a/examples/postgres/custom_arrays/README.md +++ b/examples/postgres/custom_arrays/README.md @@ -367,7 +367,7 @@ In total, you need implement three Rust types: ### Rust Enum: ProtocolType -Diesel needs a wrapper struct to store a custom Enum in Postgres, so let’s add one: +Diesel needs a zero sized SQL type struct to represent a custom Enum in Postgres, so let’s add one: ```rust #[derive(SqlType)] From 931dc4b683bbb1a85da300c8efa7ba8fc5a633a2 Mon Sep 17 00:00:00 2001 From: Marvin Hansen Date: Sat, 24 Aug 2024 00:36:33 +0800 Subject: [PATCH 49/73] Update README.md Co-authored-by: Georg Semmler --- examples/postgres/custom_arrays/README.md | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/examples/postgres/custom_arrays/README.md b/examples/postgres/custom_arrays/README.md index 70744dbe4578..e5e384583718 100644 --- a/examples/postgres/custom_arrays/README.md +++ b/examples/postgres/custom_arrays/README.md @@ -376,8 +376,7 @@ Diesel needs a zero sized SQL type struct to represent a custom Enum in Postgres pub struct PgProtocolType; ``` -It is important that you add both, sql_type and postgres_type, otherwise insert will fail. -Furthermore, it is important to add the database schema ("smdb") otherwise Postgres fails to find the type and aborts an +It is important to add the database schema ("smdb") and type name ("protocol_type") otherwise Postgres fails to find the type and aborts an operation on that type. Next, let’s define the actual Enum. 
Bear in mind to use the wrapper struct as sql_type on the Enum: From 5dc16112644e7f5ea7b49697a51d565a07d9e43d Mon Sep 17 00:00:00 2001 From: Marvin Hansen Date: Sat, 24 Aug 2024 00:36:56 +0800 Subject: [PATCH 50/73] Update README.md Co-authored-by: Georg Semmler --- examples/postgres/custom_arrays/README.md | 1 - 1 file changed, 1 deletion(-) diff --git a/examples/postgres/custom_arrays/README.md b/examples/postgres/custom_arrays/README.md index e5e384583718..1eb0475f7b03 100644 --- a/examples/postgres/custom_arrays/README.md +++ b/examples/postgres/custom_arrays/README.md @@ -371,7 +371,6 @@ Diesel needs a zero sized SQL type struct to represent a custom Enum in Postgres ```rust #[derive(SqlType)] -#[diesel(sql_type = protocol_type)] #[diesel(postgres_type(name = "protocol_type", schema = "smdb"))] pub struct PgProtocolType; ``` From d6425dbbf104d5116ffd45c76cd573dc93695ba1 Mon Sep 17 00:00:00 2001 From: Marvin Hansen Date: Sat, 24 Aug 2024 00:37:09 +0800 Subject: [PATCH 51/73] Update README.md Co-authored-by: Georg Semmler --- examples/postgres/custom_arrays/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/postgres/custom_arrays/README.md b/examples/postgres/custom_arrays/README.md index 1eb0475f7b03..b1f996763170 100644 --- a/examples/postgres/custom_arrays/README.md +++ b/examples/postgres/custom_arrays/README.md @@ -378,7 +378,7 @@ pub struct PgProtocolType; It is important to add the database schema ("smdb") and type name ("protocol_type") otherwise Postgres fails to find the type and aborts an operation on that type. -Next, let’s define the actual Enum. Bear in mind to use the wrapper struct as sql_type on the Enum: +Next, let’s define the actual Enum. Bear in mind to use the SQL type struct as sql_type on the Enum: ```rust #[derive(Debug, Clone, FromSqlRow, AsExpression, PartialEq, Eq)] From 85fdbdaff591080c7784827f6f73c24c92ed7695 Mon Sep 17 00:00:00 2001 From: Marvin Hansen Date: Sat, 24 Aug 2024 00:37:20 +0800 Subject: [PATCH 52/73] Update README.md Co-authored-by: Georg Semmler --- examples/postgres/custom_arrays/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/postgres/custom_arrays/README.md b/examples/postgres/custom_arrays/README.md index b1f996763170..540c60f6d987 100644 --- a/examples/postgres/custom_arrays/README.md +++ b/examples/postgres/custom_arrays/README.md @@ -394,7 +394,7 @@ pub enum ProtocolType { It’s worth pointing out that you are dealing with three types in total: 1) protocol_type: The Postgres Enum type -2) PGProtocolType: The Wrapper struct that refers to the Postgres Enum type +2) PgProtocolType: The SQL type struct that refers to the Postgres Enum type 3) ProtocolType: The Rust Enum that refers to the wrapper struct PGProtocolType Mixing any of those three types will result in a complicated Diesel trait error. 
However, these error messages are just a convoluted way to say that the database type mismatches the Rust type. When you encounter a convoluted trait error message, make sure to check: -1) Do I have a wrapper struct? -2) Does my a wrapper struct derives SqlType? -3) Does my wrapper type has both, sql_type and postgres_type declared? -4) Are sql_type and postgres_type both referring to the correct Postgres type? -5) Does my Rust type refers to the wrapper struct type? +1) Do I have a SQL type struct? +2) Does my SQL type struct derive SqlType? +3) Does my SQL type have a `#[diesel(postgres_type(_))]` attribute declared? +4) Is the `#[diesel(postgres_type(_))]` attribute referring to the correct Postgres type? +5) Does my Rust type refer to the SQL type struct? If all those checks pass and you still see errors, it’s most likely a serialization error. To serialize and deserialize a custom Enum, you write a custom ToSql and FromSql implementation. From 390eae9d14377dda368d5f4f93371459f0bf35d8 Mon Sep 17 00:00:00 2001 From: Marvin Hansen Date: Sat, 24 Aug 2024 00:38:00 +0800 Subject: [PATCH 55/73] Update README.md Co-authored-by: Georg Semmler --- examples/postgres/custom_arrays/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/postgres/custom_arrays/README.md b/examples/postgres/custom_arrays/README.md index dcc6d8b47547..430137bb6cb4 100644 --- a/examples/postgres/custom_arrays/README.md +++ b/examples/postgres/custom_arrays/README.md @@ -465,7 +465,7 @@ to catch errors that may result from incorrect database updates.
```rust #[derive(Debug, Clone, FromSqlRow, AsExpression, PartialEq, Eq)] -#[diesel(sql_type=crate::schema::smdb::sql_types::ServiceEndpoint)] +#[diesel(sql_type = crate::schema::smdb::sql_types::ServiceEndpoint)] pub struct Endpoint { pub name: String, pub version: i32, From 642d6044772f574fe1336b83d09d816d79ecf053 Mon Sep 17 00:00:00 2001 From: Marvin Hansen Date: Sat, 24 Aug 2024 00:38:12 +0800 Subject: [PATCH 56/73] Update README.md Co-authored-by: Georg Semmler --- examples/postgres/custom_arrays/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/postgres/custom_arrays/README.md b/examples/postgres/custom_arrays/README.md index 430137bb6cb4..a441015a95c7 100644 --- a/examples/postgres/custom_arrays/README.md +++ b/examples/postgres/custom_arrays/README.md @@ -483,7 +483,7 @@ The serialize and deserialize implementation of a custom type is not as obvious Postgres represent a custom type as an anonymous typed tuple. Therefore, you have to map a struct to a typed tuple and back. -Let’s start with the toSql implementation to store the Rust Endpoint struct as typed tuple. +Let’s start with the ToSql implementation to store the Rust Endpoint struct as typed tuple. Luckily, Diesel provides a helper util to do just that. ```rust From 609ad61e42300b0731809234090765f21b81443f Mon Sep 17 00:00:00 2001 From: Marvin Hansen Date: Sat, 24 Aug 2024 00:38:25 +0800 Subject: [PATCH 57/73] Update README.md Co-authored-by: Georg Semmler --- examples/postgres/custom_arrays/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/postgres/custom_arrays/README.md b/examples/postgres/custom_arrays/README.md index a441015a95c7..07268fd38a3e 100644 --- a/examples/postgres/custom_arrays/README.md +++ b/examples/postgres/custom_arrays/README.md @@ -512,7 +512,7 @@ If the type or number of types mismatch, you will get a compiler error telling y the number of fields don’t match or that the type of the fields don’t match. Also, because the write_tuple expects values, you have to call either to_owned() or clone() on any referenced data. -The from_sql reverses the process by converting a Postgres typed tuple back into a Rust struct. +The FromSql reverses the process by converting a Postgres typed tuple back into a Rust struct. Again, Diesel provides a convenient helper to do so: ```rust From 236bcc539ce2d98fba487c03ba26986a28ebb48b Mon Sep 17 00:00:00 2001 From: Marvin Hansen Date: Sat, 24 Aug 2024 00:38:35 +0800 Subject: [PATCH 58/73] Update README.md Co-authored-by: Georg Semmler --- examples/postgres/custom_arrays/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/postgres/custom_arrays/README.md b/examples/postgres/custom_arrays/README.md index 07268fd38a3e..f5e9746bbf23 100644 --- a/examples/postgres/custom_arrays/README.md +++ b/examples/postgres/custom_arrays/README.md @@ -533,7 +533,7 @@ impl FromSql for Endpoint { ``` Similar to the serialization process, it is paramount that the type annotation -match exactly the one used in toSql and the type definition in up.sql. +match exactly the one used in ToSql and the type definition in up.sql. 
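To make that three-way alignment concrete, here is a small illustrative sketch; the `up.sql` definition in the comment is an assumption reconstructed from the field names and tuple types used above, not a quote of the actual migration file:

```rust
use diesel::sql_types::{Integer, Text};

// Assumed shape of the custom type in up.sql, reconstructed from the
// Endpoint struct and the tuple used in the ToSql/FromSql impls above:
//
//   CREATE TYPE smdb.service_endpoint AS (
//       "name" TEXT,
//       "version" INTEGER,
//       "base_uri" TEXT,
//       "port" INTEGER,
//       "protocol" smdb.protocol_type
//   );
//
// The tuple below must mirror that definition positionally: same field
// order, same field types. PgProtocolType is the SQL type struct defined
// earlier; its module path here is an assumption for illustration.
#[allow(dead_code)]
type ServiceEndpointTuple = (
    Text,
    Integer,
    Text,
    Integer,
    crate::model::protocol_type::PgProtocolType,
);
```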
Debugging serialization issues in Diesel is relatively straight forward since the compiler usually points out the location of the issue and, often, the issue is a type mismatch that is From e87554e100049285a63174ae5fe3fd7afc0ae815 Mon Sep 17 00:00:00 2001 From: Marvin Hansen Date: Sat, 24 Aug 2024 00:38:46 +0800 Subject: [PATCH 59/73] Update README.md Co-authored-by: Georg Semmler --- examples/postgres/custom_arrays/README.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/examples/postgres/custom_arrays/README.md b/examples/postgres/custom_arrays/README.md index f5e9746bbf23..9676621141b2 100644 --- a/examples/postgres/custom_arrays/README.md +++ b/examples/postgres/custom_arrays/README.md @@ -542,9 +542,9 @@ relatively easy to fix. It’s worth repeating that you are dealing with three types in total: 1) service_endpoint: The Postgres custom type -2) ServiceEndpoint: The wrapper struct generated by Diesel that refers to the Postgres type service_endpoint. Also, only - the wrapper struct carries the postgres_type annotation in addition to the an sql_type annotation. -3) Endpoint: The Rust struct with an sql_type annotation referring to the wrapper struct ServiceEndpoint +2) ServiceEndpoint: The SQL type struct generated by Diesel that refers to the Postgres type service_endpoint. Also, only + the SQL type struct carries the postgres_type annotation. +3) Endpoint: The Rust struct with an sql_type annotation referring to the SQL type struct ServiceEndpoint Make sure all of those types match correctly. From c1d914919a7ae4af44f71be29f46e613bc8a511f Mon Sep 17 00:00:00 2001 From: Marvin Hansen Date: Sat, 24 Aug 2024 00:38:56 +0800 Subject: [PATCH 60/73] Update README.md Co-authored-by: Georg Semmler --- examples/postgres/custom_arrays/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/postgres/custom_arrays/README.md b/examples/postgres/custom_arrays/README.md index 9676621141b2..10a6afc1aa8a 100644 --- a/examples/postgres/custom_arrays/README.md +++ b/examples/postgres/custom_arrays/README.md @@ -583,7 +583,7 @@ The relevant type declarations are: ```rust #[derive(Debug, Clone, Queryable, Selectable)] -#[diesel(table_name= crate::schema::smdb::service, primary_key(service_id))] +#[diesel(table_name = crate::schema::smdb::service, primary_key(service_id))] pub struct Service { pub service_id: i32, pub name: String, From b34f35e0e739eb482ca79537fdcac42a09436246 Mon Sep 17 00:00:00 2001 From: Marvin Hansen Date: Sat, 24 Aug 2024 00:39:09 +0800 Subject: [PATCH 61/73] Update README.md Co-authored-by: Georg Semmler --- examples/postgres/custom_arrays/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/postgres/custom_arrays/README.md b/examples/postgres/custom_arrays/README.md index 10a6afc1aa8a..57b794072689 100644 --- a/examples/postgres/custom_arrays/README.md +++ b/examples/postgres/custom_arrays/README.md @@ -597,7 +597,7 @@ pub struct Service { } #[derive(Debug, Clone, Queryable, Insertable)] -#[diesel(table_name= crate::schema::smdb::service, primary_key(service_id))] +#[diesel(table_name = crate::schema::smdb::service, primary_key(service_id))] pub struct CreateService { pub service_id: i32, // ... 
skipped From 54da5335b5388cf412c4202e4f91fe38fdeeed95 Mon Sep 17 00:00:00 2001 From: Marvin Hansen Date: Sat, 24 Aug 2024 00:39:19 +0800 Subject: [PATCH 62/73] Update README.md Co-authored-by: Georg Semmler --- examples/postgres/custom_arrays/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/postgres/custom_arrays/README.md b/examples/postgres/custom_arrays/README.md index 57b794072689..cdb3c36d00c0 100644 --- a/examples/postgres/custom_arrays/README.md +++ b/examples/postgres/custom_arrays/README.md @@ -605,7 +605,7 @@ pub struct CreateService { } #[derive(Debug, Clone, Queryable, Insertable, AsChangeset)] -#[diesel(table_name= crate::schema::smdb::service)] +#[diesel(table_name = crate::schema::smdb::service)] pub struct UpdateService { pub name: Option, pub version: Option, From 63a3e339a16319bbdd54e6dffed59b061936761c Mon Sep 17 00:00:00 2001 From: Marvin Hansen Date: Sat, 24 Aug 2024 00:40:03 +0800 Subject: [PATCH 63/73] Update README.md Co-authored-by: Georg Semmler --- examples/postgres/custom_arrays/README.md | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/examples/postgres/custom_arrays/README.md b/examples/postgres/custom_arrays/README.md index cdb3c36d00c0..8b0fb2c53696 100644 --- a/examples/postgres/custom_arrays/README.md +++ b/examples/postgres/custom_arrays/README.md @@ -732,14 +732,10 @@ Let’s start with a private helper method that sets the Boolean online flag on param_service_id: i32, param_online: bool, ) -> QueryResult<()> { - match diesel::update(service.filter(service_id.eq(param_service_id))) + diesel::update(service.filter(service_id.eq(param_service_id))) .set(online.eq(param_online)) - .returning(Service::as_returning()) - .get_result(db) - { - Ok(_) => Ok(()), - Err(e) => Err(e), - } + .execute(db)?; + Ok(()) } ``` From 4688f7f076b32cb0fed706477dca7f142e25feac Mon Sep 17 00:00:00 2001 From: Marvin Hansen Date: Sat, 24 Aug 2024 00:40:30 +0800 Subject: [PATCH 64/73] Update README.md Co-authored-by: Georg Semmler --- examples/postgres/custom_arrays/README.md | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/examples/postgres/custom_arrays/README.md b/examples/postgres/custom_arrays/README.md index 8b0fb2c53696..f95274d082b7 100644 --- a/examples/postgres/custom_arrays/README.md +++ b/examples/postgres/custom_arrays/README.md @@ -769,10 +769,7 @@ returns a result or error. 
db: &mut Connection, param_service_id: i32, ) -> QueryResult { - match service.find(param_service_id).first::(db) { - Ok(_) => Ok(true), - Err(_) => Ok(false), - } + Ok(service.find(param_service_id).first::(db).optional()?.is_some()) } ``` From c62124a541035d27b4041dc2d7ce7014863df4e5 Mon Sep 17 00:00:00 2001 From: Marvin Hansen Date: Mon, 26 Aug 2024 12:19:08 +0800 Subject: [PATCH 65/73] postgres custom array example: Updated run_db_migration Added revert_db_migration Signed-off-by: Marvin Hansen --- examples/postgres/custom_arrays/src/lib.rs | 55 ++++++++++++------- .../custom_arrays/tests/service_tests.rs | 9 +++ 2 files changed, 43 insertions(+), 21 deletions(-) diff --git a/examples/postgres/custom_arrays/src/lib.rs b/examples/postgres/custom_arrays/src/lib.rs index b115b8cbc580..948ad073d5a4 100644 --- a/examples/postgres/custom_arrays/src/lib.rs +++ b/examples/postgres/custom_arrays/src/lib.rs @@ -36,30 +36,43 @@ pub fn run_db_migration( return Err(Box::new(e)); } } - - // Check if DB has pending migrations - let has_pending = match conn.has_pending_migration(MIGRATIONS) { - Ok(has_pending) => has_pending, + // Run all pending migrations. + match conn.run_pending_migrations(MIGRATIONS) { + Ok(_) => Ok(()), Err(e) => { - eprint!( - "[run_db_migration]: Error checking for pending database migrations: {}", - e - ); - return Err(e); + eprint!("[run_db_migration]: Error migrating database: {}", e); + Err(e) } - }; + } +} - // If so, run all pending migrations. - if has_pending { - match conn.run_pending_migrations(MIGRATIONS) { - Ok(_) => Ok(()), - Err(e) => { - eprint!("[run_db_migration]: Error migrating database: {}", e); - Err(e) - } +/// Revert all pending database migrations. +/// +/// # Arguments +/// +/// * `conn` - A mutable reference to a `Connection` object. +/// +/// # Errors +/// +/// * If there is an error while connecting to the database. +/// * If there is an error while reverting the database migrations. +/// +pub fn revert_db_migration( + conn: &mut Connection, +) -> Result<(), Box> { + // Check DB connection! 
+ if let Ok(_) = conn.ping() { + } else if let Err(e) = conn.ping() { + eprint!("[pg_cmdb]: Error connecting to database: {}", e); + return Err(Box::new(e)); + } + + // Revert all pending migrations + match conn.revert_all_migrations(MIGRATIONS) { + Ok(_) => Ok(()), + Err(e) => { + eprint!("[pg_cmdb]: Error reverting database migrations: {}", e); + Err(e) } - } else { - // Nothing pending, just return - Ok(()) } } diff --git a/examples/postgres/custom_arrays/tests/service_tests.rs b/examples/postgres/custom_arrays/tests/service_tests.rs index 4a3701571e57..756af307d9f6 100644 --- a/examples/postgres/custom_arrays/tests/service_tests.rs +++ b/examples/postgres/custom_arrays/tests/service_tests.rs @@ -22,6 +22,12 @@ fn test_db_migration(conn: &mut Connection) { assert!(res.is_ok()); } +fn test_revert_db_migration(conn: &mut Connection) { + let res = custom_arrays::revert_db_migration(conn); + //dbg!(&result); + assert!(res.is_ok()); +} + #[test] fn test_service() { let mut connection = postgres_connection(); @@ -71,6 +77,9 @@ fn test_service() { println!("Test delete service!"); test_service_delete(conn); + + println!("Test revert DB migration"); + test_revert_db_migration(conn); } fn test_create_service(conn: &mut Connection) { From 9547c225fcf1e532563524ff1e0e022787c39e31 Mon Sep 17 00:00:00 2001 From: Marvin Hansen Date: Mon, 26 Aug 2024 12:26:06 +0800 Subject: [PATCH 66/73] postgres custom array Readme: Updated check_if_service_id_exists at line 790 with actual implementation, Updated check_all_services_online at line 860 with actual implementation. Signed-off-by: Marvin Hansen --- examples/postgres/custom_arrays/README.md | 25 ++++++++--------------- 1 file changed, 9 insertions(+), 16 deletions(-) diff --git a/examples/postgres/custom_arrays/README.md b/examples/postgres/custom_arrays/README.md index f95274d082b7..8cac9ebecbd6 100644 --- a/examples/postgres/custom_arrays/README.md +++ b/examples/postgres/custom_arrays/README.md @@ -782,10 +782,11 @@ To check if a service is online, we again lean on the generated DSL: db: &mut Connection, param_service_id: i32, ) -> QueryResult { - match service.find(param_service_id).first::(db) { - Ok(_) => Ok(true), - Err(_) => Ok(false), - } + service + .find(param_service_id) + .first::(db) + .optional() + .map(|arg0: Option| Option::is_some(&arg0)) } ``` @@ -856,19 +857,11 @@ Finally, we can implement the last method that takes a collection of service ID either true, or the service ID that is offline. ```rust - pub fn check_all_services_online( - db: &mut Connection, - services: &[i32], - ) -> QueryResult<(bool, Option)> { + pub fn check_all_services_online(db: &mut Connection, services: &[i32]) -> QueryResult<(bool, Option)> { for id in services { - match Service::check_if_service_id_online(db, *id) { - Ok(res) => { - if !res { - return Ok((false, Some(format!("Service {} is offline", id)))); - } - } - Err(e) => return Err(e), - }; + if !Service::check_if_service_id_online(db, *id)? { + return Ok((false, Some(format!("Service {} is offline", id)))); + } } Ok((true, None)) From f0668769ff113f2dfcd373ca7624418614d2c318 Mon Sep 17 00:00:00 2001 From: Marvin Hansen Date: Mon, 26 Aug 2024 14:03:16 +0800 Subject: [PATCH 67/73] postgres custom array example: Added parallel executable integration tests. 
Signed-off-by: Marvin Hansen --- examples/postgres/custom_arrays/src/lib.rs | 1 - examples/postgres/custom_arrays/tests/mod.rs | 5 +- .../tests/parallel_service_tests.rs | 503 ++++++++++++++++++ ...ce_tests.rs => single_service_test_run.rs} | 31 +- 4 files changed, 531 insertions(+), 9 deletions(-) create mode 100644 examples/postgres/custom_arrays/tests/parallel_service_tests.rs rename examples/postgres/custom_arrays/tests/{service_tests.rs => single_service_test_run.rs} (92%) diff --git a/examples/postgres/custom_arrays/src/lib.rs b/examples/postgres/custom_arrays/src/lib.rs index 948ad073d5a4..9fa8709b79cf 100644 --- a/examples/postgres/custom_arrays/src/lib.rs +++ b/examples/postgres/custom_arrays/src/lib.rs @@ -5,7 +5,6 @@ use std::error::Error; pub mod model; mod schema; - // Alias for a pooled connection. // pub type Connection = diesel::r2d2::PooledConnection>; diff --git a/examples/postgres/custom_arrays/tests/mod.rs b/examples/postgres/custom_arrays/tests/mod.rs index 67ae88e7ba49..810a236e5b21 100644 --- a/examples/postgres/custom_arrays/tests/mod.rs +++ b/examples/postgres/custom_arrays/tests/mod.rs @@ -1,2 +1,5 @@ #[cfg(test)] -mod service_tests; +mod parallel_service_tests; + +#[cfg(test)] +mod single_service_test_run; diff --git a/examples/postgres/custom_arrays/tests/parallel_service_tests.rs b/examples/postgres/custom_arrays/tests/parallel_service_tests.rs new file mode 100644 index 000000000000..104fb5b35986 --- /dev/null +++ b/examples/postgres/custom_arrays/tests/parallel_service_tests.rs @@ -0,0 +1,503 @@ +use custom_arrays::model::endpoint_type::Endpoint; +use custom_arrays::model::protocol_type::ProtocolType; +use custom_arrays::model::service; +use custom_arrays::model::service::{CreateService, UpdateService}; +use custom_arrays::Connection; +use diesel::{Connection as DieselConnection, PgConnection}; +use dotenvy::dotenv; +use std::env; +use std::process::Command; +use std::time::Duration; + +#[allow(dead_code)] +fn start_or_reuse_postgres_docker_container() { + // check if a container with name postgres-5432 is already running. + // If so, re-use that container and just return. + let output = Command::new("docker") + .arg("ps") + .arg(format!("--filter=name={}", "postgres-5432")) + .arg("--format={{.Names}}") + .output() + .expect("failed to check for running postgres container"); + if !output.stdout.is_empty() { + return; + } + + // If there is no container running, start one. 
+ // Example: docker run --name postgres-5432 -p 5432:5432 -e POSTGRES_PASSWORD=postgres -d postgres:16.3-bookworm + println!("start_postgres_docker_container"); + Command::new("docker") + .args([ + "run", + "--name", + "postgres-5432", + "-p", + "5432:5432", + "-e", + "POSTGRES_PASSWORD=postgres", + "-d", + "postgres:16.3-bookworm", + ]) + .output() + .expect("failed to start postgres container"); + // Wait for the container to start + std::thread::sleep(Duration::from_secs(5)); +} + +fn run_db_migration(conn: &mut Connection) { + println!("run_db_migration"); + let res = custom_arrays::run_db_migration(conn); + //dbg!(&result); + assert!(res.is_ok()); +} + +fn revert_db_migration(conn: &mut Connection) { + println!("revert_db_migration"); + let res = custom_arrays::revert_db_migration(conn); + //dbg!(&result); + assert!(res.is_ok()); +} + +fn postgres_connection() -> PgConnection { + println!("postgres_connection"); + dotenv().ok(); + + let database_url = + env::var("POSTGRES_DATABASE_URL").expect("POSTGRES_DATABASE_URL must be set"); + + PgConnection::establish(&database_url) + .unwrap_or_else(|_| panic!("Error connecting to {}", database_url)) +} + +#[allow(dead_code)] +fn test_teardown() { + println!("test_teardown"); + + // Optional. In some environments, you may have to tidy up everything after testing. + let mut connection = postgres_connection(); + let conn = &mut connection; + + println!("Revert pending DB migration"); + revert_db_migration(conn); +} + +// #[test] // You can test the test setup standalone i.e. for debugging. +// Run this setup first in every test. +fn test_setup() { + println!("test_setup"); + + // Optional: May start a Docker container first if you want fully self contained testing + // If there is a container already running, the util will re-use it. 
+ // println!("Start or reuse postgres container"); + // start_or_reuse_postgres_docker_container(); + + println!("Connect to database"); + let mut connection = postgres_connection(); + let conn = &mut connection; + + println!("Run pending DB migration"); + run_db_migration(conn); +} + +#[test] +fn test_create_service() { + test_setup(); + + let mut connection = postgres_connection(); + let conn = &mut connection; + conn.begin_test_transaction() + .expect("Failed to begin test transaction"); + + let service = get_crate_service(); + let endpoints = get_endpoints(); + let dependencies = get_dependencies(); + + let result = service::Service::create(conn, &service); + + // dbg!(&result); + assert!(result.is_ok()); + + let service = result.unwrap(); + + assert_eq!(service.service_id, 1); + assert_eq!(service.name, "test"); + assert_eq!(service.version, 1); + assert!(service.online); + assert_eq!(service.description, "test"); + assert_eq!(service.health_check_uri, "http://example.com"); + assert_eq!(service.base_uri, "http://example.com"); + assert_eq!(service.dependencies, dependencies); + assert_eq!(service.endpoints, endpoints); +} + +#[test] +fn test_count_service() { + test_setup(); + + let mut connection = postgres_connection(); + let conn = &mut connection; + conn.begin_test_transaction() + .expect("Failed to begin test transaction"); + + let result = service::Service::count(conn); + //dbg!(&result); + assert!(result.is_ok()); + assert_eq!(result.unwrap(), 0); + + let service = get_crate_service(); + let result = service::Service::create(conn, &service); + // dbg!(&result); + assert!(result.is_ok()); + + let result = service::Service::count(conn); + //dbg!(&result); + assert!(result.is_ok()); + assert_eq!(result.unwrap(), 1); +} + +#[test] +fn test_check_if_service_id_exists() { + test_setup(); + + let mut connection = postgres_connection(); + let conn = &mut connection; + conn.begin_test_transaction() + .expect("Failed to begin test transaction"); + + let service = get_crate_service(); + let result = service::Service::create(conn, &service); + // dbg!(&result); + assert!(result.is_ok()); + + let result = service::Service::check_if_service_id_exists(conn, 1); + //dbg!(&result); + assert!(result.is_ok()); + assert!(result.unwrap()); +} + +#[test] +fn test_check_if_service_id_online() { + test_setup(); + + let mut connection = postgres_connection(); + let conn = &mut connection; + conn.begin_test_transaction() + .expect("Failed to begin test transaction"); + + let service = get_crate_service(); + let result = service::Service::create(conn, &service); + // dbg!(&result); + assert!(result.is_ok()); + + // Test if online + let result = service::Service::check_if_service_id_online(conn, 1); + //dbg!(&result); + assert!(result.is_ok()); + assert!(result.unwrap()); +} + +#[test] +fn test_get_all_online_services() { + test_setup(); + + let mut connection = postgres_connection(); + let conn = &mut connection; + conn.begin_test_transaction() + .expect("Failed to begin test transaction"); + + let service = get_crate_service(); + let result = service::Service::create(conn, &service); + // dbg!(&result); + assert!(result.is_ok()); + + let result = service::Service::get_all_online_services(conn); + //dbg!(&result); + assert!(result.is_ok()); + assert!(!result.unwrap().is_empty()); +} + +#[test] +fn test_get_all_offline_services() { + test_setup(); + + let mut connection = postgres_connection(); + let conn = &mut connection; + conn.begin_test_transaction() + .expect("Failed to begin test transaction"); + 
+ let service = get_crate_service(); + let result = service::Service::create(conn, &service); + // dbg!(&result); + assert!(result.is_ok()); + + let result = service::Service::get_all_offline_services(conn); + //dbg!(&result); + assert!(result.is_ok()); + assert_eq!(result.unwrap().len(), 0); +} + +#[test] +fn test_get_all_service_dependencies() { + test_setup(); + + let mut connection = postgres_connection(); + let conn = &mut connection; + conn.begin_test_transaction() + .expect("Failed to begin test transaction"); + + let service = get_crate_service(); + let result = service::Service::create(conn, &service); + // dbg!(&result); + assert!(result.is_ok()); + + let service_id = 1; + + let result = service::Service::get_all_service_dependencies(conn, service_id); + //dbg!(&result); + assert!(result.is_ok()); + assert_eq!(result.unwrap().len(), 1); +} + +#[test] +fn test_get_all_service_endpoints() { + test_setup(); + + let mut connection = postgres_connection(); + let conn = &mut connection; + conn.begin_test_transaction() + .expect("Failed to begin test transaction"); + + let service = get_crate_service(); + let result = service::Service::create(conn, &service); + // dbg!(&result); + assert!(result.is_ok()); + + let service_id = 1; + + let result = service::Service::get_all_service_endpoints(conn, service_id); + //dbg!(&result); + assert!(result.is_ok()); + assert_eq!(result.unwrap().len(), 2); +} + +#[test] +fn test_service_read() { + test_setup(); + + let mut connection = postgres_connection(); + let conn = &mut connection; + conn.begin_test_transaction() + .expect("Failed to begin test transaction"); + + let service = get_crate_service(); + let result = service::Service::create(conn, &service); + // dbg!(&result); + assert!(result.is_ok()); + + let service_id = 1; + + let result = service::Service::read(conn, service_id); + //dbg!(&result); + assert!(result.is_ok()); + + let service = result.unwrap(); + + assert_eq!(service.service_id, 1); + assert_eq!(service.name, "test"); + assert_eq!(service.version, 1); + assert!(service.online); + assert_eq!(service.description, "test"); + assert_eq!(service.health_check_uri, "http://example.com"); + assert_eq!(service.base_uri, "http://example.com"); + assert_eq!(service.dependencies, vec![Some(42)]); +} + +#[test] +fn test_service_read_all() { + let mut connection = postgres_connection(); + let conn = &mut connection; + conn.begin_test_transaction() + .expect("Failed to begin test transaction"); + + let service = get_crate_service(); + let result = service::Service::create(conn, &service); + // dbg!(&result); + assert!(result.is_ok()); + + let result = service::Service::read_all(conn); + //dbg!(&result); + assert!(result.is_ok()); + + let services = result.unwrap(); + assert!(!services.is_empty()); +} + +#[test] +fn test_set_service_online() { + let mut connection = postgres_connection(); + let conn = &mut connection; + conn.begin_test_transaction() + .expect("Failed to begin test transaction"); + + let service = get_crate_service(); + let result = service::Service::create(conn, &service); + // dbg!(&result); + assert!(result.is_ok()); + + let service_id = 1; + + let result = service::Service::set_service_online(conn, service_id); + //dbg!(&result); + assert!(result.is_ok()); + + let result = service::Service::check_if_service_id_online(conn, service_id); + //dbg!(&result); + assert!(result.is_ok()); + assert!(result.unwrap()); +} + +#[test] +fn test_set_service_offline() { + let mut connection = postgres_connection(); + let conn = &mut 
connection; + conn.begin_test_transaction() + .expect("Failed to begin test transaction"); + + let service = get_crate_service(); + let result = service::Service::create(conn, &service); + // dbg!(&result); + assert!(result.is_ok()); + + let service_id = 1; + + let result = service::Service::set_service_offline(conn, service_id); + //dbg!(&result); + assert!(result.is_ok()); + + let result = service::Service::check_if_service_id_online(conn, service_id); + //dbg!(&result); + assert!(result.is_ok()); + assert!(!result.unwrap()); +} + +#[test] +fn test_service_update() { + let mut connection = postgres_connection(); + let conn = &mut connection; + conn.begin_test_transaction() + .expect("Failed to begin test transaction"); + + let service = get_crate_service(); + let result = service::Service::create(conn, &service); + // dbg!(&result); + assert!(result.is_ok()); + + // check if service_id exists so we can update the service + let result = service::Service::check_if_service_id_exists(conn, 1); + //dbg!(&result); + assert!(result.is_ok()); + assert!(result.unwrap()); + + let update = UpdateService::new( + Some("new_test".to_string()), + Some(2), + Some(true), + None, + None, + None, + None, + None, + ); + + let result = service::Service::update(conn, 1, &update); + //dbg!(&result); + assert!(result.is_ok()); + + let service = result.unwrap(); + + assert_eq!(service.service_id, 1); + assert_eq!(service.name, "new_test"); + assert_eq!(service.version, 2); + assert!(service.online); + assert_eq!(service.description, "test"); + assert_eq!(service.health_check_uri, "http://example.com"); + assert_eq!(service.base_uri, "http://example.com"); + assert_eq!(service.dependencies.len(), 1); + assert_eq!(service.dependencies, vec![Some(42)]); +} + +#[test] +fn test_service_delete() { + let mut connection = postgres_connection(); + let conn = &mut connection; + conn.begin_test_transaction() + .expect("Failed to begin test transaction"); + + // Insert the service + let service = get_crate_service(); + let result = service::Service::create(conn, &service); + // dbg!(&result); + assert!(result.is_ok()); + + // Check if its there + let result = service::Service::read(conn, 1); + //dbg!(&result); + assert!(result.is_ok()); + + // Delete service + let result = service::Service::delete(conn, 1); + //dbg!(&result); + assert!(result.is_ok()); + + // Check its gone + let result = service::Service::read(conn, 1); + //dbg!(&result); + assert!(result.is_err()); + + let result = service::Service::count(conn); + //dbg!(&result); + assert!(result.is_ok()); + assert_eq!(result.unwrap(), 0); +} + +fn get_crate_service() -> CreateService { + let endpoints = get_endpoints(); + + let dependencies = get_dependencies(); + + CreateService { + service_id: 1, + name: "test".to_string(), + version: 1, + online: true, + description: "test".to_string(), + health_check_uri: "http://example.com".to_string(), + base_uri: "http://example.com".to_string(), + dependencies, + endpoints, + } +} + +fn get_endpoints() -> Vec> { + let grpc_endpoint = Endpoint::new( + "test_grpc_endpoint".to_string(), + 1, + "/grpc".to_string(), + 7070, + ProtocolType::GRPC, + ); + + let http_endpoint = Endpoint::new( + "test_http_endpoint".to_string(), + 1, + "/http".to_string(), + 8080, + ProtocolType::HTTP, + ); + + vec![Some(grpc_endpoint.clone()), Some(http_endpoint.clone())] +} + +fn get_dependencies() -> Vec> { + vec![Some(42)] +} diff --git a/examples/postgres/custom_arrays/tests/service_tests.rs 
b/examples/postgres/custom_arrays/tests/single_service_test_run.rs similarity index 92% rename from examples/postgres/custom_arrays/tests/service_tests.rs rename to examples/postgres/custom_arrays/tests/single_service_test_run.rs index 756af307d9f6..6f5c1eff32e2 100644 --- a/examples/postgres/custom_arrays/tests/service_tests.rs +++ b/examples/postgres/custom_arrays/tests/single_service_test_run.rs @@ -16,25 +16,28 @@ fn postgres_connection() -> PgConnection { .unwrap_or_else(|_| panic!("Error connecting to {}", database_url)) } -fn test_db_migration(conn: &mut Connection) { +fn run_db_migration(conn: &mut Connection) { + println!("run_db_migration"); let res = custom_arrays::run_db_migration(conn); //dbg!(&result); assert!(res.is_ok()); } -fn test_revert_db_migration(conn: &mut Connection) { +fn revert_db_migration(conn: &mut Connection) { + println!("revert_db_migration"); let res = custom_arrays::revert_db_migration(conn); //dbg!(&result); assert!(res.is_ok()); } -#[test] +// #[test] // Uncomment to run. Make sure to comment out the other tests first. +#[allow(dead_code)] fn test_service() { let mut connection = postgres_connection(); let conn = &mut connection; - println!("Test DB migration"); - test_db_migration(conn); + println!("Run DB migration"); + run_db_migration(conn); println!("Test create!"); test_create_service(conn); @@ -78,10 +81,11 @@ fn test_service() { println!("Test delete service!"); test_service_delete(conn); - println!("Test revert DB migration"); - test_revert_db_migration(conn); + println!("Revert DB migration"); + revert_db_migration(conn); } +#[allow(dead_code)] fn test_create_service(conn: &mut Connection) { let grpc_endpoint = Endpoint::new( "test_grpc_endpoint".to_string(), @@ -136,6 +140,7 @@ fn test_create_service(conn: &mut Connection) { ); } +#[allow(dead_code)] fn test_count_service(conn: &mut Connection) { let result = service::Service::count(conn); //dbg!(&result); @@ -143,6 +148,7 @@ fn test_count_service(conn: &mut Connection) { assert_eq!(result.unwrap(), 1); } +#[allow(dead_code)] fn test_check_if_service_id_exists(conn: &mut Connection) { let result = service::Service::check_if_service_id_exists(conn, 1); //dbg!(&result); @@ -150,6 +156,7 @@ fn test_check_if_service_id_exists(conn: &mut Connection) { assert!(result.unwrap()); } +#[allow(dead_code)] fn test_check_if_service_id_online(conn: &mut Connection) { let result = service::Service::check_if_service_id_online(conn, 1); //dbg!(&result); @@ -157,6 +164,7 @@ fn test_check_if_service_id_online(conn: &mut Connection) { assert!(result.unwrap()); } +#[allow(dead_code)] fn test_get_all_online_services(conn: &mut Connection) { let result = service::Service::get_all_online_services(conn); //dbg!(&result); @@ -164,6 +172,7 @@ fn test_get_all_online_services(conn: &mut Connection) { assert!(!result.unwrap().is_empty()); } +#[allow(dead_code)] fn test_get_all_offline_services(conn: &mut Connection) { let result = service::Service::get_all_offline_services(conn); //dbg!(&result); @@ -171,6 +180,7 @@ fn test_get_all_offline_services(conn: &mut Connection) { assert_eq!(result.unwrap().len(), 0); } +#[allow(dead_code)] fn test_get_all_service_dependencies(conn: &mut Connection) { let service_id = 1; @@ -180,6 +190,7 @@ fn test_get_all_service_dependencies(conn: &mut Connection) { assert_eq!(result.unwrap().len(), 1); } +#[allow(dead_code)] fn test_get_all_service_endpoints(conn: &mut Connection) { let service_id = 1; @@ -189,6 +200,7 @@ fn test_get_all_service_endpoints(conn: &mut Connection) { 
assert_eq!(result.unwrap().len(), 2); } +#[allow(dead_code)] fn test_service_read(conn: &mut Connection) { let service_id = 1; @@ -208,6 +220,7 @@ fn test_service_read(conn: &mut Connection) { assert_eq!(service.dependencies, vec![Some(42)]); } +#[allow(dead_code)] fn test_service_read_all(conn: &mut Connection) { let result = service::Service::read_all(conn); //dbg!(&result); @@ -217,6 +230,7 @@ fn test_service_read_all(conn: &mut Connection) { assert!(!services.is_empty()); } +#[allow(dead_code)] fn test_set_service_online(conn: &mut Connection) { let service_id = 1; @@ -230,6 +244,7 @@ fn test_set_service_online(conn: &mut Connection) { assert!(result.unwrap()); } +#[allow(dead_code)] fn test_set_service_offline(conn: &mut Connection) { let service_id = 1; @@ -243,6 +258,7 @@ fn test_set_service_offline(conn: &mut Connection) { assert!(!result.unwrap()); } +#[allow(dead_code)] fn test_service_update(conn: &mut Connection) { // check if service_id exists so we can update the service let result = service::Service::check_if_service_id_exists(conn, 1); @@ -278,6 +294,7 @@ fn test_service_update(conn: &mut Connection) { assert_eq!(service.dependencies, vec![Some(42)]); } +#[allow(dead_code)] fn test_service_delete(conn: &mut Connection) { let result = service::Service::read(conn, 1); //dbg!(&result); From dfcfce88e71b9f328523303afe6ae7cc24a8fcac Mon Sep 17 00:00:00 2001 From: Marvin Hansen Date: Mon, 26 Aug 2024 14:54:53 +0800 Subject: [PATCH 68/73] postgres custom array example: Updated README.md Signed-off-by: Marvin Hansen --- examples/postgres/custom_arrays/README.md | 77 ++++++++++++++--------- 1 file changed, 49 insertions(+), 28 deletions(-) diff --git a/examples/postgres/custom_arrays/README.md b/examples/postgres/custom_arrays/README.md index 8cac9ebecbd6..13c99c6e9bf6 100644 --- a/examples/postgres/custom_arrays/README.md +++ b/examples/postgres/custom_arrays/README.md @@ -1068,23 +1068,16 @@ ensure you have clear error messages to speed up diagnostic and debugging. ### DB Integration Tests -Database integration tests become flaky when executed in parallel usually because of conflicting read / write -operations. +Conventionally, Database integration tests become flaky when executed in parallel usually because of conflicting read / write operations. While modern database systems can handle concurrent data access, test tools with assertions not so much. That means, test assertions start to fail seemingly randomly when executed concurrently. There are only very few viable options to deal with this reality: * Don't use parallel test execution -* Only parallelize test per isolated access -* Do synchronization and test the actual database state before each test and run tests as atomic transactions +* Form isolated test groups that run in parallel, but tests in each group run sequentially. -Out of the three options, the last one will almost certainly win you an over-engineering award in the unlikely case -your colleagues appreciate the resulting test complexity. If not, good luck. -In practice, not using parallel test execution is often not possible either because of the larger number -of integration tests that run on a CI server. To be clear, strictly sequential tests is a great option -for small projects with low complexity, it just doesn't scale as the project grows in size and complexity. -And that leaves us only with the middle-ground of grouping tables into isolated access. +In practice, the second option is often used by grouping tables into isolated test groups. 
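To make that grouping concrete, here is a minimal sketch of one such group; the helper and step names are placeholders, not code from this example project:

```rust
use diesel::{Connection, PgConnection};

// Placeholder helper: reads the connection string from the environment.
fn establish_connection() -> PgConnection {
    let url = std::env::var("DATABASE_URL").expect("DATABASE_URL must be set");
    PgConnection::establish(&url).unwrap_or_else(|_| panic!("Error connecting to {}", url))
}

// Placeholder steps; in a real suite each one wraps a Diesel query plus assertions.
fn create_step(_conn: &mut PgConnection) { /* insert a row, assert the result */ }
fn update_step(_conn: &mut PgConnection) { /* update the row, assert the result */ }
fn delete_step(_conn: &mut PgConnection) { /* delete the row, assert the result */ }

#[test]
fn service_table_group() {
    // One #[test] entry point per group: the steps inside run strictly in
    // order, while other groups touching unrelated tables may run in parallel.
    let mut conn = establish_connection();
    create_step(&mut conn);
    update_step(&mut conn);
    delete_step(&mut conn);
}
```

The prose below walks through a fuller version of exactly this pattern.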
Suppose your database has 25 tables you are tasked to test. Some of them are clearly unrelated, others only require read access to some tables, and then you have those where you have to test for multi-table inserts and updates. @@ -1097,9 +1090,7 @@ than errors from your basic integration tests. And it makes sense to stage integration tests into simple functional tests, complex workflow tests, and chaos tests that trigger read / write conflicts randomly to test for blind spots. -In any case, the test suite for the service example follow the sequential execution pattern so that they can be -executed in parallel along other test groups without causing randomly failing tests. Specifically, -the test structure looks as shown below. However, the full test suite is in the test folder. +In any case, the test folder contains an example of this testing style in the single_service_test file. ```rust #[test] @@ -1107,8 +1098,8 @@ fn test_service() { let mut connection = postgres_connection(); let conn = &mut connection; - println!("Test DB migration"); - test_db_migration(conn); + println!("Run DB migration"); + run_db_migration(conn); println!("Test create!"); test_create_service(conn); @@ -1117,13 +1108,10 @@ test_count_service(conn); //... + + println!("Revert DB migration"); + revert_db_migration(conn); } - -fn test_db_migration(conn: &mut Connection) { - let res = custom_arrays::run_db_migration(conn); - //dbg!(&result); - assert!(res.is_ok()); -} ``` The idea here is simple yet powerful: @@ -1136,11 +1124,44 @@ invocations, you know that already. To get a more meaningful error message, just uncomment the dbg! statement that unwraps the result before the assertion and you will see a helpful error message in most cases. -You may have noticed that the DB migration util checks if there are pending migrations and if there is nothing, it does -nothing and just returns. -The wisdom behind this decision is that, there are certain corner cases -that only occur when you run a database tet multiple times and you really want to run the DB migration just once to -simulate that scenario as realistic as possible. -When you test locally, the same logic applies and you really only want to run a database migration when the schema has -changed. +Diesel, however, offers an easier way to run parallel integration tests by leaning on a test transaction. +Conventionally, a transaction follows the two [phase commit protocol](https://en.wikipedia.org/wiki/Two-phase_commit_protocol). In stage one, a commit request is sent to the database to take the necessary steps for either committing or aborting the transaction and test if a commit would succeed. In stage two, the transaction is committed or aborted depending on whether any conflicts were detected in stage one. +A test transaction initiates the first stage, prepares everything ready to commit, and then just aborts the transaction at the end of a test. This is as realistic as a real transaction, except nothing is commited to the database and therefore all test transactions can run in parallel. + +The test folder contains an example of this testing style in the parallel_service_test file.
+ + +```rust +#[test] +fn test_create_service() { + test_setup(); + + let mut connection = postgres_connection(); + let conn = &mut connection; + + conn.begin_test_transaction() + .expect("Failed to begin test transaction"); + + let service = get_crate_service(); + let endpoints = get_endpoints(); + let dependencies = get_dependencies(); + + let result = service::Service::create(conn, &service); + + // dbg!(&result); + assert!(result.is_ok()); + // ... +} +``` + +Here, the test setup runs first to test if the database connection works and all migrations are in place. +Also, noticed, each test gets its own database connection. After the connection has been established, +the test transaction is started next. From there, the test code is executed. Because each test runs in +a separate transaction, you have to apply first principle to each test. For example, if you want to test +to update a service, you have to insert a service first, then update it, and then +test if the update was successful. Because each test transaction aborts after each test, there is no left over +data to worry about after each test. Furthermore, since PostgreSQL executes transactions in isolation, +regardless of whether these are commited or not, no two tests can interfere with each other. +Lastly, testing this way is very fast when executed in parallel. + From 0a57904c5053741589ae9df844b77009497ffd7d Mon Sep 17 00:00:00 2001 From: Marvin Hansen Date: Mon, 26 Aug 2024 15:02:06 +0800 Subject: [PATCH 69/73] Fixed linting Signed-off-by: Marvin Hansen --- examples/postgres/custom_arrays/README.md | 13 ++++++++++--- examples/postgres/custom_arrays/src/lib.rs | 2 +- 2 files changed, 11 insertions(+), 4 deletions(-) diff --git a/examples/postgres/custom_arrays/README.md b/examples/postgres/custom_arrays/README.md index 13c99c6e9bf6..ed89e616c930 100644 --- a/examples/postgres/custom_arrays/README.md +++ b/examples/postgres/custom_arrays/README.md @@ -1126,10 +1126,17 @@ statement that unwraps the result before the assertion and you will see a helpfu Diesel, however, offers an easier way to do to parallel integration tests by leaning on a test transaction. Conventionally, a transaction follows a the two [phase commit protocol](https://en.wikipedia.org/wiki/Two-phase_commit_protocol). In stage one, a commit request is send to the database to take the necessary steps for either committing or aborting the transaction and test if a commit would succeed. In stage two, the transaction is committed or aborted depending on whether any conflicts were detected in stage one. -A test transaction initiates the first stage, prepares everything ready to commit, and then just aborts the transaction at the end of a test. This is as realistic as a real transaction, except nothing is commited to the database and therefore all test transactions can run in parallel. +A test transaction initiates the first stage, prepares everything ready to commit, and then just aborts the transaction at the end of a test. Specifically, you use a dangling test transaction. Diesel uses that extensively for it's own test-suite. +What that means is that you perform the following steps: + +* Open a new connection per test +* Directly start a new transaction (Diesel provides the Connection::begin_test_transaction function for this) +* Perform any test code (including schema setup for postgres) +* Rollback the transaction -The test folder contains an example of this testing style in the parallel_service_test file. 
+With that you end up with each test does not see the changes from any other tests. +The test folder contains an example of this testing style in the parallel_service_test file. ```rust #[test] @@ -1161,7 +1168,7 @@ a separate transaction, you have to apply first principle to each test. For exam to update a service, you have to insert a service first, then update it, and then test if the update was successful. Because each test transaction aborts after each test, there is no left over data to worry about after each test. Furthermore, since PostgreSQL executes transactions in isolation, -regardless of whether these are commited or not, no two tests can interfere with each other. +regardless of whether these are committed or not, no two tests can interfere with each other. Lastly, testing this way is very fast when executed in parallel. diff --git a/examples/postgres/custom_arrays/src/lib.rs b/examples/postgres/custom_arrays/src/lib.rs index 9fa8709b79cf..13ae106a1d08 100644 --- a/examples/postgres/custom_arrays/src/lib.rs +++ b/examples/postgres/custom_arrays/src/lib.rs @@ -60,7 +60,7 @@ pub fn revert_db_migration( conn: &mut Connection, ) -> Result<(), Box> { // Check DB connection! - if let Ok(_) = conn.ping() { + if conn.ping().is_ok() { } else if let Err(e) = conn.ping() { eprint!("[pg_cmdb]: Error connecting to database: {}", e); return Err(Box::new(e)); From 478972da0dc78cc180bd0e4ff034daac5e24461c Mon Sep 17 00:00:00 2001 From: Marvin Hansen Date: Mon, 26 Aug 2024 17:08:56 +0800 Subject: [PATCH 70/73] postgres custom array example: Updated README.md to ensure run_db_migration is the same as in the code. Added revert_db_migration to the Readme as well. Signed-off-by: Marvin Hansen --- examples/postgres/custom_arrays/README.md | 113 ++++++++++++--------- examples/postgres/custom_arrays/src/lib.rs | 1 + 2 files changed, 66 insertions(+), 48 deletions(-) diff --git a/examples/postgres/custom_arrays/README.md b/examples/postgres/custom_arrays/README.md index ed89e616c930..5049419698e6 100644 --- a/examples/postgres/custom_arrays/README.md +++ b/examples/postgres/custom_arrays/README.md @@ -202,7 +202,8 @@ You usually can return the primary key value after inserting so if you need to k ### External primary key: INTEGER type In case you need to know the specific value of the primary key and you need to know it before inserting the data, you -have to assign unique primary keys before inserting data and you have to use the INTEGER type (or any of the many integer +have to assign unique primary keys before inserting data and you have to use the INTEGER type (or any of the many +integer variations) to convey to Postgres that you set the primary key yourself. Notice, you still have to set the NOT NULL and PRIMARY KEY attribute to ensure data consistency. @@ -327,7 +328,8 @@ pub mod smdb { Notice, the Postgres types are stored in Postgres therefore you are not seeing them in the generated schema. Only tables will show up in the generates schema. Furthermore, you will need a wrapper struct to map from Rust to the Postgres type. -For the ServiceEndpoint, Diesel already generated a matching zero sized SQL type struct with the correct annotations. The service +For the ServiceEndpoint, Diesel already generated a matching zero sized SQL type struct with the correct annotations. +The service table then uses that SQL type `ServiceEndpoint` instead of the native Postgres type to reference the custom type in the endpoints array. 
@@ -340,7 +342,8 @@ Rust type implementations matching all the generated Postgres types and tables. Lastly, you also want to add a type alias for a postgres database connection in the lib file because that gives you the freedom to swap between a normal single connection or a connection pool without updating your type implementation or tests. -The connection pool type alias has been uncommented, but the type signature of the pooled connection makes it obvious why +The connection pool type alias has been uncommented, but the type signature of the pooled connection makes it obvious +why a type alias is a good idea. At this point, your lib file looks as shown below: @@ -375,7 +378,8 @@ Diesel needs a zero sized SQL type struct to represent a custom Enum in Postgres pub struct PgProtocolType; ``` -It is important to add the database schema ("smdb") and type name ("protocol_type") otherwise Postgres fails to find the type and aborts an +It is important to add the database schema ("smdb") and type name ("protocol_type") otherwise Postgres fails to find the +type and aborts an operation on that type. Next, let’s define the actual Enum. Bear in mind to use the SQL type struct as sql_type on the Enum: @@ -542,7 +546,8 @@ relatively easy to fix. It’s worth repeating that you are dealing with three types in total: 1) service_endpoint: The Postgres custom type -2) ServiceEndpoint: The SQL type struct generated by Diesel that refers to the Postgres type service_endpoint. Also, only +2) ServiceEndpoint: The SQL type struct generated by Diesel that refers to the Postgres type service_endpoint. Also, + only the SQL type struct carries the postgres_type annotation. 3) Endpoint: The Rust struct with an sql_type annotation referring to the SQL type struct ServiceEndpoint @@ -1027,55 +1032,61 @@ pub fn run_db_migration( return Err(Box::new(e)); } } - - // Check if DB has pending migrations - let has_pending = match conn.has_pending_migration(MIGRATIONS) { - Ok(has_pending) => has_pending, + + // Run all pending migrations. + match conn.run_pending_migrations(MIGRATIONS) { + Ok(_) => Ok(()), Err(e) => { - eprint!( - "[run_db_migration]: Error checking for pending database migrations: {}", - e - ); - return Err(e); - } - }; - - // If so, run all pending migrations. - if has_pending { - match conn.run_pending_migrations(MIGRATIONS) { - Ok(_) => Ok(()), - Err(e) => { - eprint!("[run_db_migration]: Error migrating database: {}", e); - Err(e) - } + eprint!("[run_db_migration]: Error migrating database: {}", e); + Err(e) } - } else { - // Nothing pending, just return - Ok(()) } } ``` -The rational to check for all potential errors is twofold. For once, because the database connection is given as a -parameter, -you just don't know if the component that created the connection has checked the connection, therefore you better check -it to catch a dead connection. -Second, even if you have a correct database connection, this does not guarantee that the migration will succeed. -There might be some types left from a previously aborted drop operations or -a random error might happened at any stage of the migration, therefore you have to handle the error where it occurs. -Also, because you run the db migration during application startup or before testing, -ensure you have clear error messages to speed up diagnostic and debugging. +A few key points worth highlighting here: + +* The connection check is done to ensure that the DB connection is valid. 
+* The `run_pending_migrations` method checks if there are any pending migrations, and if so, runs them. + +Likewise, we add a function `revert_db_migration` to revert all Diesel migrations +as defined in the `down.sql` file. If you don't have a `down.sql` file, +you can skip this step. +However, adding a proper `down.sql` for the revert migration function +is particularly useful for testing purposes, as discussed in the next section. + +```rust +pub fn revert_db_migration( + conn: &mut Connection, +) -> Result<(), Box> { + // Check DB connection! + if conn.ping().is_ok() { + } else if let Err(e) = conn.ping() { + eprint!("[pg_cmdb]: Error connecting to database: {}", e); + return Err(Box::new(e)); + } + + // Revert all pending migrations + match conn.revert_all_migrations(MIGRATIONS) { + Ok(_) => Ok(()), + Err(e) => { + eprint!("[pg_cmdb]: Error reverting database migrations: {}", e); + Err(e) + } + } +} +``` ### DB Integration Tests -Conventionally, Database integration tests become flaky when executed in parallel usually because of conflicting read / write operations. +Conventionally, Database integration tests become flaky when executed in parallel usually +because of conflicting read / write operations. While modern database systems can handle concurrent data access, test tools with assertions not so much. That means, test assertions start to fail seemingly randomly when executed concurrently. There are only very few viable options to deal with this reality: * Don't use parallel test execution -* Form isolated test groups that run in parallel, but tests in each group run sequentially. - +* Form isolated test groups that run in parallel, but tests in each group run sequentially. In practice, the second option is often used by grouping tables into isolated test groups. Suppose your database has 25 tables you are tasked to test. @@ -1125,8 +1136,15 @@ To get a more meaningful error message, just uncomment the dbg! statement that unwraps the result before the assertion and you will see a helpful error message in most cases. Diesel, however, offers an easier way to run parallel integration tests by leaning on a test transaction. -Conventionally, a transaction follows the two [phase commit protocol](https://en.wikipedia.org/wiki/Two-phase_commit_protocol). In stage one, a commit request is sent to the database to take the necessary steps for either committing or aborting the transaction and test if a commit would succeed. In stage two, the transaction is committed or aborted depending on whether any conflicts were detected in stage one. -A test transaction initiates the first stage, prepares everything ready to commit, and then just aborts the transaction at the end of a test. Specifically, you use a dangling test transaction. Diesel uses that extensively for it's own test-suite. +Conventionally, a transaction follows the +[two phase commit protocol](https://en.wikipedia.org/wiki/Two-phase_commit_protocol). +In stage one, a commit request is sent to the database to take the necessary steps for either committing +or aborting the transaction and test if a commit would succeed. +In stage two, the transaction is then committed or aborted depending on whether any conflicts were detected in stage +one. + +A test transaction initiates the first stage, prepares everything ready to commit, and then +just aborts the transaction at the end of a test. Diesel uses this technique extensively for its own test-suite.
What that means is that you perform the following steps: * Open a new connection per test @@ -1134,7 +1152,6 @@ What that means is that you perform the following steps: * Perform any test code (including schema setup for postgres) * Rollback the transaction - With that you end up with each test does not see the changes from any other tests. The test folder contains an example of this testing style in the parallel_service_test file. @@ -1162,13 +1179,13 @@ fn test_create_service() { ``` Here, the test setup runs first to test if the database connection works and all migrations are in place. -Also, noticed, each test gets its own database connection. After the connection has been established, -the test transaction is started next. From there, the test code is executed. Because each test runs in +Also, noticed, each test gets its own database connection. After the connection has been established, +the test transaction is started next. From there, the test code is executed. Because each test runs in a separate transaction, you have to apply first principle to each test. For example, if you want to test -to update a service, you have to insert a service first, then update it, and then +to update a service, you have to insert a service first, then update it, and then test if the update was successful. Because each test transaction aborts after each test, there is no left over -data to worry about after each test. Furthermore, since PostgreSQL executes transactions in isolation, -regardless of whether these are committed or not, no two tests can interfere with each other. +data to worry about after each test. Furthermore, since PostgreSQL executes transactions in isolation, +regardless of whether these are committed or not, no two tests can interfere with each other. Lastly, testing this way is very fast when executed in parallel. diff --git a/examples/postgres/custom_arrays/src/lib.rs b/examples/postgres/custom_arrays/src/lib.rs index 13ae106a1d08..963580295087 100644 --- a/examples/postgres/custom_arrays/src/lib.rs +++ b/examples/postgres/custom_arrays/src/lib.rs @@ -35,6 +35,7 @@ pub fn run_db_migration( return Err(Box::new(e)); } } + // Run all pending migrations. match conn.run_pending_migrations(MIGRATIONS) { Ok(_) => Ok(()), From 973e4b0b774102d5d58a6f1ca82d05d76d0b440c Mon Sep 17 00:00:00 2001 From: Georg Semmler Date: Fri, 30 Aug 2024 10:43:57 +0200 Subject: [PATCH 71/73] Some minor cleanup and some minor additions to the text --- examples/postgres/custom_arrays/README.md | 81 +++++----- .../src/model/endpoint_type/mod.rs | 10 +- .../tests/parallel_service_tests.rs | 148 ++++++------------ .../tests/single_service_test_run.rs | 75 ++++----- 4 files changed, 122 insertions(+), 192 deletions(-) diff --git a/examples/postgres/custom_arrays/README.md b/examples/postgres/custom_arrays/README.md index 5049419698e6..bdf299f837bc 100644 --- a/examples/postgres/custom_arrays/README.md +++ b/examples/postgres/custom_arrays/README.md @@ -379,8 +379,7 @@ pub struct PgProtocolType; ``` It is important to add the database schema ("smdb") and type name ("protocol_type") otherwise Postgres fails to find the -type and aborts an -operation on that type. +type and aborts an operation on that type. Next, let’s define the actual Enum. 
Bear in mind to use the SQL type struct as sql_type on the Enum: @@ -395,9 +394,15 @@ pub enum ProtocolType { } ``` +It’s important to have a +[`#[derive(FromSqlRow)]`](https://docs.diesel.rs/master/diesel/deserialize/derive.FromSqlRow.html) +for loading values of this enum from the database and it’s important to have a +[`#[derive(AsExpression)]`](https://docs.diesel.rs/master/diesel/expression/derive.AsExpression.html) +to send values of this enum to the database. + It’s worth pointing out that you are dealing with three types in total: -1) protocol_type: The Postgres Enum type +1) protocol_type: The Postgres Enum type, defined in your SQL migration 2) PgProtocolType: The SQL type struct that refers to the Postgres Enum type 3) ProtocolType: The Rust Enum that maps to the SQL type struct PgProtocolType @@ -408,13 +413,13 @@ message, make sure to check: 1) Do I have a SQL type struct? 2) Does my SQL type struct derive SqlType? 3) Does my SQL type have a `#[diesel(postgres_type(_))]` attribute declared? -4) Is the `#[diesel(postgres_type(_))]` attribute referring to the correct Postgres type? -5) Does my Rust type refer to the SQL type struct? +4) Is the `#[diesel(sql_type = _)]` attribute referring to the correct Postgres type? If all those checks pass and you still see errors, it’s most likely a serialization error. -To serialize and deserialize a custom Enum, you write a custom ToSql and FromSql implementation. +To serialize and deserialize a custom Enum, you write a custom `ToSql` and `FromSql` implementation. -`ToSql` describes how to serialize a given rust type (Self) as sql side type (first generic argument) for a specific +[`ToSql`](https://docs.diesel.rs/master/diesel/serialize/trait.ToSql.html) +describes how to serialize a given rust type (Self) as sql side type (first generic argument) for a specific database backend (second generic argument). It needs to translate the type into the relevant wire protocol in that case. For postgres/mysql enums that's just the enum value as ascii string, @@ -435,7 +440,8 @@ impl ToSql for ProtocolType { } } ``` -`FromSql` describes how to deserialize a given rust type (Self) as sql side type (first generic argument) for a specific +[`FromSql`](https://docs.diesel.rs/master/diesel/deserialize/trait.FromSql.html) +describes how to deserialize a given rust type (Self) as sql side type (first generic argument) for a specific database backend (second generic argument). It needs to translate from the relevant wire protocol to the rust type. For postgres/mysql enums that just means matching on the ascii string, @@ -462,11 +468,20 @@ impl FromSql for ProtocolType { } ``` -In the from_sql, it is important to add error handling for unknown Enum variants +In the `FromSql`, it is important to add error handling for unknown Enum variants to catch errors that may result from incorrect database updates. ### Rust Struct Endpoint +Conceptually, Diesel supports custom record types the same way as custom enums (and any other custom type mapping). +You need to handle the same three types: + +* The `service_endpoint` type in your SQL migration +* The `ServiceEndpoint` type in your schema.rs file, representing the SQL side type +* The `Endpoint` type holding the rust side values. + +The details of the implementation differ slightly due to differences in how these types are represented by PostgreSQL.
+
 ```rust
 #[derive(Debug, Clone, FromSqlRow, AsExpression, PartialEq, Eq)]
 #[diesel(sql_type = crate::schema::smdb::sql_types::ServiceEndpoint)]
@@ -479,27 +494,27 @@ pub struct Endpoint {
 }
 ```
 
-It is worth mentioning is that the sql_type refers to the sql side type generated by Diesel and stored in the schema.rs
+It is worth mentioning that the `#[diesel(sql_type = _)]` attribute refers to the sql side type generated by Diesel and stored in the schema.rs
 file. Keep this in mind if you ever refactor the schema.rs file into a different folder because the macro annotation
 checks the path during compilation and throws an error if it cannot find the type in the provided path.
 
-The serialize and deserialize implementation of a custom type is not as obvious as the Enum because, internally,
+The `FromSql` and `ToSql` implementation of a custom record type is not as obvious as the Enum because, internally,
 Postgres represents a custom type as an anonymous typed tuple. Therefore, you have to map a struct to a typed tuple
 and back.
 
 Let’s start with the ToSql implementation to store the Rust Endpoint struct as a typed tuple.
-Luckily, Diesel provides a helper util to do just that.
+Luckily, Diesel provides a [helper util](https://docs.diesel.rs/master/diesel/serialize/trait.WriteTuple.html) to do just that.
 
 ```rust
 impl ToSql<ServiceEndpoint, Pg> for Endpoint {
     fn to_sql<'b>(&'b self, out: &mut Output<'b, '_, Pg>) -> serialize::Result {
         serialize::WriteTuple::<(Text, Integer, Text, Integer, PgProtocolType)>::write_tuple(
             &(
-                self.name.to_owned(),
-                self.version.to_owned(),
-                self.base_uri.to_owned(),
-                self.port.to_owned(),
-                self.protocol.to_owned(),
+                &self.name,
+                &self.version,
+                &self.base_uri,
+                &self.port,
+                &self.protocol,
             ),
             &mut out.reborrow(),
         )
@@ -514,9 +529,8 @@ implementation in another pane, both side by side, to double check that the
 number and types match. If the type or number of fields mismatch, you will get a compiler error telling you that
 either the number of fields or the type of the fields doesn’t match.
-Also, because the write_tuple expects values, you have to call either to_owned() or clone() on any referenced data.
 
-The FromSql reverses the process by converting a Postgres typed tuple back into a Rust struct.
+The `FromSql` reverses the process by converting a Postgres typed tuple back into a Rust struct.
 Again, Diesel provides a convenient helper to do so:
 
 ```rust
@@ -543,16 +557,6 @@ Debugging serialization issues in Diesel is relatively straightforward
 since the compiler usually points out the location
 of the issue and, often, the issue is a type mismatch that is relatively easy to fix.
 
-It’s worth repeating that you are dealing with three types in total:
-
-1) service_endpoint: The Postgres custom type
-2) ServiceEndpoint: The SQL type struct generated by Diesel that refers to the Postgres type service_endpoint. Also,
-   only
-   the SQL type struct carries the postgres_type annotation.
-3) Endpoint: The Rust struct with an sql_type annotation referring to the SQL type struct ServiceEndpoint
-
-Make sure all of those types match correctly.
-
 ### Rust Struct Service
 
 The service struct gets its serialization and deserialization implementation generated by Diesel so that saves some
@@ -877,7 +881,7 @@ Here, it is a matter of taste to return a Boolean flag with an optional string o
 service as a custom error.
 The rationale for the Boolean flag is that any service might be offline for any reason, so that doesn’t count as a
 database error, whereas a failing query would amount to a database error. That said, the decision depends largely
 on the design requirements and if you already have custom errors defined, then adding a
-ServiceDependencyOfflineError variant should be straight forward.
+`ServiceDependencyOfflineError` variant should be straightforward.
 
 At this point, the Service type implementation is complete.
@@ -909,7 +913,7 @@ That said, the DSL gets you a long way before you are writing custom SQL.
 The last one, type annotation on database return types, isn’t that obvious because Rust can infer the database return
 type from the method return type, so why bother? Why would you write get_result::<i64>(db) instead of just
 get_result(db), you may wonder? Here is the big secret: type inference in Rust occasionally needs a hint. When you
-annotated the database return type and then wants to daisy chain another operation via map or flatmap, without the type
+annotate the database return type and then want to daisy-chain another operation via `map` or `flat_map`, without the type
 annotation you get a compile error. If you have the type annotation in place, you can add more operations whenever
 you want and things compile right away.
 To illustrate the last point, the count operator in Postgres only returns an i64 integer. Suppose your application
@@ -936,8 +940,8 @@ we have to update the return type of the method as well.
 ```
 
 Run the compiler and see that it works. Now, if you were to remove the type annotation from get_result,
-you get a compile error saying that trait FromSql is not implemented.
-The type annotation is required because there can be more than one FromSql impl for the same rust type and there can be
+you get a compile error saying that trait `FromSql` is not implemented.
+The type annotation is required because there can be more than one `FromSql` impl for the same rust type and there can be
 more than one FromSql type for the same sql type. That gives a lot of flexibility in how you actually map data
 between your database and your rust code, but it requires an explicit type annotation.
 
@@ -965,7 +969,7 @@ that expected consistent database usage.
 
 **Important**
 
-Using database connection pool in Diesel requires you to enable the `r2d2` feature in your cargo.toml file.
+Using a database connection pool in Diesel requires you to enable the `r2d2` feature in your Cargo.toml file.
 
 In Diesel, you can handle connections either way, but the only noticeable difference is the actual connection type
 used as a type parameter. For that reason, a type alias for the DB connection was declared in the lib.rs file because
@@ -983,16 +987,17 @@ use dotenvy::dotenv;
 
 fn postgres_connection() -> PgConnection {
     dotenv().ok();
-    let database_url = env::var("POSTGRES_DATABASE_URL")
-        .expect("POSTGRES_DATABASE_URL must be set");
+    let database_url = env::var("PG_DATABASE_URL")
+        .or_else(|_| env::var("DATABASE_URL"))
+        .expect("PG_DATABASE_URL must be set");
 
     PgConnection::establish(&database_url)
         .unwrap_or_else(|_| panic!("Error connecting to {}", database_url))
 }
 ```
 
-Here we use the dotenv crate to test if there is an environment variable of either DATABASE_URL or POSTGRES_DATABASE_URL
-and if so, parse the string and use it to establish a connection. Therefore, make sure the POSTGRES_DATABASE_URL is set
+Here we use the dotenvy crate to test if there is an environment variable of either DATABASE_URL or PG_DATABASE_URL
+and if so, parse the string and use it to establish a connection. Therefore, make sure the PG_DATABASE_URL is set
 correctly.
 
 ### DB Migration Util
diff --git a/examples/postgres/custom_arrays/src/model/endpoint_type/mod.rs b/examples/postgres/custom_arrays/src/model/endpoint_type/mod.rs
index 6374b9bc6b8e..ffe14c61522e 100644
--- a/examples/postgres/custom_arrays/src/model/endpoint_type/mod.rs
+++ b/examples/postgres/custom_arrays/src/model/endpoint_type/mod.rs
@@ -38,11 +38,11 @@ impl ToSql<ServiceEndpoint, Pg> for Endpoint {
     fn to_sql<'b>(&'b self, out: &mut Output<'b, '_, Pg>) -> serialize::Result {
         serialize::WriteTuple::<(Text, Integer, Text, Integer, PgProtocolType)>::write_tuple(
             &(
-                self.name.to_owned(),
-                self.version.to_owned(),
-                self.base_uri.to_owned(),
-                self.port.to_owned(),
-                self.protocol.to_owned(),
+                &self.name,
+                &self.version,
+                &self.base_uri,
+                &self.port,
+                &self.protocol,
             ),
             &mut out.reborrow(),
         )
diff --git a/examples/postgres/custom_arrays/tests/parallel_service_tests.rs b/examples/postgres/custom_arrays/tests/parallel_service_tests.rs
index 104fb5b35986..6c6e2499dd9b 100644
--- a/examples/postgres/custom_arrays/tests/parallel_service_tests.rs
+++ b/examples/postgres/custom_arrays/tests/parallel_service_tests.rs
@@ -47,26 +47,29 @@ fn start_or_reuse_postgres_docker_container() {
 fn run_db_migration(conn: &mut Connection) {
     println!("run_db_migration");
     let res = custom_arrays::run_db_migration(conn);
-    //dbg!(&result);
-    assert!(res.is_ok());
+    assert!(res.is_ok(), "{:?}", res.unwrap_err());
 }
 
 fn revert_db_migration(conn: &mut Connection) {
     println!("revert_db_migration");
     let res = custom_arrays::revert_db_migration(conn);
-    //dbg!(&result);
-    assert!(res.is_ok());
+    assert!(res.is_ok(), "{:?}", res.unwrap_err());
 }
 
 fn postgres_connection() -> PgConnection {
     println!("postgres_connection");
     dotenv().ok();
-    let database_url =
-        env::var("POSTGRES_DATABASE_URL").expect("POSTGRES_DATABASE_URL must be set");
+    let database_url = env::var("PG_DATABASE_URL")
+        .or_else(|_| env::var("DATABASE_URL"))
+        .expect("PG_DATABASE_URL must be set");
 
-    PgConnection::establish(&database_url)
-        .unwrap_or_else(|_| panic!("Error connecting to {}", database_url))
+    let mut conn = PgConnection::establish(&database_url)
+        .unwrap_or_else(|_| panic!("Error connecting to {}", database_url));
+    conn.begin_test_transaction()
+        .expect("Failed to begin test transaction");
+
+    conn
 }
 
 #[allow(dead_code)]
@@ -105,8 +108,6 @@ fn test_create_service() {
 
     let mut connection = postgres_connection();
     let conn = &mut connection;
-    conn.begin_test_transaction()
-        .expect("Failed to begin test transaction");
 
     let service = get_crate_service();
     let endpoints = get_endpoints();
@@ -114,8 +115,7 @@
 
     let result = service::Service::create(conn, &service);
 
-    // dbg!(&result);
-    assert!(result.is_ok());
+    assert!(result.is_ok(), "{:?}", result.unwrap_err());
 
     let service = result.unwrap();
 
@@ -136,22 +136,17 @@ fn test_count_service() {
 
     let mut connection = postgres_connection();
     let conn = &mut connection;
-    conn.begin_test_transaction()
-        .expect("Failed to begin test transaction");
 
     let result = service::Service::count(conn);
-    //dbg!(&result);
-    assert!(result.is_ok());
+    assert!(result.is_ok(), "{:?}", result.unwrap_err());
     assert_eq!(result.unwrap(), 0);
 
     let service = get_crate_service();
     let result = service::Service::create(conn, &service);
-    // dbg!(&result);
-    assert!(result.is_ok());
+    assert!(result.is_ok(), "{:?}", result.unwrap_err());
 
     let result = service::Service::count(conn);
-    //dbg!(&result);
-    assert!(result.is_ok());
+    assert!(result.is_ok(), "{:?}", result.unwrap_err());
     assert_eq!(result.unwrap(), 1);
 }
 
@@ -161,17 +156,13 @@ fn test_check_if_service_id_exists() {
 
     let mut connection = postgres_connection();
     let conn = &mut connection;
-    conn.begin_test_transaction()
-        .expect("Failed to begin test transaction");
 
     let service = get_crate_service();
     let result = service::Service::create(conn, &service);
-    // dbg!(&result);
-    assert!(result.is_ok());
+    assert!(result.is_ok(), "{:?}", result.unwrap_err());
 
     let result = service::Service::check_if_service_id_exists(conn, 1);
-    //dbg!(&result);
-    assert!(result.is_ok());
+    assert!(result.is_ok(), "{:?}", result.unwrap_err());
     assert!(result.unwrap());
 }
 
@@ -181,18 +172,14 @@ fn test_check_if_service_id_online() {
 
     let mut connection = postgres_connection();
     let conn = &mut connection;
-    conn.begin_test_transaction()
-        .expect("Failed to begin test transaction");
 
     let service = get_crate_service();
     let result = service::Service::create(conn, &service);
-    // dbg!(&result);
-    assert!(result.is_ok());
+    assert!(result.is_ok(), "{:?}", result.unwrap_err());
 
     // Test if online
     let result = service::Service::check_if_service_id_online(conn, 1);
-    //dbg!(&result);
-    assert!(result.is_ok());
+    assert!(result.is_ok(), "{:?}", result.unwrap_err());
     assert!(result.unwrap());
 }
 
@@ -202,17 +189,13 @@ fn test_get_all_online_services() {
 
     let mut connection = postgres_connection();
     let conn = &mut connection;
-    conn.begin_test_transaction()
-        .expect("Failed to begin test transaction");
 
     let service = get_crate_service();
     let result = service::Service::create(conn, &service);
-    // dbg!(&result);
-    assert!(result.is_ok());
+    assert!(result.is_ok(), "{:?}", result.unwrap_err());
 
     let result = service::Service::get_all_online_services(conn);
-    //dbg!(&result);
-    assert!(result.is_ok());
+    assert!(result.is_ok(), "{:?}", result.unwrap_err());
     assert!(!result.unwrap().is_empty());
 }
 
@@ -222,17 +205,13 @@ fn test_get_all_offline_services() {
 
     let mut connection = postgres_connection();
     let conn = &mut connection;
-    conn.begin_test_transaction()
-        .expect("Failed to begin test transaction");
 
     let service = get_crate_service();
     let result = service::Service::create(conn, &service);
-    // dbg!(&result);
-    assert!(result.is_ok());
+    assert!(result.is_ok(), "{:?}", result.unwrap_err());
 
     let result = service::Service::get_all_offline_services(conn);
-    //dbg!(&result);
-    assert!(result.is_ok());
+    assert!(result.is_ok(), "{:?}", result.unwrap_err());
     assert_eq!(result.unwrap().len(), 0);
 }
 
@@ -242,19 +221,15 @@ fn test_get_all_service_dependencies() {
 
     let mut connection = postgres_connection();
     let conn = &mut connection;
-    conn.begin_test_transaction()
-        .expect("Failed to begin test transaction");
 
     let service = get_crate_service();
     let result = service::Service::create(conn, &service);
-    // dbg!(&result);
-    assert!(result.is_ok());
+    assert!(result.is_ok(), "{:?}", result.unwrap_err());
 
     let service_id = 1;
 
     let result = service::Service::get_all_service_dependencies(conn, service_id);
-    //dbg!(&result);
-    assert!(result.is_ok());
+    assert!(result.is_ok(), "{:?}", result.unwrap_err());
     assert_eq!(result.unwrap().len(), 1);
 }
 
@@ -264,19 +239,15 @@ fn test_get_all_service_endpoints() {
 
     let mut connection = postgres_connection();
     let conn = &mut connection;
-    conn.begin_test_transaction()
-        .expect("Failed to begin test transaction");
 
     let service = get_crate_service();
     let result = service::Service::create(conn, &service);
-    // dbg!(&result);
-    assert!(result.is_ok());
+    assert!(result.is_ok(), "{:?}", result.unwrap_err());
 
     let service_id = 1;
 
     let result = service::Service::get_all_service_endpoints(conn, service_id);
-    //dbg!(&result);
-    assert!(result.is_ok());
+    assert!(result.is_ok(), "{:?}", result.unwrap_err());
     assert_eq!(result.unwrap().len(), 2);
 }
 
@@ -286,19 +257,15 @@ fn test_service_read() {
 
     let mut connection = postgres_connection();
     let conn = &mut connection;
-    conn.begin_test_transaction()
-        .expect("Failed to begin test transaction");
 
     let service = get_crate_service();
     let result = service::Service::create(conn, &service);
-    // dbg!(&result);
-    assert!(result.is_ok());
+    assert!(result.is_ok(), "{:?}", result.unwrap_err());
 
     let service_id = 1;
 
     let result = service::Service::read(conn, service_id);
-    //dbg!(&result);
-    assert!(result.is_ok());
+    assert!(result.is_ok(), "{:?}", result.unwrap_err());
 
     let service = result.unwrap();
 
@@ -316,17 +283,13 @@ fn test_service_read_all() {
     let mut connection = postgres_connection();
     let conn = &mut connection;
-    conn.begin_test_transaction()
-        .expect("Failed to begin test transaction");
 
     let service = get_crate_service();
     let result = service::Service::create(conn, &service);
-    // dbg!(&result);
-    assert!(result.is_ok());
+    assert!(result.is_ok(), "{:?}", result.unwrap_err());
 
     let result = service::Service::read_all(conn);
-    //dbg!(&result);
-    assert!(result.is_ok());
+    assert!(result.is_ok(), "{:?}", result.unwrap_err());
 
     let services = result.unwrap();
     assert!(!services.is_empty());
@@ -336,23 +299,18 @@ fn test_set_service_online() {
     let mut connection = postgres_connection();
     let conn = &mut connection;
-    conn.begin_test_transaction()
-        .expect("Failed to begin test transaction");
 
     let service = get_crate_service();
     let result = service::Service::create(conn, &service);
-    // dbg!(&result);
-    assert!(result.is_ok());
+    assert!(result.is_ok(), "{:?}", result.unwrap_err());
 
     let service_id = 1;
 
     let result = service::Service::set_service_online(conn, service_id);
-    //dbg!(&result);
-    assert!(result.is_ok());
+    assert!(result.is_ok(), "{:?}", result.unwrap_err());
 
     let result = service::Service::check_if_service_id_online(conn, service_id);
-    //dbg!(&result);
-    assert!(result.is_ok());
+    assert!(result.is_ok(), "{:?}", result.unwrap_err());
     assert!(result.unwrap());
 }
 
@@ -360,23 +318,18 @@ fn test_set_service_offline() {
     let mut connection = postgres_connection();
     let conn = &mut connection;
-    conn.begin_test_transaction()
-        .expect("Failed to begin test transaction");
 
     let service = get_crate_service();
     let result = service::Service::create(conn, &service);
-    // dbg!(&result);
-    assert!(result.is_ok());
+    assert!(result.is_ok(), "{:?}", result.unwrap_err());
 
     let service_id = 1;
 
     let result = service::Service::set_service_offline(conn, service_id);
-    //dbg!(&result);
-    assert!(result.is_ok());
+    assert!(result.is_ok(), "{:?}", result.unwrap_err());
 
     let result = service::Service::check_if_service_id_online(conn, service_id);
-    //dbg!(&result);
-    assert!(result.is_ok());
+    assert!(result.is_ok(), "{:?}", result.unwrap_err());
     assert!(!result.unwrap());
 }
 
@@ -384,18 +337,14 @@ fn test_service_update() {
     let mut connection = postgres_connection();
     let conn = &mut connection;
-    conn.begin_test_transaction()
-        .expect("Failed to begin test transaction");
 
     let service = get_crate_service();
     let result = service::Service::create(conn, &service);
-    // dbg!(&result);
-    assert!(result.is_ok());
+    assert!(result.is_ok(), "{:?}", result.unwrap_err());
 
     // check if service_id exists so we can update the service
     let result = service::Service::check_if_service_id_exists(conn, 1);
-    //dbg!(&result);
-    assert!(result.is_ok());
+    assert!(result.is_ok(), "{:?}", result.unwrap_err());
     assert!(result.unwrap());
 
     let update = UpdateService::new(
@@ -410,8 +359,7 @@
     );
 
     let result = service::Service::update(conn, 1, &update);
-    //dbg!(&result);
-    assert!(result.is_ok());
+    assert!(result.is_ok(), "{:?}", result.unwrap_err());
 
     let service = result.unwrap();
 
@@ -430,33 +378,27 @@ fn test_service_delete() {
     let mut connection = postgres_connection();
     let conn = &mut connection;
-    conn.begin_test_transaction()
-        .expect("Failed to begin test transaction");
 
     // Insert the service
     let service = get_crate_service();
     let result = service::Service::create(conn, &service);
-    // dbg!(&result);
-    assert!(result.is_ok());
+    assert!(result.is_ok(), "{:?}", result.unwrap_err());
 
     // Check if it's there
     let result = service::Service::read(conn, 1);
-    //dbg!(&result);
-    assert!(result.is_ok());
+    assert!(result.is_ok(), "{:?}", result.unwrap_err());
 
     // Delete service
     let result = service::Service::delete(conn, 1);
-    //dbg!(&result);
-    assert!(result.is_ok());
+    assert!(result.is_ok(), "{:?}", result.unwrap_err());
 
     // Check it's gone
     let result = service::Service::read(conn, 1);
-    //dbg!(&result);
-    assert!(result.is_err());
+    assert!(result.is_err(), "{:?}", result.unwrap());
 
     let result = service::Service::count(conn);
-    //dbg!(&result);
-    assert!(result.is_ok());
+    assert!(result.is_ok(), "{:?}", result.unwrap_err());
     assert_eq!(result.unwrap(), 0);
 }

diff --git a/examples/postgres/custom_arrays/tests/single_service_test_run.rs b/examples/postgres/custom_arrays/tests/single_service_test_run.rs
index 6f5c1eff32e2..31028eca7ec6 100644
--- a/examples/postgres/custom_arrays/tests/single_service_test_run.rs
+++ b/examples/postgres/custom_arrays/tests/single_service_test_run.rs
@@ -10,24 +10,26 @@ use std::env;
 
 fn postgres_connection() -> PgConnection {
     dotenv().ok();
-    let database_url =
-        env::var("POSTGRES_DATABASE_URL").expect("POSTGRES_DATABASE_URL must be set");
-    PgConnection::establish(&database_url)
-        .unwrap_or_else(|_| panic!("Error connecting to {}", database_url))
+    let database_url = env::var("PG_DATABASE_URL")
+        .or_else(|_| env::var("DATABASE_URL"))
+        .expect("PG_DATABASE_URL must be set");
+    let mut conn = PgConnection::establish(&database_url)
+        .unwrap_or_else(|_| panic!("Error connecting to {}", database_url));
+    conn.begin_test_transaction()
+        .expect("Failed to begin test transaction");
+    conn
 }
 
 fn run_db_migration(conn: &mut Connection) {
     println!("run_db_migration");
     let res = custom_arrays::run_db_migration(conn);
-    //dbg!(&result);
-    assert!(res.is_ok());
+    assert!(res.is_ok(), "{:?}", res.unwrap_err());
 }
 
 fn revert_db_migration(conn: &mut Connection) {
     println!("revert_db_migration");
     let res = custom_arrays::revert_db_migration(conn);
-    //dbg!(&result);
-    assert!(res.is_ok());
+    assert!(res.is_ok(), "{:?}", res.unwrap_err());
 }
 
 // #[test] // Uncomment to run. Make sure to comment out the other tests first.
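// For orientation, a single orchestrated test run along these lines might look
// like the sketch below. The exact sequence is an assumption; the helper
// functions are the ones defined in this file, and the real test in the
// source is authoritative.
//
// #[test]
// fn test_single_service_run() {
//     let mut conn = postgres_connection();
//     run_db_migration(&mut conn);
//     test_create_service(&mut conn);
//     test_count_service(&mut conn);
//     test_service_update(&mut conn);
//     test_service_delete(&mut conn);
//     revert_db_migration(&mut conn);
// }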
@@ -143,40 +145,35 @@ fn test_create_service(conn: &mut Connection) {
 
 #[allow(dead_code)]
 fn test_count_service(conn: &mut Connection) {
     let result = service::Service::count(conn);
-    //dbg!(&result);
-    assert!(result.is_ok());
+    assert!(result.is_ok(), "{:?}", result.unwrap_err());
     assert_eq!(result.unwrap(), 1);
 }
 
 #[allow(dead_code)]
 fn test_check_if_service_id_exists(conn: &mut Connection) {
     let result = service::Service::check_if_service_id_exists(conn, 1);
-    //dbg!(&result);
-    assert!(result.is_ok());
+    assert!(result.is_ok(), "{:?}", result.unwrap_err());
     assert!(result.unwrap());
 }
 
 #[allow(dead_code)]
 fn test_check_if_service_id_online(conn: &mut Connection) {
     let result = service::Service::check_if_service_id_online(conn, 1);
-    //dbg!(&result);
-    assert!(result.is_ok());
+    assert!(result.is_ok(), "{:?}", result.unwrap_err());
     assert!(result.unwrap());
 }
 
 #[allow(dead_code)]
 fn test_get_all_online_services(conn: &mut Connection) {
     let result = service::Service::get_all_online_services(conn);
-    //dbg!(&result);
-    assert!(result.is_ok());
+    assert!(result.is_ok(), "{:?}", result.unwrap_err());
     assert!(!result.unwrap().is_empty());
 }
 
 #[allow(dead_code)]
 fn test_get_all_offline_services(conn: &mut Connection) {
     let result = service::Service::get_all_offline_services(conn);
-    //dbg!(&result);
-    assert!(result.is_ok());
+    assert!(result.is_ok(), "{:?}", result.unwrap_err());
     assert_eq!(result.unwrap().len(), 0);
 }
 
@@ -185,8 +182,7 @@ fn test_get_all_service_dependencies(conn: &mut Connection) {
     let service_id = 1;
 
     let result = service::Service::get_all_service_dependencies(conn, service_id);
-    //dbg!(&result);
-    assert!(result.is_ok());
+    assert!(result.is_ok(), "{:?}", result.unwrap_err());
     assert_eq!(result.unwrap().len(), 1);
 }
 
@@ -195,8 +191,7 @@ fn test_get_all_service_endpoints(conn: &mut Connection) {
     let service_id = 1;
 
     let result = service::Service::get_all_service_endpoints(conn, service_id);
-    //dbg!(&result);
-    assert!(result.is_ok());
+    assert!(result.is_ok(), "{:?}", result.unwrap_err());
     assert_eq!(result.unwrap().len(), 2);
 }
 
@@ -205,8 +200,7 @@ fn test_service_read(conn: &mut Connection) {
     let service_id = 1;
 
     let result = service::Service::read(conn, service_id);
-    //dbg!(&result);
-    assert!(result.is_ok());
+    assert!(result.is_ok(), "{:?}", result.unwrap_err());
 
     let service = result.unwrap();
 
@@ -223,8 +217,7 @@
 #[allow(dead_code)]
 fn test_service_read_all(conn: &mut Connection) {
     let result = service::Service::read_all(conn);
-    //dbg!(&result);
-    assert!(result.is_ok());
+    assert!(result.is_ok(), "{:?}", result.unwrap_err());
 
     let services = result.unwrap();
     assert!(!services.is_empty());
@@ -235,12 +228,10 @@ fn test_set_service_online(conn: &mut Connection) {
     let service_id = 1;
 
     let result = service::Service::set_service_online(conn, service_id);
-    //dbg!(&result);
-    assert!(result.is_ok());
+    assert!(result.is_ok(), "{:?}", result.unwrap_err());
 
     let result = service::Service::check_if_service_id_online(conn, service_id);
-    //dbg!(&result);
-    assert!(result.is_ok());
+    assert!(result.is_ok(), "{:?}", result.unwrap_err());
     assert!(result.unwrap());
 }
 
@@ -249,12 +240,10 @@ fn test_set_service_offline(conn: &mut Connection) {
     let service_id = 1;
 
     let result = service::Service::set_service_offline(conn, service_id);
-    //dbg!(&result);
-    assert!(result.is_ok());
+    assert!(result.is_ok(), "{:?}", result.unwrap_err());
 
     let result = service::Service::check_if_service_id_online(conn, service_id);
-    //dbg!(&result);
-    assert!(result.is_ok());
+    assert!(result.is_ok(), "{:?}", result.unwrap_err());
     assert!(!result.unwrap());
 }
 
@@ -262,8 +251,7 @@ fn test_service_update(conn: &mut Connection) {
     // check if service_id exists so we can update the service
     let result = service::Service::check_if_service_id_exists(conn, 1);
-    //dbg!(&result);
-    assert!(result.is_ok());
+    assert!(result.is_ok(), "{:?}", result.unwrap_err());
     assert!(result.unwrap());
 
     let update = UpdateService::new(
@@ -278,8 +266,7 @@
     );
 
     let result = service::Service::update(conn, 1, &update);
-    //dbg!(&result);
-    assert!(result.is_ok());
+    assert!(result.is_ok(), "{:?}", result.unwrap_err());
 
     let service = result.unwrap();
 
@@ -297,19 +284,15 @@
 #[allow(dead_code)]
 fn test_service_delete(conn: &mut Connection) {
     let result = service::Service::read(conn, 1);
-    //dbg!(&result);
-    assert!(result.is_ok());
+    assert!(result.is_ok(), "{:?}", result.unwrap_err());
 
     let result = service::Service::delete(conn, 1);
-    //dbg!(&result);
-    assert!(result.is_ok());
+    assert!(result.is_ok(), "{:?}", result.unwrap_err());
 
     let result = service::Service::read(conn, 1);
-    //dbg!(&result);
-    assert!(result.is_err());
+    assert!(result.is_err(), "{:?}", result.unwrap());
 
     let result = service::Service::count(conn);
-    //dbg!(&result);
-    assert!(result.is_ok());
+    assert!(result.is_ok(), "{:?}", result.unwrap_err());
     assert_eq!(result.unwrap(), 0);
 }

From d9b0677ee3ad0b3ceb6aa58e002e93c249ee23ba Mon Sep 17 00:00:00 2001
From: Georg Semmler
Date: Fri, 30 Aug 2024 10:44:11 +0200
Subject: [PATCH 72/73] Drive-by fix of a non-deterministic test

---
 diesel_tests/tests/joins.rs | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/diesel_tests/tests/joins.rs b/diesel_tests/tests/joins.rs
index ac299e7bc33a..68dceaba8fc0 100644
--- a/diesel_tests/tests/joins.rs
+++ b/diesel_tests/tests/joins.rs
@@ -438,12 +438,14 @@ fn join_with_explicit_on_clause() {
 
     let data = users::table
         .inner_join(posts::table.on(posts::title.eq("Post One")))
+        .order(users::id)
         .load(connection);
     assert_eq!(expected_data, data);
 
     let data = users::table
         .inner_join(posts::table.on(posts::title.eq_any(vec!["Post One"])))
+        .order(users::id)
         .load(connection);
     assert_eq!(expected_data, data);

From 405f29b468aeba56bffb77681df66729541d7f21 Mon Sep 17 00:00:00 2001
From: Georg Semmler
Date: Fri, 30 Aug 2024 14:05:03 +0200
Subject: [PATCH 73/73] Clean up the parallel test setup

---
 .../tests/parallel_service_tests.rs | 137 ++----------------
 1 file changed, 16 insertions(+), 121 deletions(-)

diff --git a/examples/postgres/custom_arrays/tests/parallel_service_tests.rs b/examples/postgres/custom_arrays/tests/parallel_service_tests.rs
index 6c6e2499dd9b..42575e57f11f 100644
--- a/examples/postgres/custom_arrays/tests/parallel_service_tests.rs
+++ b/examples/postgres/custom_arrays/tests/parallel_service_tests.rs
@@ -6,43 +6,6 @@ use custom_arrays::Connection;
 use diesel::{Connection as DieselConnection, PgConnection};
 use dotenvy::dotenv;
 use std::env;
-use std::process::Command;
-use std::time::Duration;
-
-#[allow(dead_code)]
-fn start_or_reuse_postgres_docker_container() {
-    // check if a container with name postgres-5432 is already running.
-    // If so, re-use that container and just return.
-    let output = Command::new("docker")
-        .arg("ps")
-        .arg(format!("--filter=name={}", "postgres-5432"))
-        .arg("--format={{.Names}}")
-        .output()
-        .expect("failed to check for running postgres container");
-    if !output.stdout.is_empty() {
-        return;
-    }
-
-    // If there is no container running, start one.
-    // Example: docker run --name postgres-5432 -p 5432:5432 -e POSTGRES_PASSWORD=postgres -d postgres:16.3-bookworm
-    println!("start_postgres_docker_container");
-    Command::new("docker")
-        .args([
-            "run",
-            "--name",
-            "postgres-5432",
-            "-p",
-            "5432:5432",
-            "-e",
-            "POSTGRES_PASSWORD=postgres",
-            "-d",
-            "postgres:16.3-bookworm",
-        ])
-        .output()
-        .expect("failed to start postgres container");
-    // Wait for the container to start
-    std::thread::sleep(Duration::from_secs(5));
-}
 
 fn run_db_migration(conn: &mut Connection) {
     println!("run_db_migration");
@@ -50,12 +13,6 @@ fn run_db_migration(conn: &mut Connection) {
     assert!(res.is_ok(), "{:?}", res.unwrap_err());
 }
 
-fn revert_db_migration(conn: &mut Connection) {
-    println!("revert_db_migration");
-    let res = custom_arrays::revert_db_migration(conn);
-    assert!(res.is_ok(), "{:?}", res.unwrap_err());
-}
-
 fn postgres_connection() -> PgConnection {
     println!("postgres_connection");
     dotenv().ok();
@@ -68,52 +25,19 @@ fn postgres_connection() -> PgConnection {
         .unwrap_or_else(|_| panic!("Error connecting to {}", database_url));
     conn.begin_test_transaction()
         .expect("Failed to begin test transaction");
-
+    run_db_migration(&mut conn);
     conn
 }
 
-#[allow(dead_code)]
-fn test_teardown() {
-    println!("test_teardown");
-
-    // Optional. In some environments, you may have to tidy up everything after testing.
-    let mut connection = postgres_connection();
-    let conn = &mut connection;
-
-    println!("Revert pending DB migration");
-    revert_db_migration(conn);
-}
-
-// #[test] // You can test the test setup standalone i.e. for debugging.
-// Run this setup first in every test.
-fn test_setup() {
-    println!("test_setup");
-
-    // Optional: May start a Docker container first if you want fully self contained testing
-    // If there is a container already running, the util will re-use it.
-    // println!("Start or reuse postgres container");
-    // start_or_reuse_postgres_docker_container();
-
-    println!("Connect to database");
-    let mut connection = postgres_connection();
-    let conn = &mut connection;
-
-    println!("Run pending DB migration");
-    run_db_migration(conn);
-}
-
 #[test]
 fn test_create_service() {
-    test_setup();
-
-    let mut connection = postgres_connection();
-    let conn = &mut connection;
+    let connection = &mut postgres_connection();
 
     let service = get_crate_service();
     let endpoints = get_endpoints();
     let dependencies = get_dependencies();
 
-    let result = service::Service::create(conn, &service);
+    let result = service::Service::create(connection, &service);
 
     assert!(result.is_ok(), "{:?}", result.unwrap_err());
 
@@ -132,10 +56,7 @@
 
 #[test]
 fn test_count_service() {
-    test_setup();
-
-    let mut connection = postgres_connection();
-    let conn = &mut connection;
+    let conn = &mut postgres_connection();
 
     let result = service::Service::count(conn);
     assert!(result.is_ok(), "{:?}", result.unwrap_err());
@@ -152,10 +73,7 @@
 
 #[test]
 fn test_check_if_service_id_exists() {
-    test_setup();
-
-    let mut connection = postgres_connection();
-    let conn = &mut connection;
+    let conn = &mut postgres_connection();
 
     let service = get_crate_service();
     let result = service::Service::create(conn, &service);
@@ -168,10 +86,7 @@
 
 #[test]
 fn test_check_if_service_id_online() {
-    test_setup();
-
-    let mut connection = postgres_connection();
-    let conn = &mut connection;
+    let conn = &mut postgres_connection();
 
     let service = get_crate_service();
     let result = service::Service::create(conn, &service);
@@ -185,10 +100,7 @@
 
 #[test]
 fn test_get_all_online_services() {
-    test_setup();
-
-    let mut connection = postgres_connection();
-    let conn = &mut connection;
+    let conn = &mut postgres_connection();
 
     let service = get_crate_service();
     let result = service::Service::create(conn, &service);
@@ -201,10 +113,7 @@
 
 #[test]
 fn test_get_all_offline_services() {
-    test_setup();
-
-    let mut connection = postgres_connection();
-    let conn = &mut connection;
+    let conn = &mut postgres_connection();
 
     let service = get_crate_service();
     let result = service::Service::create(conn, &service);
@@ -217,10 +126,7 @@
 
 #[test]
 fn test_get_all_service_dependencies() {
-    test_setup();
-
-    let mut connection = postgres_connection();
-    let conn = &mut connection;
+    let conn = &mut postgres_connection();
 
     let service = get_crate_service();
     let result = service::Service::create(conn, &service);
@@ -235,10 +141,7 @@
 
 #[test]
 fn test_get_all_service_endpoints() {
-    test_setup();
-
-    let mut connection = postgres_connection();
-    let conn = &mut connection;
+    let conn = &mut postgres_connection();
 
     let service = get_crate_service();
     let result = service::Service::create(conn, &service);
@@ -253,10 +156,7 @@
 
 #[test]
 fn test_service_read() {
-    test_setup();
-
-    let mut connection = postgres_connection();
-    let conn = &mut connection;
+    let conn = &mut postgres_connection();
 
     let service = get_crate_service();
     let result = service::Service::create(conn, &service);
@@ -281,8 +181,7 @@
 
 #[test]
 fn test_service_read_all() {
-    let mut connection = postgres_connection();
-    let conn = &mut connection;
+    let conn = &mut postgres_connection();
 
     let service = get_crate_service();
     let result = service::Service::create(conn, &service);
 
@@ -297,8 +196,7 @@
 
 #[test]
 fn test_set_service_online() {
-    let mut connection = postgres_connection();
-    let conn = &mut connection;
+    let conn = &mut postgres_connection();
 
     let service = get_crate_service();
     let result = service::Service::create(conn, &service);
 
@@ -316,8 +214,7 @@
 
 #[test]
 fn test_set_service_offline() {
-    let mut connection = postgres_connection();
-    let conn = &mut connection;
+    let conn = &mut postgres_connection();
 
     let service = get_crate_service();
     let result = service::Service::create(conn, &service);
 
@@ -335,8 +232,7 @@
 
 #[test]
 fn test_service_update() {
-    let mut connection = postgres_connection();
-    let conn = &mut connection;
+    let conn = &mut postgres_connection();
 
     let service = get_crate_service();
     let result = service::Service::create(conn, &service);
 
@@ -376,8 +272,7 @@
 
 #[test]
 fn test_service_delete() {
-    let mut connection = postgres_connection();
-    let conn = &mut connection;
+    let conn = &mut postgres_connection();
 
     // Insert the service
     let service = get_crate_service();