diff --git a/.github/workflows/code-coverage.yml b/.github/workflows/code-coverage.yml index efeac0d9..0f019f69 100644 --- a/.github/workflows/code-coverage.yml +++ b/.github/workflows/code-coverage.yml @@ -1,7 +1,8 @@ -name: Linux CI && Code Coverage +name: Linux CI on: push: + branches: 'development' tags: - 'v[0-9]+.[0-9]+.[0-9]+' - 'v[0-9]+.[0-9]+.[0-9]+rc[0-9]+' @@ -43,6 +44,7 @@ jobs: - name: Run tests run: | + cargo test initialize_sql_server_docker_instance -p tests --all-features --no-fail-fast -- --show-output --nocapture --include-ignored cargo test --all-features --no-fail-fast --target=x86_64-unknown-linux-gnu -- --show-output --test-threads=1 - name: Waking up docker diff --git a/.github/workflows/code-quality.yml b/.github/workflows/code-quality.yml index a1ad7058..ff194b49 100644 --- a/.github/workflows/code-quality.yml +++ b/.github/workflows/code-quality.yml @@ -45,7 +45,7 @@ jobs: strategy: fail-fast: false matrix: - crate: [canyon_connection, canyon_crud, canyon_macros, canyon_observer, canyon_observer, canyon_sql] + crate: [canyon_connection, canyon_crud, canyon_macros, canyon_observer, canyon_sql] steps: - uses: actions/checkout@v3 diff --git a/.github/workflows/continuous-integration.yml b/.github/workflows/continuous-integration.yml index 82c4ffb9..83c1861b 100644 --- a/.github/workflows/continuous-integration.yml +++ b/.github/workflows/continuous-integration.yml @@ -2,9 +2,9 @@ name: Continuous Integration on: push: - branches: 'main' + branches: '*' pull_request: - branches: 'main' + branches: '*' env: CARGO_TERM_COLOR: always diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index be13e148..77fc1110 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -1,6 +1,6 @@ -# Contributing to CONTRIBUTING.md +# Contributing to Canyon-SQL -First off, thanks for taking the time to contribute! +First off, thanks for taking the time to contribute! All types of contributions are encouraged and valued. 
See the [Table of Contents](#table-of-contents) for different ways to help and details about how this project handles them. Please make sure to read the relevant section before making your contribution. It will make it a lot easier for us maintainers and smooth out the experience for all involved. The community looks forward to your contributions. @@ -24,7 +24,6 @@ All types of contributions are encouraged and valued. See the [Table of Contents - [Commit Messages](#commit-messages) - [Join The Project Team](#join-the-project-team) - ## Code of Conduct This project and everyone participating in it is governed by the @@ -32,7 +31,6 @@ This project and everyone participating in it is governed by the By participating, you are expected to uphold this code. Please report unacceptable behavior to <>. - ## I Have a Question > If you want to ask a question, we assume that you have read the available [Documentation](https://github.com/zerodaycode/canyon-book). @@ -47,16 +45,14 @@ If you then still feel the need to ask a question and need clarification, we rec We will then take care of the issue as soon as possible. - - ## I Want To Contribute -> ### Legal Notice +### Legal Notice + > When contributing to this project, you must agree that you have authored 100% of the content, that you have the necessary rights to the content and that the content you contribute may be provided under the project license. ### Reporting Bugs - #### Before Submitting a Bug Report A good bug report shouldn't leave others needing to chase you up for more information. Therefore, we ask you to investigate carefully, collect information and describe the issue in detail in your report. Please complete the following steps in advance to help us fix any potential bug as fast as possible. @@ -72,12 +68,10 @@ A good bug report shouldn't leave others needing to chase you up for more inform - Possibly your input and the output - Can you reliably reproduce the issue? 
And can you also reproduce it with older versions? - #### How Do I Submit a Good Bug Report? > You must never report security related issues, vulnerabilities or bugs including sensitive information to the issue tracker, or elsewhere in public. Instead sensitive bugs must be sent by email to <>. - We use GitHub issues to track bugs and errors. If you run into an issue with the project: - Open an [Issue](/issues/new). (Since we can't be sure at this point whether it is a bug or not, we ask you not to talk about a bug yet and not to label the issue.) @@ -93,7 +87,7 @@ Once it's filed: ### Suggesting Enhancements -If you want to suggest an enhacement or new feature for the project, please [open a new issue](/issues) describing what you desire to improve, and, potentially, how you plan to contribute to the project. +If you want to suggest an enhancement or new feature for the project, please [open a new issue](/issues) describing what you desire to improve, and, potentially, how you plan to contribute to the project. #### Before Submitting an Enhancement @@ -112,6 +106,6 @@ Enhancement suggestions are tracked as [GitHub issues](/issues). - You may want to **include screenshots and animated GIFs** which help you demonstrate the steps or point out the part which the suggestion is related to. You can use [this tool](https://www.cockos.com/licecap/) to record GIFs on macOS and Windows, and [this tool](https://github.com/colinkeenan/silentcast) or [this tool](https://github.com/GNOME/byzanz) on Linux. - **Explain why this enhancement would be useful** to most CONTRIBUTING.md users. You may also want to point out the other projects that solved it better and which could serve as inspiration. - ## Attribution + This guide is based on the **contributing.md**. [Make your own](https://contributing.md/)! 
diff --git a/README.md b/README.md index 6d181874..c62a762d 100755 --- a/README.md +++ b/README.md @@ -2,8 +2,9 @@ **A full written in `Rust` ORM for multiple databases.** -- [![Linux CI](https://github.com/zerodaycode/Canyon-SQL/actions/workflows/code-coverage.yml/badge.svg)](https://github.com/zerodaycode/Canyon-SQL/actions/workflows/code-coverage.yml) +- ![crates.io](https://img.shields.io/crates/v/canyon_sql.svg) - [![Code Coverage Measure](https://zerodaycode.github.io/Canyon-SQL/badges/flat.svg)](https://zerodaycode.github.io/Canyon-SQL) +- [![Linux CI](https://github.com/zerodaycode/Canyon-SQL/actions/workflows/code-coverage.yml/badge.svg)](https://github.com/zerodaycode/Canyon-SQL/actions/workflows/code-coverage.yml) - [![Tests on macOS](https://github.com/zerodaycode/Canyon-SQL/actions/workflows/macos-tests.yml/badge.svg)](https://github.com/zerodaycode/Canyon-SQL/actions/workflows/macos-tests.yml) - [![Tests on Windows](https://github.com/zerodaycode/Canyon-SQL/actions/workflows/windows-tests.yml/badge.svg)](https://github.com/zerodaycode/Canyon-SQL/actions/workflows/windows-tests.yml) @@ -27,8 +28,8 @@ You can read it [by clicking this link](https://zerodaycode.github.io/canyon-boo - Use of multiple datasources. You can query multiple databases at the same time, even different ones!. This means that you will be able to query concurrently a `PostgreSQL` database and an `SqlServer` one in the same project. - Is macro based. With a few annotations and a configuration file, you are ready to write your data access. -- Allows **migrations**. `Canyon-SQL` comes with a *god-mode* that will manage every table on your database for you. You can modify in `Canyon` code your tables internally, altering columns, setting up constraints... -Also, in the future, we have plans to allow you to manipulate the whole server, like creating databases, altering configurations... everything, but in a programatically approach with `Canyon`! +- Allows **migrations**. 
`Canyon-SQL` comes with a *god-mode* that will manage every table on your database for you. You can modify in `Canyon` code your tables internally, altering columns, setting up constraints... +Also, in the future, we have plans to allow you to manipulate the whole server, like creating databases, altering configurations... everything, but in a programmatically approach with `Canyon`! ## Supported databases @@ -40,3 +41,93 @@ Also, in the future, we have plans to allow you to manipulate the whole server, Every crate listed above is an `async` based crate, in line with the guidelines of the `Canyon-SQL` design. There are plans for include more databases engines. + +## Better by example + +Let's take a look to see how the `Canyon` code looks like! + +### The classical SELECT * FROM {table_name} + +```rust +let find_all_result: Result, Box> = League::find_all().await; + +// Connection doesn't return an error +assert!(find_all_result.is_ok()); +// We retrieved elements from the League table +assert!(!find_all_result.unwrap().is_empty()); +``` + +### Performing a search over the primary key column + +```rust +let find_by_pk_result: Result, Box> = League::find_by_pk(&1).await; + +assert!(find_by_pk_result.as_ref().unwrap().is_some()); + +let some_league = find_by_pk_result.unwrap().unwrap(); +assert_eq!(some_league.id, 1); +assert_eq!(some_league.ext_id, 100695891328981122_i64); +assert_eq!(some_league.slug, "european-masters"); +assert_eq!(some_league.name, "European Masters"); +assert_eq!(some_league.region, "EUROPE"); +assert_eq!( + some_league.image_url, + "http://static.lolesports.com/leagues/EM_Bug_Outline1.png" +); +``` + +Note the leading reference on the `find_by_pk(...)` parameter. This associated function receives an `&dyn QueryParameter<'_>` as argument, not a value. 
+ +### Building more complex queries + +For exemplify the capabilities of `Canyon`, we will use `SelectQueryBuilder`, which implements the `QueryBuilder` trait +for build a more complex where, filteing data and joining tables. + +```rust +let mut select_with_joins = LeagueTournament::select_query(); + select_with_joins + .inner_join("tournament", "league.id", "tournament.league_id") + .left_join("team", "tournament.id", "player.tournament_id") + .r#where(LeagueFieldValue::id(&7), Comp::Gt) + .and(LeagueFieldValue::name(&"KOREA"), Comp::Eq) + .and_values_in(LeagueField::name, &["LCK", "STRANGER THINGS"]); + // NOTE: We don't have in the docker the generated relationships + // with the joins, so for now, we are just going to check that the + // generated SQL by the SelectQueryBuilder is the spected + assert_eq!( + select_with_joins.read_sql(), + "SELECT * FROM league INNER JOIN tournament ON league.id = tournament.league_id LEFT JOIN team ON tournament.id = player.tournament_id WHERE id > $1 AND name = $2 AND name IN ($2, $3) " + ) +``` + +> Note: For now, when you use joins, you will need to create a new model with the columns in both tables (in case that you desire the data in such columns), but just follows the habitual process with the CanyonMapper. +It will try to retrieve the data for every field declared. If you don't declare a field that is in the open clause, in this case (*), that field won't be retrieved. No problem. But if you have fields that aren't map +able with some column in the database, the program will panic. + +## More examples + +If you want to see more examples, you can take a look into the `tests` folder, at the root of this repository. Every available database operation is tested there, so you can use it to find the usage of the described operations in the documentation mentioned above + +## Contributing to CANYON-SQL + +First of all, thanks for take in consideration help us with the project. 
You can take a look at our [contributing guide](./CONTRIBUTING.md).
diff --git a/canyon_connection/src/canyon_database_connector.rs b/canyon_connection/src/canyon_database_connector.rs index bfb9bc43..7da2c2ed 100644 --- a/canyon_connection/src/canyon_database_connector.rs +++ b/canyon_connection/src/canyon_database_connector.rs @@ -61,7 +61,7 @@ impl DatabaseConnection { tokio::spawn(async move { if let Err(e) = new_connection.await { - eprintln!("An error occured while trying to connect to the PostgreSQL database: {e}"); + eprintln!("An error occurred while trying to connect to the PostgreSQL database: {e}"); } }); @@ -96,7 +96,7 @@ impl DatabaseConnection { // TcpStream to connect to the server. let tcp = TcpStream::connect(config.get_addr()) .await - .expect("Error instanciating the SqlServer TCP Stream"); + .expect("Error instantiating the SqlServer TCP Stream"); // We'll disable the Nagle algorithm. Buffering is handled // internally with a `Sink`. diff --git a/canyon_connection/src/lib.rs b/canyon_connection/src/lib.rs index f762c985..9a4ebe90 100644 --- a/canyon_connection/src/lib.rs +++ b/canyon_connection/src/lib.rs @@ -40,7 +40,7 @@ lazy_static! { /// in the configuration file. /// /// This avoids Canyon to create a new connection to the database on every query, potentially avoiding bottlenecks -/// derivated from the instanciation of that new conn every time. +/// derivated from the instantiation of that new conn every time. 
/// /// Note: We noticed with the integration tests that the [`tokio_postgres`] crate (PostgreSQL) is able to work in an async environment /// with a new connection per query without no problem, but the [`tiberius`] crate (MSSQL) sufferes a lot when it has continuous diff --git a/canyon_crud/src/bounds.rs b/canyon_crud/src/bounds.rs index 48767585..9a00b12c 100644 --- a/canyon_crud/src/bounds.rs +++ b/canyon_crud/src/bounds.rs @@ -45,7 +45,7 @@ where /// and convert it to a tuple struct formed by the column name as an String, /// and the dynamic value of the [`QueryParameter<'_>`] trait object contained /// inside the variant requested, -/// enabling a convertion of that value into something +/// enabling a conversion of that value into something /// that can be part of an SQL query. /// /// @@ -229,7 +229,7 @@ pub trait QueryParameter<'a>: std::fmt::Debug + Sync + Send { /// This implementation is necessary because of the generic amplitude /// of the arguments of the [`Transaction::query`], that should work with /// a collection of [`QueryParameter<'a>`], in order to allow a workflow -/// that is not dependant of the specific type of the argument that holds +/// that is not dependent of the specific type of the argument that holds /// the query parameters of the database connectors impl<'a> IntoSql<'a> for &'a dyn QueryParameter<'a> { fn into_sql(self) -> ColumnData<'a> { diff --git a/canyon_crud/src/crud.rs b/canyon_crud/src/crud.rs index 595ec513..aed59307 100644 --- a/canyon_crud/src/crud.rs +++ b/canyon_crud/src/crud.rs @@ -210,7 +210,7 @@ mod sqlserver_query_launcher { where Z: AsRef<[&'a dyn QueryParameter<'a>]> + Sync + Send + 'a, { - // Re-generate de insert statement to adecuate it to the SQL SERVER syntax to retrieve the PK value(s) after insert + // Re-generate de insert statement to adequate it to the SQL SERVER syntax to retrieve the PK value(s) after insert if stmt.contains("RETURNING") { let c = stmt.clone(); let temp = c diff --git 
a/canyon_crud/src/query_elements/operators.rs b/canyon_crud/src/query_elements/operators.rs index 00b42154..7a91e7ef 100644 --- a/canyon_crud/src/query_elements/operators.rs +++ b/canyon_crud/src/query_elements/operators.rs @@ -9,9 +9,9 @@ pub enum Comp { Eq, /// Operator "!=" not equals Neq, - /// Operator ">" greather than value + /// Operator ">" greater than value Gt, - /// Operator ">=" greather or equals than value + /// Operator ">=" greater or equals than value GtEq, /// Operator "<" less than value Lt, diff --git a/canyon_crud/src/query_elements/query_builder.rs b/canyon_crud/src/query_elements/query_builder.rs index cfe52f2b..3676d93c 100644 --- a/canyon_crud/src/query_elements/query_builder.rs +++ b/canyon_crud/src/query_elements/query_builder.rs @@ -337,7 +337,7 @@ where /// * `col1` - The left side of the ON operator for the join /// * `col2` - The right side of the ON operator for the join /// - /// > Note: The order on the column paramenters is irrelevant + /// > Note: The order on the column parameters is irrelevant pub fn left_join(&mut self, join_table: &str, col1: &str, col2: &str) -> &mut Self { self._inner .query @@ -353,7 +353,7 @@ where /// * `col1` - The left side of the ON operator for the join /// * `col2` - The right side of the ON operator for the join /// - /// > Note: The order on the column paramenters is irrelevant + /// > Note: The order on the column parameters is irrelevant pub fn inner_join(&mut self, join_table: &str, col1: &str, col2: &str) -> &mut Self { self._inner .query @@ -369,7 +369,7 @@ where /// * `col1` - The left side of the ON operator for the join /// * `col2` - The right side of the ON operator for the join /// - /// > Note: The order on the column paramenters is irrelevant + /// > Note: The order on the column parameters is irrelevant pub fn right_join(&mut self, join_table: &str, col1: &str, col2: &str) -> &mut Self { self._inner .query @@ -385,7 +385,7 @@ where /// * `col1` - The left side of the ON operator for 
the join /// * `col2` - The right side of the ON operator for the join /// - /// > Note: The order on the column paramenters is irrelevant + /// > Note: The order on the column parameters is irrelevant pub fn full_join(&mut self, join_table: &str, col1: &str, col2: &str) -> &mut Self { self._inner .query @@ -511,7 +511,7 @@ where ) } - let cap = columns.len() * 50; // Reserving an enought initial capacity per set clause + let cap = columns.len() * 50; // Reserving an enough initial capacity per set clause let mut set_clause = String::with_capacity(cap); set_clause.push_str(" SET "); diff --git a/canyon_crud/src/result.rs b/canyon_crud/src/result.rs index a8e0ba45..1a2cae29 100644 --- a/canyon_crud/src/result.rs +++ b/canyon_crud/src/result.rs @@ -33,7 +33,7 @@ impl DatabaseResult { } /// Returns a [`Vec`] filled with instances of the type T. - /// Z param it's used to constrait the types that can call this method. + /// Z param it's used to constraint the types that can call this method. 
/// /// Also, provides a way to statically call `Z::deserialize_` method, /// which it's the implementation used by the macros to automatically diff --git a/canyon_macros/src/lib.rs b/canyon_macros/src/lib.rs index 6641da6a..9257fd38 100755 --- a/canyon_macros/src/lib.rs +++ b/canyon_macros/src/lib.rs @@ -279,7 +279,7 @@ pub fn canyon_entity( // Fill the register with the data of the attached struct CANYON_REGISTER_ENTITIES .lock() - .expect("Error adquiring Mutex guard on Canyon Entity macro") + .expect("Error acquiring Mutex guard on Canyon Entity macro") .push(new_entity); // Assemble everything @@ -307,7 +307,7 @@ pub fn crud_operations(input: proc_macro::TokenStream) -> proc_macro::TokenStrea // Construct a representation of Rust code as a syntax tree // that we can manipulate - // Calls the helper struct to build the tokens that generates the final CRUD methos + // Calls the helper struct to build the tokens that generates the final CRUD methods let ast: DeriveInput = syn::parse(input).expect("Error parsing `Canyon Entity for generate the CRUD methods"); let macro_data = MacroTokens::new(&ast); @@ -366,7 +366,7 @@ fn impl_crud_operations_trait_for_struct( let fk_method_signatures = _search_by_fk_tokens.iter().map(|(sign, _)| sign); let fk_method_implementations = _search_by_fk_tokens.iter().map(|(_, m_impl)| m_impl); - // The tokens for generating the methods that enable Canyon to retrive the child entities that are of T type + // The tokens for generating the methods that enable Canyon to retrieve the child entities that are of T type // given a parent entity U: ForeignKeyable, as an associated function for the child type (T) let _search_by_revese_fk_tokens: Vec<(TokenStream, TokenStream)> = generate_find_by_reverse_foreign_key_tokens(macro_data, &table_schema_data); @@ -426,7 +426,7 @@ fn impl_crud_operations_trait_for_struct( /// Hidden trait for generate the foreign key operations available /// in Canyon without have to define them before hand in 
CrudOperations - /// because it's just imposible with the actual system (where the methods + /// because it's just impossible with the actual system (where the methods /// are generated dynamically based on some properties of the `foreign_key` /// annotation) #[canyon_sql::macros::async_trait] diff --git a/canyon_macros/src/query_operations/delete.rs b/canyon_macros/src/query_operations/delete.rs index bd45facb..4d5f3fce 100644 --- a/canyon_macros/src/query_operations/delete.rs +++ b/canyon_macros/src/query_operations/delete.rs @@ -4,7 +4,7 @@ use quote::quote; use crate::utils::macro_tokens::MacroTokens; /// Generates the TokenStream for the __delete() CRUD operation -/// returning a result, indicating a posible failure querying the database +/// returning a result, indicating a possible failure querying the database pub fn generate_delete_tokens(macro_data: &MacroTokens, table_schema_data: &String) -> TokenStream { let ty = macro_data.ty; @@ -24,7 +24,7 @@ pub fn generate_delete_tokens(macro_data: &MacroTokens, table_schema_data: &Stri quote! { /// Deletes from a database entity the row that matches /// the current instance of a T type, returning a result - /// indicating a posible failure querying the database. + /// indicating a possible failure querying the database. async fn delete(&self) -> Result<(), Box<(dyn std::error::Error + Send + Sync + 'static)>> { let stmt = format!("DELETE FROM {} WHERE {:?} = $1", #table_schema_data, #primary_key); @@ -41,7 +41,7 @@ pub fn generate_delete_tokens(macro_data: &MacroTokens, table_schema_data: &Stri /// Deletes from a database entity the row that matches /// the current instance of a T type, returning a result - /// indicating a posible failure querying the database with the specified datasource. + /// indicating a possible failure querying the database with the specified datasource. 
async fn delete_datasource<'a>(&self, datasource_name: &'a str) -> Result<(), Box<(dyn std::error::Error + Send + Sync + 'static)>> { @@ -101,7 +101,7 @@ pub fn generate_delete_query_tokens( /// /// It performs an `DELETE FROM table_name`, where `table_name` it's the name of your /// entity but converted to the corresponding database convention, - /// unless concrete values are setted on the available parameters of the + /// unless concrete values are set on the available parameters of the /// `canyon_macro(table_name = "table_name", schema = "schema")` fn delete_query<'a>() -> canyon_sql::query::DeleteQueryBuilder<'a, #ty> { canyon_sql::query::DeleteQueryBuilder::new(#table_schema_data, "") @@ -112,7 +112,7 @@ pub fn generate_delete_query_tokens( /// /// It performs an `DELETE FROM table_name`, where `table_name` it's the name of your /// entity but converted to the corresponding database convention, - /// unless concrete values are setted on the available parameters of the + /// unless concrete values are set on the available parameters of the /// `canyon_macro(table_name = "table_name", schema = "schema")` /// /// The query it's made against the database with the configured datasource diff --git a/canyon_macros/src/query_operations/insert.rs b/canyon_macros/src/query_operations/insert.rs index d62af003..e5b8fc12 100644 --- a/canyon_macros/src/query_operations/insert.rs +++ b/canyon_macros/src/query_operations/insert.rs @@ -14,7 +14,7 @@ pub fn generate_insert_tokens(macro_data: &MacroTokens, table_schema_data: &Stri // Returns a String with the generic $x placeholder for the query parameters. 
let placeholders = macro_data.placeholders_generator(); - // Retrives the fields of the Struct + // Retrieves the fields of the Struct let fields = macro_data.get_struct_fields(); let insert_values = fields.iter().map(|ident| { @@ -71,7 +71,7 @@ pub fn generate_insert_tokens(macro_data: &MacroTokens, table_schema_data: &Stri self.#pk_ident = res.sqlserver.get(0) .expect("No value found on the returning clause") .get::<#pk_type, &str>(#primary_key) - .expect("SQL Server primary key type failed to be setted as value") + .expect("SQL Server primary key type failed to be set as value") .to_owned(); Ok(()) @@ -118,7 +118,7 @@ pub fn generate_insert_tokens(macro_data: &MacroTokens, table_schema_data: &Stri /// operation, you instance will have the correct value that is the *PRIMARY KEY* /// of the database row that represents. /// - /// This operation returns a result type, indicating a posible failure querying the database. + /// This operation returns a result type, indicating a possible failure querying the database. /// /// ## *Examples* ///``` @@ -164,7 +164,7 @@ pub fn generate_insert_tokens(macro_data: &MacroTokens, table_schema_data: &Stri /// operation, you instance will have the correct value that is the *PRIMARY KEY* /// of the database row that represents. /// - /// This operation returns a result type, indicating a posible failure querying the database. + /// This operation returns a result type, indicating a possible failure querying the database. /// /// ## *Examples* ///``` @@ -215,7 +215,7 @@ pub fn generate_multiple_insert_tokens( // Retrieves the fields of the Struct as continuous String let column_names = macro_data.get_struct_fields_as_strings(); - // Retrives the fields of the Struct + // Retrieves the fields of the Struct let fields = macro_data.get_struct_fields(); let macro_fields = fields.iter().map(|field| quote! 
{ &instance.#field }); @@ -239,14 +239,14 @@ pub fn generate_multiple_insert_tokens( .collect::>() .join(", "); - let mut splitted = mapped_fields.split(", ") + let mut split = mapped_fields.split(", ") .collect::>(); - let pk_value_index = splitted.iter() + let pk_value_index = split.iter() .position(|pk| *pk == format!("\"{}\"", #pk).as_str()) .expect("Error. No primary key found when should be there"); - splitted.retain(|pk| *pk != format!("\"{}\"", #pk).as_str()); - mapped_fields = splitted.join(", ").to_string(); + split.retain(|pk| *pk != format!("\"{}\"", #pk).as_str()); + mapped_fields = split.join(", ").to_string(); let mut fields_placeholders = String::new(); @@ -321,7 +321,7 @@ pub fn generate_multiple_insert_tokens( .get(idx) .expect("Failed getting the returned IDs for a multi insert") .get::<#pk_type, &str>(#pk) - .expect("SQL Server primary key type failed to be setted as value"); + .expect("SQL Server primary key type failed to be set as value"); } Ok(()) @@ -339,7 +339,7 @@ pub fn generate_multiple_insert_tokens( .collect::>() .join(", "); - let mut splitted = mapped_fields.split(", ") + let mut split = mapped_fields.split(", ") .collect::>(); let mut fields_placeholders = String::new(); diff --git a/canyon_macros/src/query_operations/select.rs b/canyon_macros/src/query_operations/select.rs index 2a37a018..c54a2a09 100644 --- a/canyon_macros/src/query_operations/select.rs +++ b/canyon_macros/src/query_operations/select.rs @@ -18,7 +18,7 @@ pub fn generate_find_all_unchecked_tokens( quote! { /// Performns a `SELECT * FROM table_name`, where `table_name` it's /// the name of your entity but converted to the corresponding - /// database convention. P.ej. PostgreSQL preferes table names declared + /// database convention. P.ej. PostgreSQL prefers table names declared /// with snake_case identifiers. 
async fn find_all_unchecked<'a>() -> Vec<#ty> { <#ty as canyon_sql::crud::Transaction<#ty>>::query( @@ -33,7 +33,7 @@ pub fn generate_find_all_unchecked_tokens( /// Performns a `SELECT * FROM table_name`, where `table_name` it's /// the name of your entity but converted to the corresponding - /// database convention. P.ej. PostgreSQL preferes table names declared + /// database convention. P.ej. PostgreSQL prefers table names declared /// with snake_case identifiers. /// /// The query it's made against the database with the configured datasource @@ -64,7 +64,7 @@ pub fn generate_find_all_tokens( quote! { /// Performns a `SELECT * FROM table_name`, where `table_name` it's /// the name of your entity but converted to the corresponding - /// database convention. P.ej. PostgreSQL preferes table names declared + /// database convention. P.ej. PostgreSQL prefers table names declared /// with snake_case identifiers. async fn find_all<'a>() -> Result, Box<(dyn std::error::Error + Send + Sync + 'static)>> @@ -84,7 +84,7 @@ pub fn generate_find_all_tokens( /// Performns a `SELECT * FROM table_name`, where `table_name` it's /// the name of your entity but converted to the corresponding - /// database convention. P.ej. PostgreSQL preferes table names declared + /// database convention. P.ej. PostgreSQL prefers table names declared /// with snake_case identifiers. 
/// /// The query it's made against the database with the configured datasource @@ -125,7 +125,7 @@ pub fn generate_find_all_query_tokens( /// /// It performs a `SELECT * FROM table_name`, where `table_name` it's the name of your /// entity but converted to the corresponding database convention, - /// unless concrete values are setted on the available parameters of the + /// unless concrete values are set on the available parameters of the /// `canyon_macro(table_name = "table_name", schema = "schema")` fn select_query<'a>() -> canyon_sql::query::SelectQueryBuilder<'a, #ty> { canyon_sql::query::SelectQueryBuilder::new(#table_schema_data, "") @@ -136,7 +136,7 @@ pub fn generate_find_all_query_tokens( /// /// It performs a `SELECT * FROM table_name`, where `table_name` it's the name of your /// entity but converted to the corresponding database convention, - /// unless concrete values are setted on the available parameters of the + /// unless concrete values are set on the available parameters of the /// `canyon_macro(table_name = "table_name", schema = "schema")` /// /// The query it's made against the database with the configured datasource @@ -149,7 +149,7 @@ pub fn generate_find_all_query_tokens( } /// Performs a COUNT(*) query over some table, returning a [`Result`] wrapping -/// a posible success or error coming from the database +/// a possible success or error coming from the database pub fn generate_count_tokens( macro_data: &MacroTokens<'_>, table_schema_data: &String, @@ -187,7 +187,7 @@ pub fn generate_count_tokens( quote! 
{ /// Performs a COUNT(*) query over some table, returning a [`Result`] rather than panicking, - /// wrapping a posible success or error coming from the database + /// wrapping a possible success or error coming from the database async fn count() -> Result> { let count = <#ty as canyon_sql::crud::Transaction<#ty>>::query( #stmt, @@ -199,7 +199,7 @@ pub fn generate_count_tokens( } /// Performs a COUNT(*) query over some table, returning a [`Result`] rather than panicking, - /// wrapping a posible success or error coming from the database with the specified datasource + /// wrapping a possible success or error coming from the database with the specified datasource async fn count_datasource<'a>(datasource_name: &'a str) -> Result> { let count = <#ty as canyon_sql::crud::Transaction<#ty>>::query( #stmt, @@ -329,7 +329,7 @@ pub fn generate_find_by_pk_tokens( /// Generates the TokenStream for build the search by foreign key feature, also as a method instance /// of a T type of as an associated function of same T type, but wrapped as a Result, representing -/// a posible failure querying the database, a bad or missing FK annotation or a missed ForeignKeyable +/// a possible failure querying the database, a bad or missing FK annotation or a missed ForeignKeyable /// derive macro on the parent side of the relation pub fn generate_find_by_foreign_key_tokens( macro_data: &MacroTokens<'_>, @@ -421,7 +421,7 @@ pub fn generate_find_by_foreign_key_tokens( /// Generates the TokenStream for build the __search_by_foreign_key() CRUD /// associated function, but wrapped as a Result, representing -/// a posible failure querying the database, a bad or missing FK annotation or a missed ForeignKeyable +/// a possible failure querying the database, a bad or missing FK annotation or a missed ForeignKeyable /// derive macro on the parent side of the relation pub fn generate_find_by_reverse_foreign_key_tokens( macro_data: &MacroTokens<'_>, @@ -465,7 +465,7 @@ pub fn 
generate_find_by_reverse_foreign_key_tokens( quote! { #quoted_method_signature; }, quote! { /// Given a parent entity T annotated with the derive proc macro `ForeignKeyable`, - /// performns a search to find the childs that belong to that concrete parent. + /// performns a search to find the children that belong to that concrete parent. #quoted_method_signature { let lookage_value = value.get_fk_column(#column) @@ -494,7 +494,7 @@ pub fn generate_find_by_reverse_foreign_key_tokens( quote! { #quoted_datasource_method_signature; }, quote! { /// Given a parent entity T annotated with the derive proc macro `ForeignKeyable`, - /// performns a search to find the childs that belong to that concrete parent + /// performns a search to find the children that belong to that concrete parent /// with the specified datasource. #quoted_datasource_method_signature { diff --git a/canyon_macros/src/query_operations/update.rs b/canyon_macros/src/query_operations/update.rs index fafa0acf..94a9abf3 100644 --- a/canyon_macros/src/query_operations/update.rs +++ b/canyon_macros/src/query_operations/update.rs @@ -9,7 +9,7 @@ pub fn generate_update_tokens(macro_data: &MacroTokens, table_schema_data: &Stri let update_columns = macro_data.get_column_names_pk_parsed(); - // Retrives the fields of the Struct + // Retrieves the fields of the Struct let fields = macro_data.get_struct_fields(); let mut vec_columns_values: Vec = Vec::new(); @@ -33,7 +33,7 @@ pub fn generate_update_tokens(macro_data: &MacroTokens, table_schema_data: &Stri quote! { /// Updates a database record that matches /// the current instance of a T type, returning a result - /// indicating a posible failure querying the database. + /// indicating a possible failure querying the database. 
async fn update(&self) -> Result<(), Box> { let stmt = format!( "UPDATE {} SET {} WHERE {} = ${:?}", @@ -53,7 +53,7 @@ pub fn generate_update_tokens(macro_data: &MacroTokens, table_schema_data: &Stri /// Updates a database record that matches /// the current instance of a T type, returning a result - /// indicating a posible failure querying the database with the + /// indicating a possible failure querying the database with the /// specified datasource async fn update_datasource<'a>(&self, datasource_name: &'a str) -> Result<(), Box> @@ -122,7 +122,7 @@ pub fn generate_update_query_tokens( /// /// It performs an `UPDATE table_name`, where `table_name` it's the name of your /// entity but converted to the corresponding database convention, - /// unless concrete values are setted on the available parameters of the + /// unless concrete values are set on the available parameters of the /// `canyon_macro(table_name = "table_name", schema = "schema")` fn update_query<'a>() -> canyon_sql::query::UpdateQueryBuilder<'a, #ty> { canyon_sql::query::UpdateQueryBuilder::new(#table_schema_data, "") @@ -133,7 +133,7 @@ pub fn generate_update_query_tokens( /// /// It performs an `UPDATE table_name`, where `table_name` it's the name of your /// entity but converted to the corresponding database convention, - /// unless concrete values are setted on the available parameters of the + /// unless concrete values are set on the available parameters of the /// `canyon_macro(table_name = "table_name", schema = "schema")` /// /// The query it's made against the database with the configured datasource diff --git a/canyon_macros/src/utils/macro_tokens.rs b/canyon_macros/src/utils/macro_tokens.rs index e00c5424..370fbeea 100644 --- a/canyon_macros/src/utils/macro_tokens.rs +++ b/canyon_macros/src/utils/macro_tokens.rs @@ -28,7 +28,7 @@ impl<'a> MacroTokens<'a> { } } - /// Gives a Vec ot tuples that contains the visibilty, the name and + /// Gives a Vec of tuples that contains the visibility, 
the name and /// the type of every field on a Struct pub fn _fields_with_visibility_and_types(&self) -> Vec<(Visibility, Ident, Type)> { self.fields @@ -43,7 +43,7 @@ impl<'a> MacroTokens<'a> { .collect::>() } - /// Gives a Vec ot tuples that contains the name and + /// Gives a Vec of tuples that contains the name and /// the type of every field on a Struct pub fn _fields_with_types(&self) -> Vec<(Ident, Type)> { self.fields @@ -73,7 +73,7 @@ impl<'a> MacroTokens<'a> { /// /// If the type contains a `#[primary_key]` annotation (and), returns the /// name of the columns without the fields that maps against the column designed as - /// primary key (if its present and its autoincremental attribute is setted to true) + /// primary key (if its present and its autoincremental attribute is set to true) /// (autoincremental = true) or its without the autoincremental attribute, which leads /// to the same behaviour. /// diff --git a/canyon_observer/src/manager/entity.rs b/canyon_observer/src/manager/entity.rs index c68eb186..78e2f157 100644 --- a/canyon_observer/src/manager/entity.rs +++ b/canyon_observer/src/manager/entity.rs @@ -125,7 +125,7 @@ impl Parse for CanyonEntity { fn parse(input: &ParseBuffer) -> syn::Result { let _struct = input.parse::()?; - // Retrive the struct fields + // Retrieve the struct fields let mut parsed_fields: Vec = Vec::new(); for field in _struct.fields { let struct_attribute = EntityField::try_from(&field)?; diff --git a/canyon_observer/src/manager/manager_builder.rs b/canyon_observer/src/manager/manager_builder.rs index 185b4098..d717909f 100644 --- a/canyon_observer/src/manager/manager_builder.rs +++ b/canyon_observer/src/manager/manager_builder.rs @@ -56,8 +56,8 @@ pub fn generate_enum_with_fields(canyon_entity: &CanyonEntity) -> TokenStream { /// will be called though macro code to obtain the &str representation /// of the field name. 
/// - /// That's particulary useful in Canyon when working with queries being constructed - /// through the [`QueryBuilder`], when one of the methods requieres to get + /// That's particularly useful in Canyon when working with queries being constructed + /// through the [`QueryBuilder`], when one of the methods requires to get /// a column name (which is the name of some field of the type) as a parameter /// /// ``` diff --git a/canyon_observer/src/migrations/handler.rs b/canyon_observer/src/migrations/handler.rs index 4922f7d9..dfa84ef4 100644 --- a/canyon_observer/src/migrations/handler.rs +++ b/canyon_observer/src/migrations/handler.rs @@ -83,7 +83,7 @@ impl Migrations { } /// Fetches a concrete schema metadata by target the database - /// choosed by it's datasource name property + /// chosen by it's datasource name property async fn fetch_database( datasource_name: &str, db_type: DatabaseType, diff --git a/canyon_observer/src/migrations/memory.rs b/canyon_observer/src/migrations/memory.rs index 2c81a952..f5047baa 100644 --- a/canyon_observer/src/migrations/memory.rs +++ b/canyon_observer/src/migrations/memory.rs @@ -13,7 +13,7 @@ use crate::{constants, QUERIES_TO_EXECUTE}; /// file contains a `#[canyon_entity]` annotation and restricting it to just one /// annotated struct per file. /// -/// This limitation it's imposed by desing. Canyon, when manages all the entities in +/// This limitation it's imposed by design. Canyon, when manages all the entities in /// the user's source code, needs to know for future migrations the old data about a structure /// and the new modified one. /// @@ -40,7 +40,7 @@ use crate::{constants, QUERIES_TO_EXECUTE}; /// /// So, if the user wants or needs to modify the data of it's entity, Canyon can secure that will perform the /// correct operations because we can't "remember" how that entity was, and how it should be now, avoiding -/// potencially dangerous operations due to lack of knowing what entity relates with new data. 
+/// potentially dangerous operations due to lack of knowing what entity relates with new data. /// /// The `memory field` HashMap is made by the filepath as a key, and the struct's name as value #[derive(Debug)] @@ -136,7 +136,7 @@ impl CanyonMemory { .insert(datasource.name, vec![stmt]); } - // if the updated element is the struct name, whe add it to the table_rename Hashmap + // if the updated element is the struct name, we add it to the table_rename Hashmap let rename_table = old.struct_name != struct_name; if rename_table { diff --git a/canyon_observer/src/migrations/processor.rs b/canyon_observer/src/migrations/processor.rs index 1301d6fe..fb991717 100644 --- a/canyon_observer/src/migrations/processor.rs +++ b/canyon_observer/src/migrations/processor.rs @@ -16,7 +16,7 @@ use super::memory::CanyonMemory; use super::register_types::{CanyonRegisterEntity, CanyonRegisterEntityField}; /// Responsible of generating the queries to sync the database status with the -/// Rust source code managed by Canyon, for succesfully make the migrations +/// Rust source code managed by Canyon, for successfully make the migrations #[derive(Debug, Default)] pub struct MigrationsProcessor { operations: Vec>, @@ -82,7 +82,7 @@ impl MigrationsProcessor { } // Time to check annotations for the current column - // Case when we only need to add contrains + // Case when we only need to add constraints if (current_table_metadata.is_none() && !canyon_register_field.annotations.is_empty()) || (current_table_metadata.is_some() && current_column_metadata.is_none()) @@ -765,14 +765,14 @@ impl DatabaseOperation for TableOperation { /* Notes: Brackets around `old_table_name`, p.e. exec sp_rename ['league'], 'leagues' // NOT VALID! - is only allowed for compound names splitted by a dot. + is only allowed for compound names split by a dot. exec sp_rename ['random.league'], 'leagues' // OK CARE! This doesn't mean that we are including the schema. 
exec sp_rename ['dbo.random.league'], 'leagues' // OK exec sp_rename 'dbo.league', 'leagues' // OK - Schema doesn't need brackets - Due to the automatic mapped name from Rust to DB and viceversa, this won't + Due to the automatic mapped name from Rust to DB and vice-versa, this won't be an allowed behaviour for now, only with the table_name parameter on the CanyonEntity annotation. */ @@ -867,7 +867,7 @@ enum ColumnOperation { // SQL server specific operation - SQL server can't drop a NOT NULL column DropNotNullBeforeDropColumn(String, String, String), AlterColumnSetNotNull(String, CanyonRegisterEntityField), - // TODO if implement throught annotations, modify for both GENERATED {ALWAYS, BY DEFAULT} + // TODO if implement through annotations, modify for both GENERATED {ALWAYS, BY DEFAULT} AlterColumnAddIdentity(String, CanyonRegisterEntityField), AlterColumnDropIdentity(String, CanyonRegisterEntityField), } @@ -898,7 +898,7 @@ impl DatabaseOperation for ColumnOperation { todo!() }, ColumnOperation::DeleteColumn(table_name, column_name) => { - // TODO Check if operation for SQL server is diferent + // TODO Check if operation for SQL server is different format!("ALTER TABLE {table_name} DROP COLUMN {column_name};") }, ColumnOperation::AlterColumnType(table_name, entity_field) => @@ -933,9 +933,9 @@ impl DatabaseOperation for ColumnOperation { "ALTER TABLE {table_name} ALTER COLUMN {column_name} {column_datatype} NULL; DECLARE @tableName VARCHAR(MAX) = '{table_name}' DECLARE @columnName VARCHAR(MAX) = '{column_name}' DECLARE @ConstraintName nvarchar(200) - SELECT @ConstraintName = Name + SELECT @ConstraintName = Name FROM SYS.DEFAULT_CONSTRAINTS - WHERE PARENT_OBJECT_ID = OBJECT_ID(@tableName) + WHERE PARENT_OBJECT_ID = OBJECT_ID(@tableName) AND PARENT_COLUMN_ID = ( SELECT column_id FROM sys.columns WHERE NAME = @columnName AND object_id = OBJECT_ID(@tableName)) diff --git a/tests/constants.rs b/tests/constants.rs index c54cc8d1..f7804e43 100644 --- 
a/tests/constants.rs +++ b/tests/constants.rs @@ -1,4 +1,4 @@ -///! Constant values to share accross the integration tests +///! Constant values to share across the integration tests pub const PSQL_DS: &str = "postgres_docker"; pub const SQL_SERVER_DS: &str = "sqlserver_docker"; diff --git a/tests/crud/delete_operations.rs b/tests/crud/delete_operations.rs index 75dfaf1d..46d1bcaf 100644 --- a/tests/crud/delete_operations.rs +++ b/tests/crud/delete_operations.rs @@ -12,7 +12,7 @@ use crate::tests_models::league::*; /// operation, because we use that concrete field to construct the clause that targets /// that entity. /// -/// Attemp of usage the `t.delete(&self)` method on an entity without `#[primary_key]` +/// Attempt of usage the `t.delete(&self)` method on an entity without `#[primary_key]` /// will raise a runtime error. #[canyon_sql::macros::canyon_tokio_test] fn test_crud_delete_method_operation() { @@ -48,7 +48,7 @@ fn test_crud_delete_method_operation() { // To check the success, we can query by the primary key value and check if, after unwrap() // the result of the operation, the find by primary key contains Some(v) or None - // Remeber that `find_by_primary_key(&dyn QueryParameter<'a>) -> Result>, Err> + // Remember that `find_by_primary_key(&dyn QueryParameter<'a>) -> Result>, Err> assert_eq!( League::find_by_pk(&new_league.id) .await @@ -94,7 +94,7 @@ fn test_crud_delete_datasource_method_operation() { // To check the success, we can query by the primary key value and check if, after unwrap() // the result of the operation, the find by primary key contains Some(v) or None - // Remeber that `find_by_primary_key(&dyn QueryParameter<'a>) -> Result>, Err> + // Remember that `find_by_primary_key(&dyn QueryParameter<'a>) -> Result>, Err> assert_eq!( League::find_by_pk_datasource(&new_league.id, SQL_SERVER_DS) .await diff --git a/tests/crud/foreign_key_operations.rs b/tests/crud/foreign_key_operations.rs index ce0ea585..b58df802 100644 --- 
a/tests/crud/foreign_key_operations.rs +++ b/tests/crud/foreign_key_operations.rs @@ -2,7 +2,7 @@ ///! generates and executes *SELECT* statements based on a entity ///! annotated with the `#[foreign_key(... args)]` annotation looking ///! for the related data with some entity `U` that acts as is parent, where `U` -///! impls `ForeignKeyable` (isn't requiered, but it won't unlock the +///! impls `ForeignKeyable` (isn't required, but it won't unlock the ///! reverse search features parent -> child, only the child -> parent ones). /// ///! Names of the foreign key methods are autogenerated for the direct and @@ -74,7 +74,7 @@ fn test_crud_search_reverse_side_foreign_key() { .expect("Result variant of the query is err") .expect("No result found for the given parameter"); - // Computes how many tournaments are poiting to the retrieved league + // Computes how many tournaments are pointing to the retrieved league let child_tournaments: Vec = Tournament::search_league_childrens(&some_league) .await .expect("Result variant of the query is err"); @@ -94,7 +94,7 @@ fn test_crud_search_reverse_side_foreign_key_datasource() { .expect("Result variant of the query is err") .expect("No result found for the given parameter"); - // Computes how many tournaments are poiting to the retrieved league + // Computes how many tournaments are pointing to the retrieved league let child_tournaments: Vec = Tournament::search_league_childrens_datasource(&some_league, SQL_SERVER_DS) .await diff --git a/tests/crud/insert_operations.rs b/tests/crud/insert_operations.rs index 480dc06f..29c0c9fa 100644 --- a/tests/crud/insert_operations.rs +++ b/tests/crud/insert_operations.rs @@ -9,9 +9,9 @@ use crate::tests_models::league::*; /// annotated with `#[canyon_entity]` macro over a *T* type. /// /// For insert a new record on a database, the *insert* operation needs -/// some special requeriments: -/// > - We need a mutable instance of `T`. 
If the operation complets -/// succesfully, the insert operation will automatically set the autogenerated +/// some special requirements: +/// > - We need a mutable instance of `T`. If the operation completes +/// successfully, the insert operation will automatically set the autogenerated /// value for the `primary_key` annotated field in it. /// /// > - It's considered a good practice to initialize that concrete field with @@ -24,7 +24,7 @@ use crate::tests_models::league::*; /// refer to the docs [here]() for more info.) /// /// If the type hasn't a `#[primary_key]` annotation, or the annotation contains -/// an argument specifiying not autoincremental behaviour, all the fields will be +/// an argument specifying not autoincremental behaviour, all the fields will be /// inserted on the database and no returning value will be placed in any field. #[canyon_sql::macros::canyon_tokio_test] fn test_crud_insert_operation() { diff --git a/tests/crud/mod.rs b/tests/crud/mod.rs index 8568b2a9..7526c8f6 100644 --- a/tests/crud/mod.rs +++ b/tests/crud/mod.rs @@ -19,7 +19,7 @@ use canyon_sql::runtime::tokio_util::compat::TokioAsyncWriteCompatExt; /// when the docker starts. SqlServer official docker from Microsoft does /// not allow you to run `.sql` files against the database (not at least, without) /// using a workaround. So, we are going to query the `SqlServer` to check if already -/// has some data (other processes, persistance or multi-threading envs), af if not, +/// has some data (other processes, persistence or multi-threading envs), and if not, /// we are going to retrieve the inserted data on the `postgreSQL` at start-up and /// inserting into the `SqlServer` instance. /// diff --git a/tests/crud/querybuilder_operations.rs b/tests/crud/querybuilder_operations.rs index 7b46fa24..4700f598 100644 --- a/tests/crud/querybuilder_operations.rs +++ b/tests/crud/querybuilder_operations.rs @@ -1,7 +1,7 @@ ///! Tests for the QueryBuilder available operations within Canyon. 
/// ///! QueryBuilder are the way of obtain more flexibility that with -///! the default generated queries, esentially for build the queries +///! the default generated queries, essentially for build the queries ///! with the SQL filters /// use canyon_sql::{ @@ -68,7 +68,7 @@ fn test_crud_find_with_querybuilder_datasource() { assert!(!filtered_find_players.unwrap().is_empty()); } -/// Updates the values of the range on entries defined by the constraint paramenters +/// Updates the values of the range on entries defined by the constraint parameters /// in the database entity #[canyon_sql::macros::canyon_tokio_test] fn test_crud_update_with_querybuilder() { diff --git a/tests/crud/update_operations.rs b/tests/crud/update_operations.rs index d0643cae..fc7ae733 100644 --- a/tests/crud/update_operations.rs +++ b/tests/crud/update_operations.rs @@ -13,7 +13,7 @@ use crate::tests_models::league::*; /// operation, because we use that concrete field to construct the clause that targets /// that entity. /// -/// Attemp of usage the `t.update(&self)` method on an entity without `#[primary_key]` +/// Attempt of usage the `t.update(&self)` method on an entity without `#[primary_key]` /// will raise a runtime error. #[canyon_sql::macros::canyon_tokio_test] fn test_crud_update_method_operation() { diff --git a/tests/tests_models/player.rs b/tests/tests_models/player.rs index 2a06c109..59c03daa 100644 --- a/tests/tests_models/player.rs +++ b/tests/tests_models/player.rs @@ -7,7 +7,7 @@ use canyon_sql::macros::*; /// For test the behaviour of Canyon with entities that no declares primary keys, /// or that is configuration isn't autoincremental, we will use this class. /// Note that this entity has a primary key declared in the database, but we will -/// omit this in Canyon, so for us, is like if the primary key wasn't setted up. +/// omit this in Canyon, so for us, is like if the primary key wasn't set up. 
/// /// Remember that the entities that does not declares at least a field as `#[primary_key]` /// does not have all the CRUD operations available, only the ones that doesn't