From e86b8a60803c94adce767c8f44e0a06b49de28a6 Mon Sep 17 00:00:00 2001
From: Matthew Johnston
Date: Wed, 17 Mar 2021 21:05:33 -0500
Subject: [PATCH 1/3] Adds CHANGELOG

---
 CHANGELOG | 17 +++++++++++++++++
 mix.exs   |  2 +-
 2 files changed, 18 insertions(+), 1 deletion(-)
 create mode 100644 CHANGELOG

diff --git a/CHANGELOG b/CHANGELOG
new file mode 100644
index 00000000..9a7e757f
--- /dev/null
+++ b/CHANGELOG
@@ -0,0 +1,17 @@
+# Changelog
+
+All notable changes will be documented in this file.
+
+The format is based on [Keep a Changelog][keepachangelog], and this project
+adheres to [Semantic Versioning][semver].
+
+## [Unreleased]
+
+## [0.5.0] - 2021-03-17
+
+### Removed
+- Removed `Ecto.Adapters.Exqlite`. Use the [Ecto SQLite3 library][ecto_sqlite3] instead.
+
+[keepachangelog]: https://keepachangelog.com/en/1.0.0/
+[semver]: https://semver.org/spec/v2.0.0.html
+[ecto_sqlite3]: https://github.com/elixir-sqlite/ecto_sqlite3
diff --git a/mix.exs b/mix.exs
index 4ce0b9ee..ba77e7c5 100644
--- a/mix.exs
+++ b/mix.exs
@@ -4,7 +4,7 @@ defmodule Exqlite.MixProject do
   def project do
     [
       app: :exqlite,
-      version: "0.4.9",
+      version: "0.5.0",
       elixir: "~> 1.8",
       compilers: [:elixir_make] ++ Mix.compilers(),
       make_targets: ["all"],
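The CHANGELOG entry above points users at the separate ecto_sqlite3 package, whose
adapter module is `Ecto.Adapters.SQLite3`. As a rough sketch of what the switch
looks like for an application that used the removed adapter (the version
requirement and the app/module names below are illustrative, not from this patch):

```elixir
# mix.exs -- depend on ecto_sqlite3 rather than wiring exqlite into Ecto by
# hand; ecto_sqlite3 pulls in exqlite as its underlying driver.
defp deps do
  [
    # illustrative version; use the current release
    {:ecto_sqlite3, "~> 0.5"}
  ]
end

# lib/my_app/repo.ex -- swap the adapter module on the repo.
defmodule MyApp.Repo do
  use Ecto.Repo, otp_app: :my_app, adapter: Ecto.Adapters.SQLite3
end
```

The repository options that the next patch removes from the README (`database:`,
`journal_mode:`, `cache_size:`, and so on) should largely carry over, since
ecto_sqlite3 drives the same `Exqlite` connection underneath.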
From b33912735736dc4bdb36433ed08a37f94d214591 Mon Sep 17 00:00:00 2001
From: Matthew Johnston
Date: Wed, 17 Mar 2021 21:06:18 -0500
Subject: [PATCH 2/3] Remove ecto adapter in favor of ecto_sqlite3

---
 .github/workflows/main.yml                    |    1 -
 README.md                                     |   58 +-
 bench/README.md                               |   36 -
 bench/bench_helper.exs                        |    7 -
 bench/scripts/macro/all_bench.exs             |   53 -
 bench/scripts/macro/insert_bench.exs          |   46 -
 bench/scripts/micro/load_bench.exs            |   55 -
 bench/scripts/micro/to_sql_bench.exs          |   64 -
 bench/support/migrations.exs                  |   15 -
 bench/support/repo.exs                        |   43 -
 bench/support/schemas.exs                     |   52 -
 bench/support/setup.exs                       |   26 -
 integration_test/exqlite/all_test.exs         |   31 -
 integration_test/exqlite/ecto/interval.exs    |  423 ---
 integration_test/exqlite/ecto/type.exs        |  492 ---
 .../exqlite/ecto_sql/migration.exs            |  623 ----
 .../exqlite/ecto_sql/migrator.exs             |  243 --
 integration_test/exqlite/test_helper.exs      |  120 -
 lib/ecto/adapters/exqlite.ex                  |  256 --
 lib/ecto/adapters/exqlite/codec.ex            |   98 -
 lib/ecto/adapters/exqlite/connection.ex       | 1704 ----------
 lib/ecto/adapters/exqlite/data_type.ex        |   48 -
 mix.exs                                       |    9 -
 mix.lock                                      |   11 -
 test/ecto/adapters/exqlite/codec_test.exs     |   83 -
 .../ecto/adapters/exqlite/connection_test.exs | 2745 -----------------
 test/ecto/adapters/exqlite/data_type_test.exs |   83 -
 test/ecto/adapters/exqlite_test.exs           |   54 -
 test/ecto/integration/crud_test.exs           |  195 --
 test/ecto/integration/math_test.exs           |  138 -
 test/ecto/integration/streaming_test.exs      |   30 -
 test/ecto/integration/timestamps_test.exs     |   76 -
 test/ecto/integration/uuid_test.exs           |   18 -
 test/support/migration.ex                     |   40 -
 test/support/repo.ex                          |   15 -
 test/support/schemas.ex                       |  113 -
 test/test_helper.exs                          |   42 -
 37 files changed, 9 insertions(+), 8137 deletions(-)
 delete mode 100644 bench/README.md
 delete mode 100644 bench/bench_helper.exs
 delete mode 100644 bench/scripts/macro/all_bench.exs
 delete mode 100644 bench/scripts/macro/insert_bench.exs
 delete mode 100644 bench/scripts/micro/load_bench.exs
 delete mode 100644 bench/scripts/micro/to_sql_bench.exs
 delete mode 100644 bench/support/migrations.exs
 delete mode 100644 bench/support/repo.exs
 delete mode 100644 bench/support/schemas.exs
 delete mode 100644 bench/support/setup.exs
 delete mode 100644 integration_test/exqlite/all_test.exs
 delete mode 100644 integration_test/exqlite/ecto/interval.exs
 delete mode 100644 integration_test/exqlite/ecto/type.exs
 delete mode 100644 integration_test/exqlite/ecto_sql/migration.exs
 delete mode 100644 integration_test/exqlite/ecto_sql/migrator.exs
 delete mode 100644 integration_test/exqlite/test_helper.exs
 delete mode 100644 lib/ecto/adapters/exqlite.ex
 delete mode 100644 lib/ecto/adapters/exqlite/codec.ex
 delete mode 100644 lib/ecto/adapters/exqlite/connection.ex
 delete mode 100644 lib/ecto/adapters/exqlite/data_type.ex
 delete mode 100644 test/ecto/adapters/exqlite/codec_test.exs
 delete mode 100644 test/ecto/adapters/exqlite/connection_test.exs
 delete mode 100644 test/ecto/adapters/exqlite/data_type_test.exs
 delete mode 100644 test/ecto/adapters/exqlite_test.exs
 delete mode 100644 test/ecto/integration/crud_test.exs
 delete mode 100644 test/ecto/integration/math_test.exs
 delete mode 100644 test/ecto/integration/streaming_test.exs
 delete mode 100644 test/ecto/integration/timestamps_test.exs
 delete mode 100644 test/ecto/integration/uuid_test.exs
 delete mode 100644 test/support/migration.ex
 delete mode 100644 test/support/repo.ex
 delete mode 100644 test/support/schemas.ex

diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml
index 0319dc0e..4f7f6835 100644
--- a/.github/workflows/main.yml
+++ b/.github/workflows/main.yml
@@ -21,4 +21,3 @@ jobs:
           mix deps.get
       - run: mix test
-      - run: EXQLITE_INTEGRATION=true mix test
diff --git a/README.md b/README.md
index eef3553c..cd520377 100644
--- a/README.md
+++ b/README.md
@@ -1,6 +1,9 @@
 # Exqlite
 
-An SQLite3 library with an Ecto adapter implementation.
+An Elixir SQLite3 library.
+
+If you are looking for the Ecto adapter, take a look at the
+[Ecto SQLite3 library][ecto_sqlite3].
 
 ## Caveats
 
@@ -9,9 +12,8 @@
 * Prepared statements are not immutable. You must be careful when manipulating
   statements and binding values to statements. Do not try to manipulate the
   statements concurrently. Keep it isolated to one process.
-* Adding a `CHECK` constraint is not supported by the Ecto adapter. This is due
-  to how Ecto handles specifying constraints. In SQLite you must specify the
-  `CHECK` on creation.
+* Asynchronous writing is not supported by SQLite3 and will not be supported
+  here.
 * All native calls are run through the Dirty NIF scheduler.
 * Datetimes are stored without offsets. This is due to how SQLite3 handles date
   and times. If you would like to store a timezone, you will need to create a
@@ -24,12 +26,12 @@
 
 ```elixir
 defp deps do
-    {:exqlite, "~> 0.4.9"}
+    [{:exqlite, "~> 0.5.0"}]
 end
 ```
 
-## Usage Without Ecto
+## Usage
 
 The `Exqlite.Sqlite3` module usage is fairly straightforward.
 
 ```
-
-## Usage With Ecto
-
-Define your repo similar to this.
-
-```elixir
-defmodule MyApp.Repo do
-  use Ecto.Repo, otp_app: :my_app, adapter: Ecto.Adapters.Exqlite
-end
-```
-
-Configure your repository similar to the following. If you want to know more
-about the possible options to pass the repository, checkout the documentation
-for `Exqlite.Connection.connect/1`. It will have more information on what is
-configurable.
-
-```elixir
-config :my_app,
-  ecto_repos: [MyApp.Repo]
-
-config :my_app, MyApp.Repo,
-  database: "path/to/my/database.db",
-  show_sensitive_data_on_connection_error: false,
-  journal_mode: :wal,
-  cache_size: -64000,
-  temp_store: :memory,
-  pool_size: 1
-```
-
-
-### Note
-
-* Pool size is set to `1` but can be increased to `4`. When set to `10` there
-  were a lot of database busy errors. Currently this is a known issue and is
-  being looked into.
-
-* Cache size is a negative number because that is how SQLite3 defines the cache
-  size in kilobytes. If you make it positive, that is the number of pages in
-  memory to use. Both have their pros and cons. Check out the documentation for
-  [SQLite3][2].
-
-
 ## Why SQLite3
 
 I needed an Ecto3 adapter to store time series data for a personal project. I
 complicated and error prone.
 
 Feel free to check the project out and submit pull requests.
 
-[1]:
-[2]:
+[ecto_sqlite3]: https://github.com/elixir-sqlite/ecto_sqlite3
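The prepared-statement caveat above ("keep it isolated to one process") is easiest
to honor by funneling all statement work through a single owning process. A minimal
sketch of that pattern — not part of this patch, with illustrative module and
function names, and assuming exqlite's `open`/`prepare`/`bind`/`step`/`release`
functions on `Exqlite.Sqlite3`:

```elixir
defmodule MyApp.DB do
  # Owns the SQLite3 connection. Because handle_call/3 processes one message
  # at a time, no two callers ever touch the connection or a prepared
  # statement concurrently.
  use GenServer

  def start_link(path), do: GenServer.start_link(__MODULE__, path, name: __MODULE__)

  # All callers funnel through this single process.
  def query(sql, args \\ []), do: GenServer.call(__MODULE__, {:query, sql, args})

  @impl true
  def init(path), do: Exqlite.Sqlite3.open(path)

  @impl true
  def handle_call({:query, sql, args}, _from, conn) do
    {:ok, stmt} = Exqlite.Sqlite3.prepare(conn, sql)
    :ok = Exqlite.Sqlite3.bind(conn, stmt, args)
    rows = step_all(conn, stmt, [])
    :ok = Exqlite.Sqlite3.release(conn, stmt)
    {:reply, rows, conn}
  end

  # Pull rows until the statement reports :done.
  defp step_all(conn, stmt, acc) do
    case Exqlite.Sqlite3.step(conn, stmt) do
      {:row, row} -> step_all(conn, stmt, [row | acc])
      :done -> Enum.reverse(acc)
    end
  end
end
```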
diff --git a/bench/README.md b/bench/README.md
deleted file mode 100644
index 6f0d5eec..00000000
--- a/bench/README.md
+++ /dev/null
@@ -1,36 +0,0 @@
-# Ecto Benchmarks
-
-Ecto has a benchmark suite to track performance of sensitive operations. Benchmarks
-are run using the [Benchee](https://github.com/PragTob/benchee) library and
-need PostgreSQL and MySQL up and running.
-
-To run the benchmark tests, just type in the console:
-
-```
-# POSIX-compatible shells
-$ MIX_ENV=bench mix run bench/bench_helper.exs
-```
-
-```
-# other shells
-$ env MIX_ENV=bench mix run bench/bench_helper.exs
-```
-
-Benchmarks are inside the `scripts/` directory and are divided into two
-categories:
-
-* `micro benchmarks`: Operations that don't actually interface with the database,
-but might need it up and running to start the Ecto agents and processes.
-
-* `macro benchmarks`: Operations that are actually run in the database. These are
-closer to integration tests.
-
-You can also run a benchmark individually by giving the path to the benchmark
-script instead of `bench/bench_helper.exs`.
-
-# Docker
-I had Postgres already installed and running locally, but needed to get MySQL up
-and running. The easiest way to do this is with this command:
-
-```
-docker run -p 3306:3306 --name mysql_server -e MYSQL_ALLOW_EMPTY_PASSWORD=yes mysql:5.7
-```
diff --git a/bench/bench_helper.exs b/bench/bench_helper.exs
deleted file mode 100644
index 16565c65..00000000
--- a/bench/bench_helper.exs
+++ /dev/null
@@ -1,7 +0,0 @@
-# Micro benchmarks
-Code.require_file("scripts/micro/load_bench.exs", __DIR__)
-Code.require_file("scripts/micro/to_sql_bench.exs", __DIR__)
-
-## Macro benchmarks need PostgreSQL and MySQL up and running
-Code.require_file("scripts/macro/insert_bench.exs", __DIR__)
-Code.require_file("scripts/macro/all_bench.exs", __DIR__)
diff --git a/bench/scripts/macro/all_bench.exs b/bench/scripts/macro/all_bench.exs
deleted file mode 100644
index 2de6e3c1..00000000
--- a/bench/scripts/macro/all_bench.exs
+++ /dev/null
@@ -1,53 +0,0 @@
-# -----------------------------------Goal--------------------------------------
-# Compare the performance of querying all objects of the different supported
-# databases
-
-# -------------------------------Description-----------------------------------
-# This benchmark tracks performance of querying a set of objects registered in
-# the database with the Repo.all/2 function. The query passes through the steps
-# of translating the SQL statements, sending them to the database, and loading
-# the results into Ecto structures. Both the Ecto adapters and the database
-# itself play a role and can affect the results of this benchmark.
-
-# ----------------------------Factors(don't change)---------------------------
-# Different adapters supported by Ecto with the proper database up and running
-
-# ----------------------------Parameters(change)-------------------------------
-# There is only one parameter in this benchmark: the User objects to be
-# fetched.
-
-Code.require_file("../../support/setup.exs", __DIR__)
-
-alias Ecto.Bench.User
-
-limit = 5_000
-
-users =
-  1..limit
-  |> Enum.map(fn _ -> User.sample_data() end)
-
-# We need to insert data to fetch
-Ecto.Bench.PgRepo.insert_all(User, users)
-Ecto.Bench.MyXQLRepo.insert_all(User, users)
-
-jobs = %{
-  "Pg Repo.all/2" => fn -> Ecto.Bench.PgRepo.all(User, limit: limit) end,
-  "MyXQL Repo.all/2" => fn -> Ecto.Bench.MyXQLRepo.all(User, limit: limit) end
-}
-
-path = System.get_env("BENCHMARKS_OUTPUT_PATH") || "bench/results"
-file = Path.join(path, "all.json")
-
-Benchee.run(
-  jobs,
-  formatters: [Benchee.Formatters.JSON, Benchee.Formatters.Console],
-  formatter_options: [json: [file: file]],
-  time: 10,
-  after_each: fn results ->
-    ^limit = length(results)
-  end
-)
-
-# Clean inserted data
-Ecto.Bench.PgRepo.delete_all(User)
-Ecto.Bench.MyXQLRepo.delete_all(User)
diff --git a/bench/scripts/macro/insert_bench.exs b/bench/scripts/macro/insert_bench.exs
deleted file mode 100644
index 8181a7ae..00000000
--- a/bench/scripts/macro/insert_bench.exs
+++ /dev/null
@@ -1,46 +0,0 @@
-# -----------------------------------Goal--------------------------------------
-# Compare the performance of inserting changesets and structs in the different
-# supported databases
-
-# -------------------------------Description-----------------------------------
-# This benchmark tracks performance of inserting changesets and structs in the
-# database with the Repo.insert!/1 function. The query passes through the steps
-# of translating the SQL statements, sending them to the database, and
-# returning the result of the transaction. Both the Ecto adapters and the
-# database itself play a role and can affect the results of this benchmark.
-
-# ----------------------------Factors(don't change)---------------------------
-# Different adapters supported by Ecto with the proper database up and running
-
-# ----------------------------Parameters(change)-------------------------------
-# Different inputs to be inserted, aka Changesets and Structs
-
-Code.require_file("../../support/setup.exs", __DIR__)
-
-alias Ecto.Bench.User
-
-inputs = %{
-  "Struct" => struct(User, User.sample_data()),
-  "Changeset" => User.changeset(User.sample_data())
-}
-
-jobs = %{
-  "Exqlite Insert" => fn entry -> Ecto.Bench.ExqliteRepo.insert!(entry) end,
-  "Pg Insert" => fn entry -> Ecto.Bench.PgRepo.insert!(entry) end,
-  "MyXQL Insert" => fn entry -> Ecto.Bench.MyXQLRepo.insert!(entry) end
-}
-
-path = System.get_env("BENCHMARKS_OUTPUT_PATH") || "bench/results"
-file = Path.join(path, "insert.json")
-
-Benchee.run(
-  jobs,
-  inputs: inputs,
-  formatters: [Benchee.Formatters.JSON, Benchee.Formatters.Console],
-  formatter_options: [json: [file: file]]
-)
-
-# Clean inserted data
-Ecto.Bench.ExqliteRepo.delete_all(User)
-Ecto.Bench.PgRepo.delete_all(User)
-Ecto.Bench.MyXQLRepo.delete_all(User)
diff --git a/bench/scripts/micro/load_bench.exs b/bench/scripts/micro/load_bench.exs
deleted file mode 100644
index 62635518..00000000
--- a/bench/scripts/micro/load_bench.exs
+++ /dev/null
@@ -1,55 +0,0 @@
-# -----------------------------------Goal--------------------------------------
-# Compare the implementation of loading raw database data into Ecto structures by
-# the different database adapters
-
-# -------------------------------Description-----------------------------------
-# Repo.load/2 is an important step of a database query.
-# This benchmark tracks performance of loading "raw" data into Ecto structures.
-# Raw data can come in different types (e.g. keyword lists, maps); in these
-# tests we benchmark against map inputs.
-
-# ----------------------------Factors(don't change)---------------------------
-# Different adapters supported by Ecto, each one has its own implementation that
-# is tested against different inputs
-
-# ----------------------------Parameters(change)-------------------------------
-# Different sizes of raw data (small, medium, big) and different attribute types
-# such as UUID, Date and Time fetched from the database that need to be
-# loaded into Ecto structures.
-
-Code.require_file("../../support/setup.exs", __DIR__)
-
-alias Ecto.Bench.User
-
-inputs = %{
-  "Small 1 Thousand" =>
-    1..1_000 |> Enum.map(fn _ -> %{name: "Alice", email: "email@email.com"} end),
-  "Medium 100 Thousand" =>
-    1..100_000 |> Enum.map(fn _ -> %{name: "Alice", email: "email@email.com"} end),
-  "Big 1 Million" =>
-    1..1_000_000 |> Enum.map(fn _ -> %{name: "Alice", email: "email@email.com"} end),
-  "Time attr" =>
-    1..100_000 |> Enum.map(fn _ -> %{name: "Alice", time_attr: ~T[21:25:04.361140]} end),
-  "Date attr" => 1..100_000 |> Enum.map(fn _ -> %{name: "Alice", date_attr: ~D[2018-06-20]} end),
-  "NaiveDateTime attr" =>
-    1..100_000
-    |> Enum.map(fn _ -> %{name: "Alice", naive_datetime_attr: ~N[2019-06-20 21:32:07.424178]} end),
-  "UUID attr" =>
-    1..100_000
-    |> Enum.map(fn _ -> %{name: "Alice", uuid: Ecto.UUID.bingenerate()} end)
-}
-
-jobs = %{
-  "Pg Loader" => fn data -> Enum.map(data, &Ecto.Bench.PgRepo.load(User, &1)) end,
-  "MyXQL Loader" => fn data -> Enum.map(data, &Ecto.Bench.MyXQLRepo.load(User, &1)) end
-}
-
-path = System.get_env("BENCHMARKS_OUTPUT_PATH") || "bench/results"
-file = Path.join(path, "load.json")
-
-Benchee.run(
-  jobs,
-  inputs: inputs,
-  formatters: [Benchee.Formatters.JSON, Benchee.Formatters.Console],
-  formatter_options: [json: [file: file]]
-)
diff --git a/bench/scripts/micro/to_sql_bench.exs b/bench/scripts/micro/to_sql_bench.exs
deleted file mode 100644
index 138750f5..00000000
--- a/bench/scripts/micro/to_sql_bench.exs
+++ /dev/null
@@ -1,64 +0,0 @@
-# -----------------------------------Goal--------------------------------------
-# Compare the implementation of parsing Ecto.Query objects into SQL queries by
-# the different database adapters
-
-# -------------------------------Description-----------------------------------
-# Repo.to_sql/2 is an important step of a database query.
-# This benchmark tracks performance of parsing Ecto.Query structures into
-# "raw" SQL query strings.
-# Different Ecto.Query objects have multiple combinations and some different
-# attributes depending on the query type. In these tests we benchmark against
-# different query types and complexity.
-
-# ----------------------------Factors(don't change)---------------------------
-# Different adapters supported by Ecto, each one has its own implementation that
-# is tested against different query inputs
-
-# ----------------------------Parameters(change)-------------------------------
-# Different query objects (select, delete, update) to be translated into pure SQL
-# strings.
-
-Code.require_file("../../support/setup.exs", __DIR__)
-
-import Ecto.Query
-
-alias Ecto.Bench.{User, Game}
-
-inputs = %{
-  "Ordinary Select All" => {:all, from(User)},
-  "Ordinary Delete All" => {:delete_all, from(User)},
-  "Ordinary Update All" => {:update_all, from(User, update: [set: [name: "Thor"]])},
-  "Ordinary Where" => {:all, from(User, where: [name: "Thanos", email: "blah@blah"])},
-  "Fetch First Registry" => {:all, first(User)},
-  "Fetch Last Registry" => {:all, last(User)},
-  "Ordinary Order By" => {:all, order_by(User, desc: :name)},
-  "Complex Query 2 Joins" =>
-    {:all,
-     from(User, where: [name: "Thanos"])
-     |> join(:left, [u], ux in User, on: u.id == ux.id)
-     |> join(:right, [j], uj in User, on: j.id == 1 and j.email == "email@email")
-     |> select([u, ux], {u.name, ux.email})},
-  "Complex Query 4 Joins" =>
-    {:all,
-     from(User)
-     |> join(:left, [u], g in Game, on: g.name == u.name)
-     |> join(:right, [g], u in User, on: g.id == 1 and u.email == "email@email")
-     |> join(:inner, [u], g in fragment("SELECT * from games where game.id = ?", u.id))
-     |> join(:left, [g], u in fragment("SELECT * from users = ?", g.id))
-     |> select([u, g], {u.name, g.price})}
-}
-
-jobs = %{
-  "Pg Query Builder" => fn {type, query} -> Ecto.Bench.PgRepo.to_sql(type, query) end,
-  "MyXQL Query Builder" => fn {type, query} -> Ecto.Bench.MyXQLRepo.to_sql(type, query) end
-}
-
-path = System.get_env("BENCHMARKS_OUTPUT_PATH") || "bench/results"
-file = Path.join(path, "to_sql.json")
-
-Benchee.run(
-  jobs,
-  inputs: inputs,
-  formatters: [Benchee.Formatters.JSON, Benchee.Formatters.Console],
-  formatter_options: [json: [file: file]]
-)
diff --git a/bench/support/migrations.exs b/bench/support/migrations.exs
deleted file mode 100644
index 348016db..00000000
--- a/bench/support/migrations.exs
+++ /dev/null
@@ -1,15 +0,0 @@
-defmodule Ecto.Bench.CreateUser do
-  use Ecto.Migration
-
-  def change do
-    create table(:users) do
-      add(:name, :string)
-      add(:email, :string)
-      add(:password, :string)
-      add(:time_attr, :time)
-      add(:date_attr, :date)
-      add(:naive_datetime_attr, :naive_datetime)
-      add(:uuid, :binary_id)
-    end
-  end
-end
diff --git a/bench/support/repo.exs b/bench/support/repo.exs
deleted file mode 100644
index 0322a018..00000000
--- a/bench/support/repo.exs
+++ /dev/null
@@ -1,43 +0,0 @@
-pg_bench_url = System.get_env("PG_URL") || "postgres:postgres@localhost"
-myxql_bench_url = System.get_env("MYXQL_URL") || "root@localhost"
-
-Application.put_env(
-  :ecto_sql,
-  Ecto.Bench.PgRepo,
-  url: "ecto://" <> pg_bench_url <> "/ecto_test",
-  adapter: Ecto.Adapters.Postgres,
-  show_sensitive_data_on_connection_error: true
-)
-
-Application.put_env(
-  :ecto_sql,
-  Ecto.Bench.MyXQLRepo,
-  url: "ecto://" <> myxql_bench_url <> "/ecto_test_myxql",
-  adapter: Ecto.Adapters.MyXQL,
-  protocol: :tcp,
-  show_sensitive_data_on_connection_error: true
-)
-
-Application.put_env(
-  :ecto_sql,
-  Ecto.Bench.ExqliteRepo,
-  adapter: Ecto.Adapters.Exqlite,
-  database: "/tmp/exqlite_bench.db",
-  journal_mode: :wal,
-  cache_size: -64000,
-  temp_store: :memory,
-  pool_size: 5,
-  show_sensitive_data_on_connection_error: true
-)
-
-defmodule Ecto.Bench.PgRepo do
-  use Ecto.Repo, otp_app: :ecto_sql, adapter: Ecto.Adapters.Postgres, log: false
-end
-
-defmodule Ecto.Bench.MyXQLRepo do
-  use Ecto.Repo, otp_app: :ecto_sql, adapter: Ecto.Adapters.MyXQL, log: false
-end
-
-defmodule Ecto.Bench.ExqliteRepo do
-  use Ecto.Repo, otp_app: :ecto_sql, adapter: Ecto.Adapters.Exqlite, log: false
-end
diff --git a/bench/support/schemas.exs b/bench/support/schemas.exs
deleted file mode 100644
index 9a58173d..00000000
--- a/bench/support/schemas.exs
+++ /dev/null
@@ -1,52 +0,0 @@
-defmodule Ecto.Bench.User do
-  use Ecto.Schema
-
-  schema "users" do
-    field(:name, :string)
-    field(:email, :string)
-    field(:password, :string)
-    field(:time_attr, :time)
-    field(:date_attr, :date)
-    field(:naive_datetime_attr, :naive_datetime)
-    field(:uuid, :binary_id)
-  end
-
-  @required_attrs [
-    :name,
-    :email,
-    :password,
-    :time_attr,
-    :date_attr,
-    :naive_datetime_attr,
-    :uuid
-  ]
-
-  def changeset() do
-    changeset(sample_data())
-  end
-
-  def changeset(data) do
-    Ecto.Changeset.cast(%__MODULE__{}, data, @required_attrs)
-  end
-
-  def sample_data do
-    %{
-      name: "Lorem ipsum dolor sit amet, consectetur adipiscing elit.",
-      email: "foobar@email.com",
-      password: "mypass",
-      time_attr: Time.utc_now() |> Time.truncate(:second),
-      date_attr: Date.utc_today(),
-      naive_datetime_attr: NaiveDateTime.utc_now() |> NaiveDateTime.truncate(:second),
-      uuid: Ecto.UUID.generate()
-    }
-  end
-end
-
-defmodule Ecto.Bench.Game do
-  use Ecto.Schema
-
-  schema "games" do
-    field(:name, :string)
-    field(:price, :float)
-  end
-end
diff --git a/bench/support/setup.exs b/bench/support/setup.exs
deleted file mode 100644
index 90eb19b0..00000000
--- a/bench/support/setup.exs
+++ /dev/null
@@ -1,26 +0,0 @@
-Code.require_file("repo.exs", __DIR__)
-Code.require_file("migrations.exs", __DIR__)
-Code.require_file("schemas.exs", __DIR__)
-
-alias Ecto.Bench.{PgRepo, MyXQLRepo, ExqliteRepo, CreateUser}
-
-{:ok, _} = Ecto.Adapters.Postgres.ensure_all_started(PgRepo.config(), :temporary)
-{:ok, _} = Ecto.Adapters.MyXQL.ensure_all_started(MyXQLRepo.config(), :temporary)
-{:ok, _} = Ecto.Adapters.Exqlite.ensure_all_started(ExqliteRepo.config(), :temporary)
-
-_ = Ecto.Adapters.Postgres.storage_down(PgRepo.config())
-:ok = Ecto.Adapters.Postgres.storage_up(PgRepo.config())
-
-_ = Ecto.Adapters.MyXQL.storage_down(MyXQLRepo.config())
-:ok = Ecto.Adapters.MyXQL.storage_up(MyXQLRepo.config())
-
-_ = Ecto.Adapters.Exqlite.storage_down(ExqliteRepo.config())
-:ok = Ecto.Adapters.Exqlite.storage_up(ExqliteRepo.config())
-
-{:ok, _pid} = PgRepo.start_link(log: false)
-{:ok, _pid} = MyXQLRepo.start_link(log: false)
-{:ok, _pid} = ExqliteRepo.start_link(log: false)
-
-:ok = Ecto.Migrator.up(PgRepo, 0, CreateUser, log: false)
-:ok = Ecto.Migrator.up(MyXQLRepo, 0, CreateUser, log: false)
-:ok = Ecto.Migrator.up(ExqliteRepo, 0, CreateUser, log: false)
diff --git a/integration_test/exqlite/all_test.exs b/integration_test/exqlite/all_test.exs
deleted file mode 100644
index 036ad3b7..00000000
--- a/integration_test/exqlite/all_test.exs
+++ /dev/null
@@ -1,31 +0,0 @@
-ecto = Mix.Project.deps_paths()[:ecto]
-
-Code.require_file "#{ecto}/integration_test/cases/assoc.exs", __DIR__
-Code.require_file "#{ecto}/integration_test/cases/joins.exs", __DIR__
-Code.require_file "#{ecto}/integration_test/cases/preload.exs", __DIR__
-Code.require_file "#{ecto}/integration_test/cases/repo.exs", __DIR__
-Code.require_file "#{ecto}/integration_test/cases/windows.exs", __DIR__
-
-# Since sqlite does not have microsecond precision we forked these tests
-# and added some additional tests for datetime types
-Code.require_file "./ecto/interval.exs", __DIR__
-
-# we also added some fixes to their decimal precision tests
-# we also added the :like_match_blob tag
-Code.require_file "./ecto/type.exs", __DIR__
-
-
-ecto_sql = Mix.Project.deps_paths()[:ecto_sql]
-# Code.require_file "#{ecto_sql}/integration_test/sql/lock.exs", __DIR__
-Code.require_file "#{ecto_sql}/integration_test/sql/logging.exs", __DIR__
-Code.require_file "#{ecto_sql}/integration_test/sql/sandbox.exs", __DIR__
-Code.require_file "#{ecto_sql}/integration_test/sql/sql.exs", __DIR__
-Code.require_file "#{ecto_sql}/integration_test/sql/stream.exs", __DIR__
-Code.require_file "#{ecto_sql}/integration_test/sql/subquery.exs", __DIR__
-Code.require_file "#{ecto_sql}/integration_test/sql/transaction.exs", __DIR__
-
-# added :modify_column and :alter_foreign_key
-Code.require_file "./ecto_sql/migration.exs", __DIR__
-
-# added :prefix and :lock_for_migrations
-Code.require_file "./ecto_sql/migrator.exs", __DIR__
diff --git a/integration_test/exqlite/ecto/interval.exs b/integration_test/exqlite/ecto/interval.exs
deleted file mode 100644
index cf080b2d..00000000
--- a/integration_test/exqlite/ecto/interval.exs
+++ /dev/null
@@ -1,423 +0,0 @@
-defmodule Ecto.Integration.IntervalTest do
-  use Ecto.Integration.Case, async: Application.get_env(:ecto, :async_integration_tests, true)
-
-  alias Ecto.Integration.{Post, User, Usec}
-  alias Ecto.Integration.TestRepo
-  import Ecto.Query
-
-  @posted ~D[2014-01-01]
-  @inserted_at ~N[2014-01-01 02:00:00]
-
-  setup do
-    TestRepo.insert!(%Post{posted: @posted, inserted_at: @inserted_at})
-    :ok
-  end
-
-  test "date_add with year" do
-    dec = Decimal.new(1)
-    assert [~D[2015-01-01]] = TestRepo.all(from p in Post, select: date_add(p.posted, 1, "year"))
-    assert [~D[2015-01-01]] = TestRepo.all(from p in Post, select: date_add(p.posted, 1.0, "year"))
-    assert [~D[2015-01-01]] = TestRepo.all(from p in Post, select: date_add(p.posted, ^1, "year"))
-    assert [~D[2015-01-01]] = TestRepo.all(from p in Post, select: date_add(p.posted, ^1.0, "year"))
-    assert [~D[2015-01-01]] = TestRepo.all(from p in Post, select: date_add(p.posted, ^dec, "year"))
-  end
-
-  test "date_add with month" do
-    dec = Decimal.new(3)
-    assert [~D[2014-04-01]] = TestRepo.all(from p in Post, select: date_add(p.posted, 3, "month"))
-    assert [~D[2014-04-01]] = TestRepo.all(from p in Post, select: date_add(p.posted, 3.0, "month"))
-    assert [~D[2014-04-01]] = TestRepo.all(from p in Post, select: date_add(p.posted, ^3, "month"))
-    assert [~D[2014-04-01]] = TestRepo.all(from p in Post, select: date_add(p.posted, ^3.0, "month"))
-    assert [~D[2014-04-01]] = TestRepo.all(from p in Post, select: date_add(p.posted, ^dec, "month"))
-  end
-
-  test "date_add with week" do
-    dec = Decimal.new(3)
-    assert [~D[2014-01-22]] = TestRepo.all(from p in Post, select: date_add(p.posted, 3, "week"))
-    assert [~D[2014-01-22]] = TestRepo.all(from p in Post, select: date_add(p.posted, 3.0, "week"))
-    assert [~D[2014-01-22]] = TestRepo.all(from p in Post, select: date_add(p.posted, ^3, "week"))
-    assert [~D[2014-01-22]] = TestRepo.all(from p in Post, select: date_add(p.posted, ^3.0, "week"))
-    assert [~D[2014-01-22]] = TestRepo.all(from p in Post, select: date_add(p.posted, ^dec, "week"))
-  end
-
-  test "date_add with day" do
-    dec = Decimal.new(5)
-    assert [~D[2014-01-06]] = TestRepo.all(from p in Post, select: date_add(p.posted, 5, "day"))
-    assert [~D[2014-01-06]] = TestRepo.all(from p in Post, select: date_add(p.posted, 5.0, "day"))
-    assert [~D[2014-01-06]] = TestRepo.all(from p in Post, select: date_add(p.posted, ^5, "day"))
-    assert [~D[2014-01-06]] = TestRepo.all(from p in Post, select: date_add(p.posted, ^5.0, "day"))
-    assert [~D[2014-01-06]] = TestRepo.all(from p in Post, select: date_add(p.posted, ^dec, "day"))
-  end
-
-  test "date_add with hour" do
-    dec = Decimal.new(48)
-    assert [~D[2014-01-03]] = TestRepo.all(from p in Post, select: date_add(p.posted, 48, "hour"))
-    assert [~D[2014-01-03]] = TestRepo.all(from p in Post, select: date_add(p.posted, 48.0, "hour"))
-    assert [~D[2014-01-03]] = TestRepo.all(from p in Post, select: date_add(p.posted, ^48, "hour"))
-    assert [~D[2014-01-03]] = TestRepo.all(from p in Post, select: date_add(p.posted, ^48.0, "hour"))
-    assert [~D[2014-01-03]] = TestRepo.all(from p in Post, select: date_add(p.posted, ^dec, "hour"))
-  end
-
-  test "date_add with dynamic" do
-    posted = @posted
-    assert [~D[2015-01-01]] = TestRepo.all(from p in Post, select: date_add(^posted, ^1, ^"year"))
-    assert [~D[2014-04-01]] = TestRepo.all(from p in Post, select: date_add(^posted, ^3, ^"month"))
-    assert [~D[2014-01-22]] = TestRepo.all(from p in Post, select: date_add(^posted, ^3, ^"week"))
-    assert [~D[2014-01-06]] = TestRepo.all(from p in Post, select: date_add(^posted, ^5, ^"day"))
-    assert [~D[2014-01-03]] = TestRepo.all(from p in Post, select: date_add(^posted, ^48, ^"hour"))
-  end
-
-  test "date_add with negative interval" do
-    dec = Decimal.new(-1)
-    assert [~D[2013-01-01]] = TestRepo.all(from p in Post, select: date_add(p.posted, -1, "year"))
-    assert [~D[2013-01-01]] = TestRepo.all(from p in Post, select: date_add(p.posted, -1.0, "year"))
-    assert [~D[2013-01-01]] = TestRepo.all(from p in Post, select: date_add(p.posted, ^-1, "year"))
-    assert [~D[2013-01-01]] = TestRepo.all(from p in Post, select: date_add(p.posted, ^-1.0, "year"))
-    assert [~D[2013-01-01]] = TestRepo.all(from p in Post, select: date_add(p.posted, ^dec, "year"))
-  end
-
-  test "datetime_add with year" do
-    dec = Decimal.new(1)
-    assert [~N[2015-01-01 02:00:00]] =
-             TestRepo.all(from p in Post, select: datetime_add(p.inserted_at, 1, "year"))
-    assert [~N[2015-01-01 02:00:00]] =
-             TestRepo.all(from p in Post, select: datetime_add(p.inserted_at, 1.0, "year"))
-    assert [~N[2015-01-01 02:00:00]] =
-             TestRepo.all(from p in Post, select: datetime_add(p.inserted_at, ^1, "year"))
-    assert [~N[2015-01-01 02:00:00]] =
-             TestRepo.all(from p in Post, select: datetime_add(p.inserted_at, ^1.0, "year"))
-    assert [~N[2015-01-01 02:00:00]] =
-             TestRepo.all(from p in Post, select: datetime_add(p.inserted_at, ^dec, "year"))
-  end
-
-  test "datetime_add with month" do
-    dec = Decimal.new(3)
-    assert [~N[2014-04-01 02:00:00]] =
-             TestRepo.all(from p in Post, select: datetime_add(p.inserted_at, 3, "month"))
-    assert [~N[2014-04-01 02:00:00]] =
-             TestRepo.all(from p in Post, select: datetime_add(p.inserted_at, 3.0, "month"))
-    assert [~N[2014-04-01 02:00:00]] =
-             TestRepo.all(from p in Post, select: datetime_add(p.inserted_at, ^3, "month"))
-    assert [~N[2014-04-01 02:00:00]] =
-             TestRepo.all(from p in Post, select: datetime_add(p.inserted_at, ^3.0, "month"))
-    assert [~N[2014-04-01 02:00:00]] =
-             TestRepo.all(from p in Post, select: datetime_add(p.inserted_at, ^dec, "month"))
-  end
-
-  test "datetime_add with week" do
-    dec = Decimal.new(3)
-    assert [~N[2014-01-22 02:00:00]] =
-             TestRepo.all(from p in Post, select: datetime_add(p.inserted_at, 3, "week"))
-    assert [~N[2014-01-22 02:00:00]] =
-             TestRepo.all(from p in Post, select: datetime_add(p.inserted_at, 3.0, "week"))
-    assert [~N[2014-01-22 02:00:00]] =
-             TestRepo.all(from p in Post, select: datetime_add(p.inserted_at, ^3, "week"))
-    assert [~N[2014-01-22 02:00:00]] =
-             TestRepo.all(from p in Post, select: datetime_add(p.inserted_at, ^3.0, "week"))
-    assert [~N[2014-01-22 02:00:00]] =
-             TestRepo.all(from p in Post, select: datetime_add(p.inserted_at, ^dec, "week"))
-  end
-
-  test "datetime_add with day" do
-    dec = Decimal.new(5)
-    assert [~N[2014-01-06 02:00:00]] =
-             TestRepo.all(from p in Post, select: datetime_add(p.inserted_at, 5, "day"))
-    assert [~N[2014-01-06 02:00:00]] =
-             TestRepo.all(from p in Post, select: datetime_add(p.inserted_at, 5.0, "day"))
-    assert [~N[2014-01-06 02:00:00]] =
-             TestRepo.all(from p in Post, select: datetime_add(p.inserted_at, ^5, "day"))
-    assert [~N[2014-01-06 02:00:00]] =
-             TestRepo.all(from p in Post, select: datetime_add(p.inserted_at, ^5.0, "day"))
-    assert [~N[2014-01-06 02:00:00]] =
-             TestRepo.all(from p in Post, select: datetime_add(p.inserted_at, ^dec, "day"))
-  end
-
-  test "datetime_add with hour" do
-    dec = Decimal.new(60)
-    assert [~N[2014-01-03 14:00:00]] =
-             TestRepo.all(from p in Post, select: datetime_add(p.inserted_at, 60, "hour"))
-    assert [~N[2014-01-03 14:00:00]] =
-             TestRepo.all(from p in Post, select: datetime_add(p.inserted_at, 60.0, "hour"))
-    assert [~N[2014-01-03 14:00:00]] =
-             TestRepo.all(from p in Post, select: datetime_add(p.inserted_at, ^60, "hour"))
-    assert [~N[2014-01-03 14:00:00]] =
-             TestRepo.all(from p in Post, select: datetime_add(p.inserted_at, ^60.0, "hour"))
-    assert [~N[2014-01-03 14:00:00]] =
-             TestRepo.all(from p in Post, select: datetime_add(p.inserted_at, ^dec, "hour"))
-  end
-
-  test "datetime_add with minute" do
-    dec = Decimal.new(90)
-    assert [~N[2014-01-01 03:30:00]] =
-             TestRepo.all(from p in Post, select: datetime_add(p.inserted_at, 90, "minute"))
-    assert [~N[2014-01-01 03:30:00]] =
-             TestRepo.all(from p in Post, select: datetime_add(p.inserted_at, 90.0, "minute"))
-    assert [~N[2014-01-01 03:30:00]] =
-             TestRepo.all(from p in Post, select: datetime_add(p.inserted_at, ^90, "minute"))
-    assert [~N[2014-01-01 03:30:00]] =
-             TestRepo.all(from p in Post, select: datetime_add(p.inserted_at, ^90.0, "minute"))
-    assert [~N[2014-01-01 03:30:00]] =
-             TestRepo.all(from p in Post, select: datetime_add(p.inserted_at, ^dec, "minute"))
-  end
-
-  test "datetime_add with second" do
-    dec = Decimal.new(90)
-    assert [~N[2014-01-01 02:01:30]] =
-             TestRepo.all(from p in Post, select: datetime_add(p.inserted_at, 90, "second"))
-    assert [~N[2014-01-01 02:01:30]] =
-             TestRepo.all(from p in Post, select: datetime_add(p.inserted_at, 90.0, "second"))
-    assert [~N[2014-01-01 02:01:30]] =
-             TestRepo.all(from p in Post, select: datetime_add(p.inserted_at, ^90, "second"))
-    assert [~N[2014-01-01 02:01:30]] =
-             TestRepo.all(from p in Post, select: datetime_add(p.inserted_at, ^90.0, "second"))
-    assert [~N[2014-01-01 02:01:30]] =
-             TestRepo.all(from p in Post, select: datetime_add(p.inserted_at, ^dec, "second"))
-  end
-
-  @tag :uses_msec
-  test "datetime_add with millisecond" do
-    dec = Decimal.new(1500)
-    assert [~N[2014-01-01 02:00:01]] =
-             TestRepo.all(from p in Post, select: datetime_add(p.inserted_at, 1500, "millisecond"))
-    assert [~N[2014-01-01 02:00:01]] =
-             TestRepo.all(from p in Post, select: datetime_add(p.inserted_at, 1500.0, "millisecond"))
-    assert [~N[2014-01-01 02:00:01]] =
-             TestRepo.all(from p in Post, select: datetime_add(p.inserted_at, ^1500, "millisecond"))
-    assert [~N[2014-01-01 02:00:01]] =
-             TestRepo.all(from p in Post, select: datetime_add(p.inserted_at, ^1500.0, "millisecond"))
-    assert [~N[2014-01-01 02:00:01]] =
-             TestRepo.all(from p in Post, select: datetime_add(p.inserted_at, ^dec, "millisecond"))
-  end
-
-  @tag :microsecond_precision
-  @tag :uses_usec
-  test "datetime_add with microsecond" do
-    dec = Decimal.new(1500)
-    assert [~N[2014-01-01 02:00:00]] =
-             TestRepo.all(from p in Post, select: datetime_add(p.inserted_at, 1500, "microsecond"))
-    assert [~N[2014-01-01 02:00:00]] =
-             TestRepo.all(from p in Post, select: datetime_add(p.inserted_at, 1500.0, "microsecond"))
-    assert [~N[2014-01-01 02:00:00]] =
-             TestRepo.all(from p in Post, select: datetime_add(p.inserted_at, ^1500, "microsecond"))
-    assert [~N[2014-01-01 02:00:00]] =
-             TestRepo.all(from p in Post, select: datetime_add(p.inserted_at, ^1500.0, "microsecond"))
-    assert [~N[2014-01-01 02:00:00]] =
-             TestRepo.all(from p in Post, select: datetime_add(p.inserted_at, ^dec, "microsecond"))
-  end
-
-  test "datetime_add with dynamic" do
-    inserted_at = @inserted_at
-    assert [~N[2015-01-01 02:00:00]] =
-             TestRepo.all(from p in Post, select: datetime_add(^inserted_at, ^1, ^"year"))
-    assert [~N[2014-04-01 02:00:00]] =
-             TestRepo.all(from p in Post, select: datetime_add(^inserted_at, ^3, ^"month"))
-    assert [~N[2014-01-22 02:00:00]] =
-             TestRepo.all(from p in Post, select: datetime_add(^inserted_at, ^3, ^"week"))
-    assert [~N[2014-01-06 02:00:00]] =
-             TestRepo.all(from p in Post, select: datetime_add(^inserted_at, ^5, ^"day"))
-    assert [~N[2014-01-03 14:00:00]] =
-             TestRepo.all(from p in Post, select: datetime_add(^inserted_at, ^60, ^"hour"))
-    assert [~N[2014-01-01 03:30:00]] =
-             TestRepo.all(from p in Post, select: datetime_add(^inserted_at, ^90, ^"minute"))
-    assert [~N[2014-01-01 02:01:30]] =
-             TestRepo.all(from p in Post, select: datetime_add(^inserted_at, ^90, ^"second"))
-  end
-
-  test "datetime_add with dynamic in filters" do
-    inserted_at = @inserted_at
-    field = :inserted_at
-    assert [_] =
-             TestRepo.all(from p in Post, where: p.inserted_at > datetime_add(^inserted_at, ^-1, "year"))
-    assert [_] =
-             TestRepo.all(from p in Post, where: p.inserted_at > datetime_add(^inserted_at, -3, "month"))
-    assert [_] =
-             TestRepo.all(from p in Post, where: field(p, ^field) > datetime_add(^inserted_at, ^-3, ^"week"))
-    assert [_] =
-             TestRepo.all(from p in Post, where: field(p, ^field) > datetime_add(^inserted_at, -5, ^"day"))
-  end
-
-  test "datetime_add with negative interval" do
-    dec = Decimal.new(-1)
-    assert [~N[2013-01-01 02:00:00]] =
-             TestRepo.all(from p in Post, select: datetime_add(p.inserted_at, -1, "year"))
-    assert [~N[2013-01-01 02:00:00]] =
-             TestRepo.all(from p in Post, select: datetime_add(p.inserted_at, -1.0, "year"))
-    assert [~N[2013-01-01 02:00:00]] =
-             TestRepo.all(from p in Post, select: datetime_add(p.inserted_at, ^-1, "year"))
-    assert [~N[2013-01-01 02:00:00]] =
-             TestRepo.all(from p in Post, select: datetime_add(p.inserted_at, ^-1.0, "year"))
-    assert [~N[2013-01-01 02:00:00]] =
-             TestRepo.all(from p in Post, select: datetime_add(p.inserted_at, ^dec, "year"))
-  end
-
-  test "from_now" do
-    current = DateTime.utc_now().year
-    dec = Decimal.new(5)
-    assert [%{year: year}] = TestRepo.all(from p in Post, select: from_now(5, "year"))
-    assert year > current
-    assert [%{year: year}] = TestRepo.all(from p in Post, select: from_now(5.0, "year"))
-    assert year > current
-    assert [%{year: year}] = TestRepo.all(from p in Post, select: from_now(^5, "year"))
-    assert year > current
-    assert [%{year: year}] = TestRepo.all(from p in Post, select: from_now(^5.0, "year"))
-    assert year > current
-    assert [%{year: year}] = TestRepo.all(from p in Post, select: from_now(^dec, "year"))
-    assert year > current
-  end
-
-  test "ago" do
-    current = DateTime.utc_now().year
-    dec = Decimal.new(5)
-    assert [%{year: year}] = TestRepo.all(from p in Post, select: ago(5, "year"))
-    assert year < current
-    assert [%{year: year}] = TestRepo.all(from p in Post, select: ago(5.0, "year"))
-    assert year < current
-    assert [%{year: year}] = TestRepo.all(from p in Post, select: ago(^5, "year"))
-    assert year < current
-    assert [%{year: year}] = TestRepo.all(from p in Post, select: ago(^5.0, "year"))
-    assert year < current
-    assert [%{year: year}] = TestRepo.all(from p in Post, select: ago(^dec, "year"))
-    assert year < current
-  end
-
-  test "datetime_add with utc_datetime" do
-    {:ok, datetime} = DateTime.from_naive(@inserted_at, "Etc/UTC")
-    TestRepo.insert!(%User{inserted_at: datetime})
-
-    {:ok, datetime} = DateTime.from_naive(~N[2015-01-01 02:00:00], "Etc/UTC")
-    dec = Decimal.new(1)
-
-    assert [^datetime] =
-             TestRepo.all(from p in User, select: datetime_add(type(^datetime, :utc_datetime), 0, "year"))
-    assert [^datetime] =
-             TestRepo.all(from p in User, select: datetime_add(p.inserted_at, 1, "year"))
-    assert [^datetime] =
-             TestRepo.all(from p in User, select: datetime_add(p.inserted_at, 1.0, "year"))
-    assert [^datetime] =
-             TestRepo.all(from p in User, select: datetime_add(p.inserted_at, ^1, "year"))
-    assert [^datetime] =
-             TestRepo.all(from p in User, select: datetime_add(p.inserted_at, ^1.0, "year"))
-    assert [^datetime] =
-             TestRepo.all(from p in User, select: datetime_add(p.inserted_at, ^dec, "year"))
-  end
-
-  @tag :microsecond_precision
-  test "datetime_add with naive_datetime_usec" do
-    TestRepo.insert!(%Usec{naive_datetime_usec: ~N[2014-01-01 02:00:00.000001]})
-    datetime = ~N[2014-01-01 02:00:00.001501]
-
-    assert [^datetime] =
-             TestRepo.all(from u in Usec, select: datetime_add(type(^datetime, :naive_datetime_usec), 0, "microsecond"))
-    assert [^datetime] =
-             TestRepo.all(from u in Usec, select: datetime_add(u.naive_datetime_usec, 1500, "microsecond"))
-    assert [^datetime] =
-             TestRepo.all(from u in Usec, select: datetime_add(u.naive_datetime_usec, 1500.0, "microsecond"))
-    assert [^datetime] =
-             TestRepo.all(from u in Usec, select: datetime_add(u.naive_datetime_usec, ^1500, "microsecond"))
-  end
-
-  @tag :microsecond_precision
-  @tag :decimal_precision
-  test "datetime_add with naive_datetime_usec and decimal increment" do
-    TestRepo.insert!(%Usec{naive_datetime_usec: ~N[2014-01-01 02:00:00.000001]})
-    dec = Decimal.new(1500)
-    datetime = ~N[2014-01-01 02:00:00.001501]
-
-    assert [^datetime] =
-             TestRepo.all(from u in Usec, select: datetime_add(u.naive_datetime_usec, ^1500.0, "microsecond"))
-    assert [^datetime] =
-             TestRepo.all(from u in Usec, select: datetime_add(u.naive_datetime_usec, ^dec, "microsecond"))
-  end
-
-
-  @tag :microsecond_precision
-  test "datetime_add with utc_datetime_usec" do
-    {:ok, datetime} = DateTime.from_naive(~N[2014-01-01 02:00:00.000001], "Etc/UTC")
-    TestRepo.insert!(%Usec{utc_datetime_usec: datetime})
-
-    {:ok, datetime} = DateTime.from_naive(~N[2014-01-01 02:00:00.001501], "Etc/UTC")
-
-    assert [^datetime] =
-             TestRepo.all(from u in Usec, select: datetime_add(type(^datetime, :utc_datetime_usec), 0, "microsecond"))
-    assert [^datetime] =
-             TestRepo.all(from u in Usec, select: datetime_add(u.utc_datetime_usec, 1500, "microsecond"))
-    assert [^datetime] =
-             TestRepo.all(from u in Usec, select: datetime_add(u.utc_datetime_usec, 1500.0, "microsecond"))
-    assert [^datetime] =
-             TestRepo.all(from u in Usec, select: datetime_add(u.utc_datetime_usec, ^1500, "microsecond"))
-  end
-
-  @tag :microsecond_precision
-  @tag :decimal_precision
-  test "datetime_add uses utc_datetime_usec with decimal increment" do
-    {:ok, datetime} = DateTime.from_naive(~N[2014-01-01 02:00:00.000001], "Etc/UTC")
-    TestRepo.insert!(%Usec{utc_datetime_usec: datetime})
-
-    {:ok, datetime} = DateTime.from_naive(~N[2014-01-01 02:00:00.001501], "Etc/UTC")
-    dec = Decimal.new(1500)
-
-    assert [^datetime] =
-             TestRepo.all(from u in Usec, select: datetime_add(u.utc_datetime_usec, ^1500.0, "microsecond"))
-    assert [^datetime] =
-             TestRepo.all(from u in Usec, select: datetime_add(u.utc_datetime_usec, ^dec, "microsecond"))
-  end
-
-
-  test "datetime_add with utc_datetime_usec in milliseconds" do
-    {:ok, datetime} = DateTime.from_naive(~N[2014-01-01 02:00:00.001000], "Etc/UTC")
-    TestRepo.insert!(%Usec{utc_datetime_usec: datetime})
-
-    {:ok, datetime} = DateTime.from_naive(~N[2014-01-01 02:00:00.151000], "Etc/UTC")
-
-    assert [^datetime] =
-             TestRepo.all(from u in Usec, select: datetime_add(type(^datetime, :utc_datetime_usec), 0, "millisecond"))
-    assert [^datetime] =
-             TestRepo.all(from u in Usec, select: datetime_add(u.utc_datetime_usec, 150, "millisecond"))
-    assert [^datetime] =
-             TestRepo.all(from u in Usec, select: datetime_add(u.utc_datetime_usec, 150, "millisecond"))
-    assert [^datetime] =
-             TestRepo.all(from u in Usec, select: datetime_add(u.utc_datetime_usec, ^150, "millisecond"))
-  end
-
-  @tag :decimal_precision
-  test "datetime_add uses utc_datetime_usec with decimal increment in milliseconds" do
-    {:ok, datetime} = DateTime.from_naive(~N[2014-01-01 02:00:00.001000], "Etc/UTC")
-    TestRepo.insert!(%Usec{utc_datetime_usec: datetime})
-
-    {:ok, datetime} = DateTime.from_naive(~N[2014-01-01 02:00:00.151000], "Etc/UTC")
-    dec = Decimal.new(150)
-
-    assert [^datetime] =
-             TestRepo.all(from u in Usec, select: datetime_add(u.utc_datetime_usec, ^150.0, "millisecond"))
-    assert [^datetime] =
-             TestRepo.all(from u in Usec, select: datetime_add(u.utc_datetime_usec, ^dec, "millisecond"))
-  end
-
-
-  @tag :decimal_precision
-  test "datetime_add with naive_datetime_usec in milliseconds" do
-    TestRepo.insert!(%Usec{naive_datetime_usec: ~N[2014-01-01 02:00:00.001000]})
-    datetime = ~N[2014-01-01 02:00:00.151000]
-
-    assert [^datetime] =
-             TestRepo.all(from u in Usec, select: datetime_add(type(^datetime, :naive_datetime_usec), 0, "millisecond"))
-    assert [^datetime] =
-             TestRepo.all(from u in Usec, select: datetime_add(u.naive_datetime_usec, 150, "millisecond"))
-    assert [^datetime] =
-             TestRepo.all(from u in Usec, select: datetime_add(u.naive_datetime_usec, 150.0, "millisecond"))
-    assert [^datetime] =
-             TestRepo.all(from u in Usec, select: datetime_add(u.naive_datetime_usec, ^150, "millisecond"))
-  end
-
-  @tag :decimal_precision
-  test "datetime_add with naive_datetime_usec and decimal increment in milliseconds" do
-    TestRepo.insert!(%Usec{naive_datetime_usec: ~N[2014-01-01 02:00:00.001000]})
-    dec = Decimal.new(150)
-    datetime = ~N[2014-01-01 02:00:00.151000]
-
-    assert [^datetime] =
-             TestRepo.all(from u in Usec, select: datetime_add(u.naive_datetime_usec, ^150.0, "millisecond"))
-    assert [^datetime] =
-             TestRepo.all(from u in Usec, select: datetime_add(u.naive_datetime_usec, ^dec, "millisecond"))
-  end
-end
diff --git a/integration_test/exqlite/ecto/type.exs b/integration_test/exqlite/ecto/type.exs
deleted file mode 100644
index e0b878cf..00000000
--- a/integration_test/exqlite/ecto/type.exs
+++ /dev/null
@@ -1,492 +0,0 @@
-defmodule Ecto.Integration.TypeTest do
-  use Ecto.Integration.Case, async: Application.get_env(:ecto, :async_integration_tests, true)
-
-  alias Ecto.Integration.{Custom, Item, ItemColor, Order, Post, User, Tag, Usec}
-  alias Ecto.Integration.TestRepo
-  import Ecto.Query
-
-  test "primitive types" do
-    integer = 1
-    float = 0.1
-    blob = <<0, 1>>
-    uuid = "00010203-0405-4607-8809-0a0b0c0d0e0f"
-    datetime = ~N[2014-01-16 20:26:51]
-
-    TestRepo.insert!(%Post{blob: blob, public: true, visits: integer, uuid: uuid,
-                           counter: integer, inserted_at: datetime, intensity: float})
-
-    # nil
-    assert [nil] = TestRepo.all(from Post, select: nil)
-
-    # ID
-    assert [1] = TestRepo.all(from p in Post, where: p.counter == ^integer, select: p.counter)
-
-    # Integers
-    assert [1] = TestRepo.all(from p in Post, where: p.visits == ^integer, select: p.visits)
-    assert [1] = TestRepo.all(from p in Post, where: p.visits == 1, select: p.visits)
-    assert [3] = TestRepo.all(from p in Post, select: p.visits + 2)
-
-    # Floats
-    assert [0.1] = TestRepo.all(from p in Post, where: p.intensity == ^float, select: p.intensity)
-    assert [0.1] = TestRepo.all(from p in Post, where: p.intensity == 0.1, select: p.intensity)
-    assert [1500.0] = TestRepo.all(from p in Post, select: 1500.0)
-    assert [0.5] = TestRepo.all(from p in Post, select: p.intensity * 5)
-
-    # Booleans
-    assert [true] = TestRepo.all(from p in Post, where: p.public == ^true, select: p.public)
-    assert [true] = TestRepo.all(from p in Post, where: p.public == true, select: p.public)
-
-    # Binaries
-    assert [^blob] = TestRepo.all(from p in Post, where: p.blob == <<0, 1>>, select: p.blob)
-    assert [^blob] = TestRepo.all(from p in Post, where: p.blob == ^blob, select: p.blob)
-
-    # UUID
-    assert [^uuid] = TestRepo.all(from p in Post, where: p.uuid == ^uuid, select: p.uuid)
-
-    # NaiveDatetime
-    assert [^datetime] = TestRepo.all(from p in Post, where: p.inserted_at == ^datetime, select: p.inserted_at)
-
-    # Datetime
-    datetime = DateTime.from_unix!(System.os_time(:second), :second)
-    TestRepo.insert!(%User{inserted_at: datetime})
-    assert [^datetime] = TestRepo.all(from u in User, where: u.inserted_at == ^datetime, select: u.inserted_at)
-
-    # usec
-    naive_datetime = ~N[2014-01-16 20:26:51.000000]
-    datetime = DateTime.from_naive!(~N[2014-01-16 20:26:51.000000], "Etc/UTC")
-    TestRepo.insert!(%Usec{naive_datetime_usec: naive_datetime, utc_datetime_usec: datetime})
-    assert [^naive_datetime] = TestRepo.all(from u in Usec, where: u.naive_datetime_usec == ^naive_datetime, select: u.naive_datetime_usec)
-    assert [^datetime] = TestRepo.all(from u in Usec, where: u.utc_datetime_usec == ^datetime, select: u.utc_datetime_usec)
-
-    naive_datetime = ~N[2014-01-16 20:26:51.123000]
-    datetime = DateTime.from_naive!(~N[2014-01-16 20:26:51.123000], "Etc/UTC")
-    TestRepo.insert!(%Usec{naive_datetime_usec: naive_datetime, utc_datetime_usec: datetime})
-    assert [^naive_datetime] = TestRepo.all(from u in Usec, where: u.naive_datetime_usec == ^naive_datetime, select: u.naive_datetime_usec)
-    assert [^datetime] = TestRepo.all(from u in Usec, where: u.utc_datetime_usec == ^datetime, select: u.utc_datetime_usec)
-  end
-
-  @tag :select_not
-  test "primitive types boolean negate" do
-    TestRepo.insert!(%Post{public: true})
-    assert [false] = TestRepo.all(from p in Post, where: p.public == true, select: not p.public)
-    assert [true] = TestRepo.all(from p in Post, where: p.public == true, select: not not p.public)
-  end
-
-  test "aggregate types" do
-    datetime = ~N[2014-01-16 20:26:51]
-    TestRepo.insert!(%Post{inserted_at: datetime})
-    query = from p in Post, select: max(p.inserted_at)
-    assert [^datetime] = TestRepo.all(query)
-  end
-
-  # We don't specifically assert on the tuple content because
-  # some databases would return integer, others decimal.
-  # The important thing is that the type has been invoked for wrapping.
-  test "aggregate custom types" do
-    TestRepo.insert!(%Post{wrapped_visits: {:int, 10}})
-    query = from p in Post, select: sum(p.wrapped_visits)
-    assert [{:int, _}] = TestRepo.all(query)
-  end
-
-  @tag :aggregate_filters
-  test "aggregate filter types" do
-    datetime = ~N[2014-01-16 20:26:51]
-    TestRepo.insert!(%Post{inserted_at: datetime})
-    query = from p in Post, select: filter(max(p.inserted_at), p.public == ^true)
-    assert [^datetime] = TestRepo.all(query)
-  end
-
-  test "coalesce text type when default" do
-    TestRepo.insert!(%Post{blob: nil})
-    blob = <<0, 1>>
-    query = from p in Post, select: coalesce(p.blob, ^blob)
-    assert [^blob] = TestRepo.all(query)
-  end
-
-  test "coalesce text type when value" do
-    blob = <<0, 2>>
-    default_blob = <<0, 1>>
-    TestRepo.insert!(%Post{blob: blob})
-    query = from p in Post, select: coalesce(p.blob, ^default_blob)
-    assert [^blob] = TestRepo.all(query)
-  end
-
-  test "tagged types" do
-    TestRepo.insert!(%Post{})
-
-    # Numbers
-    assert [1] = TestRepo.all(from Post, select: type(^"1", :integer))
-    assert [1.0] = TestRepo.all(from Post, select: type(^1.0, :float))
-    assert [1] = TestRepo.all(from p in Post, select: type(^"1", p.visits))
-    assert [1.0] = TestRepo.all(from p in Post, select: type(^"1", p.intensity))
-
-    # Custom wrappers
-    assert [1] = TestRepo.all(from Post, select: type(^"1", CustomPermalink))
-
-    # Custom types
-    uuid = Ecto.UUID.generate()
-    assert [^uuid] = TestRepo.all(from Post, select: type(^uuid, Ecto.UUID))
-
-    # Math operations
-    assert [4] = TestRepo.all(from Post, select: type(2 + ^"2", :integer))
-    assert [4.0] = TestRepo.all(from Post, select: type(2.0 + ^"2", :float))
-    assert [4] = TestRepo.all(from p in Post, select: type(2 + ^"2", p.visits))
-    assert [4.0] = TestRepo.all(from p in Post, select: type(2.0 + ^"2", p.intensity))
-  end
-
-  test "binary id type" do
-    assert %Custom{} = custom = TestRepo.insert!(%Custom{})
-    bid = custom.bid
-    assert [^bid] = TestRepo.all(from c in Custom, select: c.bid)
-    assert [^bid] = TestRepo.all(from c in Custom, select: type(^bid, :binary_id))
-  end
-
-  @tag :like_match_blob
-  test "text type as blob" do
-    assert %Post{} = post = TestRepo.insert!(%Post{blob: <<0, 1, 2>>})
-    id = post.id
-    assert post.blob == <<0, 1, 2>>
-    assert [^id] = TestRepo.all(from p in Post, where: like(p.blob, ^<<0, 1, 2>>), select: p.id)
-  end
-
-  @tag :like_match_blob
-  @tag :text_type_as_string
-  test "text type as string" do
-    assert %Post{} = post = TestRepo.insert!(%Post{blob: "hello"})
-    id = post.id
-    assert post.blob == "hello"
-    assert [^id] = TestRepo.all(from p in Post, where: like(p.blob, ^"hello"), select: p.id)
-  end
-
-  @tag :array_type
-  test "array type" do
-    ints = [1, 2, 3]
-    tag = TestRepo.insert!(%Tag{ints: ints})
-
-    assert TestRepo.all(from t in Tag, where: t.ints == ^[], select: t.ints) == []
-    assert TestRepo.all(from t in Tag, where: t.ints == ^[1, 2, 3], select: t.ints) == [ints]
-
-    # Both sides interpolation
-    assert TestRepo.all(from t in Tag, where: ^"b" in ^["a", "b", "c"], select: t.ints) == [ints]
-    assert TestRepo.all(from t in Tag, where: ^"b" in [^"a", ^"b", ^"c"], select: t.ints) == [ints]
-
-    # Querying
-    assert TestRepo.all(from t in Tag, where: t.ints == [1, 2, 3], select: t.ints) == [ints]
-    assert TestRepo.all(from t in Tag, where: 0 in t.ints, select: t.ints) == []
-    assert TestRepo.all(from t in Tag, where: 1 in t.ints, select: t.ints) == [ints]
-
-    # Update
-    tag = TestRepo.update!(Ecto.Changeset.change tag, ints: nil)
-    assert TestRepo.get!(Tag, tag.id).ints == nil
-
-    tag = TestRepo.update!(Ecto.Changeset.change tag, ints: [3, 2, 1])
-    assert TestRepo.get!(Tag, tag.id).ints == [3, 2, 1]
-
-    # Update all
-    {1, _} = TestRepo.update_all(Tag, push: [ints: 0])
-    assert TestRepo.get!(Tag, tag.id).ints == [3, 2, 1, 0]
-
-    {1, _} = TestRepo.update_all(Tag, pull: [ints: 2])
-    assert TestRepo.get!(Tag, tag.id).ints == [3, 1, 0]
-
-    {1, _} = TestRepo.update_all(Tag, set: [ints: nil])
-    assert TestRepo.get!(Tag, tag.id).ints == nil
-  end
-
-  @tag :array_type
-  test "array type with custom types" do
-    uuids = ["51fcfbdd-ad60-4ccb-8bf9-47aabd66d075"]
-    TestRepo.insert!(%Tag{uuids: ["51fcfbdd-ad60-4ccb-8bf9-47aabd66d075"]})
-
-    assert TestRepo.all(from t in Tag, where: t.uuids == ^[], select: t.uuids) == []
-    assert TestRepo.all(from t in Tag, where: t.uuids == ^["51fcfbdd-ad60-4ccb-8bf9-47aabd66d075"],
-                        select: t.uuids) == [uuids]
-
-    {1, _} = TestRepo.update_all(Tag, set: [uuids: nil])
-    assert TestRepo.all(from t in Tag, select: t.uuids) == [nil]
-  end
-
-  @tag :array_type
-  test "array type with nil in array" do
-    tag = TestRepo.insert!(%Tag{ints: [1, nil, 3]})
-    assert tag.ints == [1, nil, 3]
-  end
-
-  @tag :map_type
-  test "untyped map" do
-    post1 = TestRepo.insert!(%Post{meta: %{"foo" => "bar", "baz" => "bat"}})
-    post2 = TestRepo.insert!(%Post{meta: %{foo: "bar", baz: "bat"}})
-
-    assert TestRepo.all(from p in Post, where: p.id == ^post1.id, select: p.meta) ==
-           [%{"foo" => "bar", "baz" => "bat"}]
-    assert TestRepo.all(from p in Post, where: p.id == ^post2.id, select: p.meta) ==
-           [%{"foo" => "bar", "baz" => "bat"}]
-  end
-
-  @tag :map_type
-  test "typed string map" do
-    post1 = TestRepo.insert!(%Post{links: %{"foo" => "http://foo.com", "bar" => "http://bar.com"}})
-    post2 = TestRepo.insert!(%Post{links: %{foo: "http://foo.com", bar: "http://bar.com"}})
-
-    assert TestRepo.all(from p in Post, where: p.id == ^post1.id, select: p.links) ==
-           [%{"foo" => "http://foo.com", "bar" => "http://bar.com"}]
-    assert TestRepo.all(from p in Post, where: p.id == ^post2.id, select: p.links) ==
-           [%{"foo" => "http://foo.com", "bar" => "http://bar.com"}]
-  end
-
-  @tag :map_type
-  test "typed float map" do
-    post = TestRepo.insert!(%Post{intensities: %{"foo" => 1.0, "bar" => 416500.0}})
-
-    # Note we are using === since we want to check integer vs float
-    assert TestRepo.all(from p in Post, where: p.id == ^post.id, select: p.intensities) ===
-           [%{"foo" => 1.0, "bar" => 416500.0}]
-  end
-
-  @tag :map_type
-  test "map type on update" do
-    post = TestRepo.insert!(%Post{meta: %{"world" => "hello"}})
-    assert TestRepo.get!(Post, post.id).meta == %{"world" => "hello"}
-
-    post = TestRepo.update!(Ecto.Changeset.change post, meta: %{hello: "world"})
-    assert TestRepo.get!(Post, post.id).meta == %{"hello" => "world"}
-
-    query = from(p in Post, where: p.id == ^post.id)
-    TestRepo.update_all(query, set: [meta: %{world: "hello"}])
-    assert TestRepo.get!(Post, post.id).meta == %{"world" => "hello"}
-  end
-
-  @tag :map_type
-  test "embeds one" do
-    item = %Item{price: 123, valid_at: ~D[2014-01-16]}
-
-    order =
-      %Order{}
-      |> Ecto.Changeset.change
-      |> Ecto.Changeset.put_embed(:item, item)
-      |> TestRepo.insert!()
-
-    dbitem = TestRepo.get!(Order, order.id).item
-    assert item.reference == dbitem.reference
-    assert item.price == dbitem.price
-    assert item.valid_at == dbitem.valid_at
-    assert dbitem.id
-
-    [dbitem] = TestRepo.all(from o in Order, select: o.item)
-    assert item.reference == dbitem.reference
-    assert item.price == dbitem.price
-    assert item.valid_at == dbitem.valid_at
-    assert dbitem.id
-
-    {1, _} = TestRepo.update_all(Order, set: [item: %{dbitem | price: 456}])
-    assert TestRepo.get!(Order, order.id).item.price == 456
-  end
-
-  @tag :map_type
-  @tag :json_extract_path
-  test "json_extract_path with primitive values" do
-    order = %Order{meta: %{:id => 123, :time => ~T[09:00:00], "'single quoted'" => "bar", "\"double quoted\"" => "baz"}}
-    TestRepo.insert!(order)
-
-    assert TestRepo.one(from o in Order, select: o.meta["id"]) == 123
-    assert TestRepo.one(from o in Order, select: o.meta["bad"]) == nil
-    assert TestRepo.one(from o in Order, select: o.meta["bad"]["bad"]) == nil
-
-    field = "id"
-    assert TestRepo.one(from o in Order, select: o.meta[^field]) == 123
-    assert TestRepo.one(from o in Order, select: o.meta["time"]) == "09:00:00"
-    assert TestRepo.one(from o in Order, select: o.meta["'single quoted'"]) == "bar"
-    assert TestRepo.one(from o in Order, select: o.meta["';"]) == nil
-    assert TestRepo.one(from o in Order, select: o.meta["\"double quoted\""]) == "baz"
-  end
-
-  @tag :map_type
-  @tag :json_extract_path
-  test "json_extract_path with arrays and objects" do
-    order = %Order{meta: %{tags: [%{name: "red"}, %{name: "green"}]}}
-    TestRepo.insert!(order)
-
-    assert TestRepo.one(from o in Order, select: o.meta["tags"][0]["name"]) == "red"
-    assert TestRepo.one(from o in Order, select: o.meta["tags"][99]["name"]) == nil
-
-    index = 1
-    assert TestRepo.one(from o in Order, select: o.meta["tags"][^index]["name"]) == "green"
-  end
-
-  @tag :map_type
-  @tag :json_extract_path
-  test "json_extract_path with embeds" do
-    order = %Order{items: [%{valid_at: ~D[2020-01-01]}]}
-    TestRepo.insert!(order)
-
-    assert TestRepo.one(from o in Order, select: o.items[0]["valid_at"]) == "2020-01-01"
-  end
-
-  @tag :map_type
-  @tag :map_type_schemaless
-  test "embeds one with custom type" do
-    item = %Item{price: 123, reference: "PREFIX-EXAMPLE"}
-
-    order =
-      %Order{}
-      |> Ecto.Changeset.change
-      |> Ecto.Changeset.put_embed(:item, item)
-      |> TestRepo.insert!()
-
-    dbitem = TestRepo.get!(Order, order.id).item
-    assert dbitem.reference == "PREFIX-EXAMPLE"
-    assert [%{"reference" => "EXAMPLE"}] = TestRepo.all(from o in "orders", select: o.item)
-  end
-
-  @tag :map_type
-  test "empty embeds one" do
-    order = TestRepo.insert!(%Order{})
-    assert order.item == nil
-    assert TestRepo.get!(Order, order.id).item == nil
-  end
-
-  @tag :map_type
-  @tag :array_type
-  test "embeds many" do
-    item = %Item{price: 123, valid_at: ~D[2014-01-16]}
-    tag =
-      %Tag{}
-      |> Ecto.Changeset.change
-      |> Ecto.Changeset.put_embed(:items, [item])
-    tag = TestRepo.insert!(tag)
-
-    [dbitem] = TestRepo.get!(Tag, tag.id).items
-    assert item.price == dbitem.price
-    assert item.valid_at == dbitem.valid_at
-    assert dbitem.id
-
-    [[dbitem]] = TestRepo.all(from t in Tag, select: t.items)
-    assert item.price == dbitem.price
-    assert item.valid_at == dbitem.valid_at
-    assert dbitem.id
-
-    {1, _} = TestRepo.update_all(Tag, set: [items: [%{dbitem | price: 456}]])
-    assert (TestRepo.get!(Tag, tag.id).items |> hd).price == 456
-  end
-
-  @tag :map_type
-  @tag :array_type
-  test "empty embeds many" do
-    tag = TestRepo.insert!(%Tag{})
-    assert tag.items == []
-    assert TestRepo.get!(Tag, tag.id).items == []
-  end
-
-  @tag :map_type
-  @tag :array_type
-  test "nested embeds" do
-    red = %ItemColor{name: "red"}
-    blue = %ItemColor{name: "blue"}
-    item = %Item{
-      primary_color: red,
-      secondary_colors: [blue]
-    }
-
-    order =
-      %Order{}
-      |> Ecto.Changeset.change
-      |> Ecto.Changeset.put_embed(:item, item)
-    order = TestRepo.insert!(order)
-
-    dbitem = TestRepo.get!(Order, order.id).item
-    assert dbitem.primary_color.name == "red"
-    assert Enum.map(dbitem.secondary_colors, & &1.name) == ["blue"]
-    assert dbitem.id
-    assert dbitem.primary_color.id
-
-    [dbitem] = TestRepo.all(from o in Order, select: o.item)
-    assert dbitem.primary_color.name == "red"
-    assert Enum.map(dbitem.secondary_colors, & &1.name) == ["blue"]
-    assert dbitem.id
-    assert dbitem.primary_color.id
-  end
-
-  @tag :decimal_type
-  test "decimal type" do
-    decimal = Decimal.new("1.0")
-    TestRepo.insert!(%Post{cost: decimal})
-
-    [cost] = TestRepo.all(from p in Post, where: p.cost == ^decimal, select: p.cost)
-    assert Decimal.equal?(decimal, cost)
-    [cost] = TestRepo.all(from p in Post, where: p.cost == ^1.0, select: p.cost)
-    assert Decimal.equal?(decimal, cost)
-    [cost] = TestRepo.all(from p in Post, where: p.cost == ^1, select: p.cost)
-    assert Decimal.equal?(decimal, cost)
-    [cost] = TestRepo.all(from p in Post, where: p.cost == 1.0, select: p.cost)
-    assert Decimal.equal?(decimal, cost)
-    [cost] = TestRepo.all(from p in Post, where: p.cost == 1, select: p.cost)
-    assert Decimal.equal?(decimal, cost)
-    [cost] = TestRepo.all(from p in Post, select: p.cost * 2)
-    assert Decimal.equal?(Decimal.new("2.0"), cost)
-    [cost] = TestRepo.all(from p in Post, select: p.cost - p.cost)
-    assert Decimal.equal?(Decimal.new("0.0"), cost)
-  end
-
-  @tag :decimal_type
-  @tag :decimal_precision
-  test "decimal typed aggregations" do
-    decimal = Decimal.new("1.0")
-    TestRepo.insert!(%Post{cost: decimal})
-
-    assert [1] = TestRepo.all(from p in Post, select: type(sum(p.cost), :integer))
-    assert [1.0] = TestRepo.all(from p in Post, select: type(sum(p.cost), :float))
-    [cost] = TestRepo.all(from p in Post, select: type(sum(p.cost), :decimal))
-    assert Decimal.equal?(decimal, cost)
-  end
-
-  @tag :decimal_type
-  test "on coalesce with mixed types" do
-    decimal = Decimal.new("1.0")
-    TestRepo.insert!(%Post{cost: decimal})
-    [cost] = TestRepo.all(from p in Post, select: coalesce(p.cost, 0))
-    assert Decimal.equal?(decimal, cost)
-  end
-
-  @tag :union_with_literals
-  test "unions with literals" do
-    TestRepo.insert!(%Post{})
-    TestRepo.insert!(%Post{})
-
-    query1 = from(p in Post, select: %{n: 1})
-    query2 = from(p in Post, select: %{n: 2})
-
-    assert TestRepo.all(union_all(query1, ^query2)) ==
-           [%{n: 1}, %{n: 1}, %{n: 2}, %{n: 2}]
-
-    query1 = from(p in Post, select: %{n: 1.0})
-    query2 = from(p in Post, select: %{n: 2.0})
-
-    assert TestRepo.all(union_all(query1, ^query2)) ==
-           [%{n: 1.0}, %{n: 1.0}, %{n: 2.0}, %{n: 2.0}]
-
-    query1 = from(p in Post, select: %{n: "foo"})
-    query2 = from(p in Post, select: %{n: "bar"})
-
-    assert TestRepo.all(union_all(query1, ^query2)) ==
-           [%{n: "foo"}, %{n: "foo"}, %{n: "bar"}, %{n: "bar"}]
-  end
-
-  test "schemaless types" do
-    TestRepo.insert!(%Post{visits: 123})
-    assert [123] = TestRepo.all(from p in "posts", select: type(p.visits, :integer))
-  end
-
-  test "schemaless calendar types" do
-    datetime = ~N[2014-01-16 20:26:51]
-    assert {1, _} =
-             TestRepo.insert_all("posts", [[inserted_at: datetime]])
-    assert {1, _} =
-             TestRepo.update_all("posts", set: [inserted_at: datetime])
-    assert [_] =
-             TestRepo.all(from p in "posts", where: p.inserted_at >= ^datetime, select: p.inserted_at)
-    assert [_] =
-             TestRepo.all(from p in "posts", where: p.inserted_at in [^datetime], select: p.inserted_at)
-    assert [_] =
-             TestRepo.all(from p in "posts", where: p.inserted_at in ^[datetime], select: p.inserted_at)
-  end
-end
diff --git a/integration_test/exqlite/ecto_sql/migration.exs b/integration_test/exqlite/ecto_sql/migration.exs
deleted file mode 100644
index 582078ee..00000000
--- a/integration_test/exqlite/ecto_sql/migration.exs
+++ /dev/null
@@ -1,623 +0,0 @@
-defmodule Ecto.Integration.MigrationTest do
-  use ExUnit.Case, async: true
-
-  alias Ecto.Integration.{TestRepo, PoolRepo}
-
-  defmodule CreateMigration do
-    use Ecto.Migration
-
-    @table table(:create_table_migration)
-    @index index(:create_table_migration, [:value], unique: true)
-
-    def up do
-      create @table do
-        add :value, :integer
-      end
-      create @index
-    end
-
-    def down do
-      drop @index
-      drop @table
-    end
-  end
-
-  defmodule AddColumnMigration do
-    use Ecto.Migration
-
-    def up do
-      create table(:add_col_migration) do
-        add :value, :integer
-      end
-
-      alter table(:add_col_migration) do
-        add :to_be_added, :integer
-      end
-
-      execute "INSERT INTO add_col_migration (value, to_be_added) VALUES (1, 2)"
-    end
-
-    def down do
-      drop table(:add_col_migration)
-    end
-  end
-
-  defmodule AlterColumnMigration do
-    use Ecto.Migration
-
-    def up do
-      create table(:alter_col_migration) do
-        add :from_null_to_not_null, :integer
-        add :from_not_null_to_null, :integer, null: false
-
-        add :from_default_to_no_default, :integer, default: 0
-        add :from_no_default_to_default, :integer
-      end
-
-      alter table(:alter_col_migration) do
-        modify :from_null_to_not_null, :string, null: false
-        modify :from_not_null_to_null, :string, null: true
-
-        modify :from_default_to_no_default, :integer, default: nil
-        modify :from_no_default_to_default, :integer, default: 0
-      end
-
-      execute "INSERT INTO alter_col_migration (from_null_to_not_null) VALUES ('foo')"
-    end
-
-    def down do
-      drop table(:alter_col_migration)
-    end
-  end
-
-  defmodule AlterColumnFromMigration do
-    use Ecto.Migration
-
-    def change do
-      create table(:modify_from_products) do
-        add :value, :integer
-      end
-
-      if direction() == :up do
-        flush()
-        PoolRepo.insert_all "modify_from_products", [[value: 1]]
-      end
-
-      alter table(:modify_from_products) do
-        modify :value, :bigint, from: :integer
-      end
-    end
-  end
-
-  defmodule AlterColumnFromPkeyMigration do
-    use Ecto.Migration
-
-    def change do
-      create table(:modify_from_authors, primary_key: false) do
-        add :id, :integer, primary_key: true
-      end
-      create table(:modify_from_posts) do
-        add :author_id, references(:modify_from_authors, type: :integer)
-      end
-
-      if direction() == :up do
-        flush()
-        PoolRepo.insert_all "modify_from_authors", [[id: 1]]
-        PoolRepo.insert_all "modify_from_posts", [[author_id: 1]]
-      end
-
-      alter table(:modify_from_posts) do
-        # remove the constraint modify_from_posts_author_id_fkey
-        modify :author_id, :integer, from: references(:modify_from_authors, type: :integer)
-      end
-      alter table(:modify_from_authors) do
-        modify :id, :bigint, from: :integer
-      end
-      alter table(:modify_from_posts) do
-        # add the constraint modify_from_posts_author_id_fkey
-        modify :author_id, references(:modify_from_authors, type: :bigint), from: :integer
-      end
-    end
-  end
-
-  defmodule AlterForeignKeyOnDeleteMigration do
-    use Ecto.Migration
-
-    def up do
-      create table(:alter_fk_users)
-
-      create table(:alter_fk_posts) do
-        add :alter_fk_user_id, :id
-      end
-
-      alter table(:alter_fk_posts) do
-        modify :alter_fk_user_id, references(:alter_fk_users, on_delete: :nilify_all)
-
end - end - - def down do - drop table(:alter_fk_posts) - drop table(:alter_fk_users) - end - end - - defmodule AlterForeignKeyOnUpdateMigration do - use Ecto.Migration - - def up do - create table(:alter_fk_users) - - create table(:alter_fk_posts) do - add :alter_fk_user_id, :id - end - - alter table(:alter_fk_posts) do - modify :alter_fk_user_id, references(:alter_fk_users, on_update: :update_all) - end - end - - def down do - drop table(:alter_fk_posts) - drop table(:alter_fk_users) - end - end - - defmodule DropColumnMigration do - use Ecto.Migration - - def up do - create table(:drop_col_migration) do - add :value, :integer - add :to_be_removed, :integer - end - - execute "INSERT INTO drop_col_migration (value, to_be_removed) VALUES (1, 2)" - - alter table(:drop_col_migration) do - remove :to_be_removed - end - end - - def down do - drop table(:drop_col_migration) - end - end - - defmodule RenameColumnMigration do - use Ecto.Migration - - def up do - create table(:rename_col_migration) do - add :to_be_renamed, :integer - end - - rename table(:rename_col_migration), :to_be_renamed, to: :was_renamed - - execute "INSERT INTO rename_col_migration (was_renamed) VALUES (1)" - end - - def down do - drop table(:rename_col_migration) - end - end - - defmodule OnDeleteMigration do - use Ecto.Migration - - def up do - create table(:parent1) - create table(:parent2) - - create table(:ref_migration) do - add :parent1, references(:parent1, on_delete: :nilify_all) - end - - alter table(:ref_migration) do - add :parent2, references(:parent2, on_delete: :delete_all) - end - end - - def down do - drop table(:ref_migration) - drop table(:parent1) - drop table(:parent2) - end - end - - defmodule CompositeForeignKeyMigration do - use Ecto.Migration - - def change do - create table(:composite_parent) do - add :key_id, :integer - end - - create unique_index(:composite_parent, [:id, :key_id]) - - create table(:composite_child) do - add :parent_key_id, :integer - add :parent_id, references(:composite_parent, with: [parent_key_id: :key_id]) - end - end - end - - defmodule ReferencesRollbackMigration do - use Ecto.Migration - - def change do - create table(:parent) do - add :name, :string - end - - create table(:child) do - add :parent_id, references(:parent) - end - end - end - - defmodule RenameMigration do - use Ecto.Migration - - @table_current table(:posts_migration) - @table_new table(:new_posts_migration) - - def up do - create @table_current - rename @table_current, to: @table_new - end - - def down do - drop @table_new - end - end - - defmodule PrefixMigration do - use Ecto.Migration - - @prefix "ecto_prefix_test" - - def up do - execute TestRepo.create_prefix(@prefix) - create table(:first, prefix: @prefix) - create table(:second, prefix: @prefix) do - add :first_id, references(:first) - end - end - - def down do - drop table(:second, prefix: @prefix) - drop table(:first, prefix: @prefix) - execute TestRepo.drop_prefix(@prefix) - end - end - - defmodule NoSQLMigration do - use Ecto.Migration - - def up do - create table(:collection, options: [capped: true]) - execute create: "collection" - end - end - - defmodule Parent do - use Ecto.Schema - - schema "parent" do - end - end - - defmodule NoErrorTableMigration do - use Ecto.Migration - - def change do - create_if_not_exists table(:existing) do - add :name, :string - end - - create_if_not_exists table(:existing) do - add :name, :string - end - - create_if_not_exists table(:existing) - - drop_if_exists table(:existing) - drop_if_exists table(:existing) 
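-      # Note (illustrative): each create_if_not_exists/drop_if_exists above is
-      # issued twice on purpose. The second invocation must be a no-op rather
-      # than an error, which the companion test asserts via up/4 returning :ok.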
- end - end - - defmodule NoErrorIndexMigration do - use Ecto.Migration - - def change do - create_if_not_exists index(:posts, [:title]) - create_if_not_exists index(:posts, [:title]) - drop_if_exists index(:posts, [:title]) - drop_if_exists index(:posts, [:title]) - end - end - - defmodule InferredDropIndexMigration do - use Ecto.Migration - - def change do - create index(:posts, [:title]) - end - end - - defmodule AlterPrimaryKeyMigration do - use Ecto.Migration - - def change do - create table(:no_pk, primary_key: false) do - add :dummy, :string - end - alter table(:no_pk) do - add :id, :serial, primary_key: true - end - end - end - - - defmodule AddColumnIfNotExistsMigration do - use Ecto.Migration - - def up do - create table(:add_col_if_not_exists_migration) - - alter table(:add_col_if_not_exists_migration) do - add_if_not_exists :value, :integer - add_if_not_exists :to_be_added, :integer - end - - execute "INSERT INTO add_col_if_not_exists_migration (value, to_be_added) VALUES (1, 2)" - end - - def down do - drop table(:add_col_if_not_exists_migration) - end - end - - defmodule DropColumnIfExistsMigration do - use Ecto.Migration - - def up do - create table(:drop_col_if_exists_migration) do - add :value, :integer - add :to_be_removed, :integer - end - - execute "INSERT INTO drop_col_if_exists_migration (value, to_be_removed) VALUES (1, 2)" - - alter table(:drop_col_if_exists_migration) do - remove_if_exists :to_be_removed, :integer - end - end - - def down do - drop table(:drop_col_if_exists_migration) - end - end - - defmodule NoErrorOnConditionalColumnMigration do - use Ecto.Migration - - def up do - create table(:no_error_on_conditional_column_migration) - - alter table(:no_error_on_conditional_column_migration) do - add_if_not_exists :value, :integer - add_if_not_exists :value, :integer - - remove_if_exists :value, :integer - remove_if_exists :value, :integer - end - end - - def down do - drop table(:no_error_on_conditional_column_migration) - end - end - - import Ecto.Query, only: [from: 2] - import Ecto.Migrator, only: [up: 4, down: 4] - - # Avoid migration out of order warnings - @moduletag :capture_log - @base_migration 1_000_000 - - setup do - {:ok, migration_number: System.unique_integer([:positive]) + @base_migration} - end - - test "create and drop table and indexes", %{migration_number: num} do - assert :ok == up(PoolRepo, num, CreateMigration, log: false) - assert :ok == down(PoolRepo, num, CreateMigration, log: false) - end - - test "correctly infers how to drop index", %{migration_number: num} do - assert :ok == up(PoolRepo, num, InferredDropIndexMigration, log: false) - assert :ok == down(PoolRepo, num, InferredDropIndexMigration, log: false) - end - - test "supports on delete", %{migration_number: num} do - assert :ok == up(PoolRepo, num, OnDeleteMigration, log: false) - - parent1 = PoolRepo.insert! Ecto.put_meta(%Parent{}, source: "parent1") - parent2 = PoolRepo.insert! 
Ecto.put_meta(%Parent{}, source: "parent2") - - writer = "INSERT INTO ref_migration (parent1, parent2) VALUES (#{parent1.id}, #{parent2.id})" - PoolRepo.query!(writer) - - reader = from r in "ref_migration", select: {r.parent1, r.parent2} - assert PoolRepo.all(reader) == [{parent1.id, parent2.id}] - - PoolRepo.delete!(parent1) - assert PoolRepo.all(reader) == [{nil, parent2.id}] - - PoolRepo.delete!(parent2) - assert PoolRepo.all(reader) == [] - - assert :ok == down(PoolRepo, num, OnDeleteMigration, log: false) - end - - test "composite foreign keys", %{migration_number: num} do - assert :ok == up(PoolRepo, num, CompositeForeignKeyMigration, log: false) - - PoolRepo.insert_all("composite_parent", [[key_id: 2]]) - assert [id] = PoolRepo.all(from p in "composite_parent", select: p.id) - - catch_error(PoolRepo.insert_all("composite_child", [[parent_id: id, parent_key_id: 1]])) - assert {1, nil} = PoolRepo.insert_all("composite_child", [[parent_id: id, parent_key_id: 2]]) - - assert :ok == down(PoolRepo, num, CompositeForeignKeyMigration, log: false) - end - - test "rolls back references in change/1", %{migration_number: num} do - assert :ok == up(PoolRepo, num, ReferencesRollbackMigration, log: false) - assert :ok == down(PoolRepo, num, ReferencesRollbackMigration, log: false) - end - - test "create table if not exists and drop table if exists does not raise on failure", %{migration_number: num} do - assert :ok == up(PoolRepo, num, NoErrorTableMigration, log: false) - end - - @tag :create_index_if_not_exists - test "create index if not exists and drop index if exists does not raise on failure", %{migration_number: num} do - assert :ok == up(PoolRepo, num, NoErrorIndexMigration, log: false) - end - - test "raises on NoSQL migrations", %{migration_number: num} do - assert_raise ArgumentError, ~r"does not support keyword lists in :options", fn -> - up(PoolRepo, num, NoSQLMigration, log: false) - end - end - - @tag :add_column - test "add column", %{migration_number: num} do - assert :ok == up(PoolRepo, num, AddColumnMigration, log: false) - assert [2] == PoolRepo.all from p in "add_col_migration", select: p.to_be_added - :ok = down(PoolRepo, num, AddColumnMigration, log: false) - end - - @tag :modify_column - test "modify column", %{migration_number: num} do - assert :ok == up(PoolRepo, num, AlterColumnMigration, log: false) - - assert ["foo"] == - PoolRepo.all from p in "alter_col_migration", select: p.from_null_to_not_null - assert [nil] == - PoolRepo.all from p in "alter_col_migration", select: p.from_not_null_to_null - assert [nil] == - PoolRepo.all from p in "alter_col_migration", select: p.from_default_to_no_default - assert [0] == - PoolRepo.all from p in "alter_col_migration", select: p.from_no_default_to_default - - query = "INSERT INTO alter_col_migration (from_not_null_to_null) VALUES ('foo')" - assert catch_error(PoolRepo.query!(query)) - - :ok = down(PoolRepo, num, AlterColumnMigration, log: false) - end - - @tag :modify_column - test "modify column with from", %{migration_number: num} do - assert :ok == up(PoolRepo, num, AlterColumnFromMigration, log: false) - - assert [1] == - PoolRepo.all from p in "modify_from_products", select: p.value - - :ok = down(PoolRepo, num, AlterColumnFromMigration, log: false) - end - - @tag :alter_primary_key - test "modify column with from and pkey", %{migration_number: num} do - assert :ok == up(PoolRepo, num, AlterColumnFromPkeyMigration, log: false) - - assert [1] == - PoolRepo.all from p in "modify_from_posts", select: p.author_id - - :ok = 
down(PoolRepo, num, AlterColumnFromPkeyMigration, log: false) - end - - @tag :alter_foreign_key - test "modify foreign key's on_delete constraint", %{migration_number: num} do - assert :ok == up(PoolRepo, num, AlterForeignKeyOnDeleteMigration, log: false) - - PoolRepo.insert_all("alter_fk_users", [[]]) - assert [id] = PoolRepo.all from p in "alter_fk_users", select: p.id - - PoolRepo.insert_all("alter_fk_posts", [[alter_fk_user_id: id]]) - PoolRepo.delete_all("alter_fk_users") - assert [nil] == PoolRepo.all from p in "alter_fk_posts", select: p.alter_fk_user_id - - :ok = down(PoolRepo, num, AlterForeignKeyOnDeleteMigration, log: false) - end - - @tag :assigns_id_type - test "modify foreign key's on_update constraint", %{migration_number: num} do - assert :ok == up(PoolRepo, num, AlterForeignKeyOnUpdateMigration, log: false) - - PoolRepo.insert_all("alter_fk_users", [[]]) - assert [id] = PoolRepo.all from p in "alter_fk_users", select: p.id - - PoolRepo.insert_all("alter_fk_posts", [[alter_fk_user_id: id]]) - PoolRepo.update_all("alter_fk_users", set: [id: 12345]) - assert [12345] == PoolRepo.all from p in "alter_fk_posts", select: p.alter_fk_user_id - - PoolRepo.delete_all("alter_fk_posts") - :ok = down(PoolRepo, num, AlterForeignKeyOnUpdateMigration, log: false) - end - - @tag :remove_column - test "remove column", %{migration_number: num} do - assert :ok == up(PoolRepo, num, DropColumnMigration, log: false) - assert catch_error(PoolRepo.all from p in "drop_col_migration", select: p.to_be_removed) - :ok = down(PoolRepo, num, DropColumnMigration, log: false) - end - - @tag :rename_column - test "rename column", %{migration_number: num} do - assert :ok == up(PoolRepo, num, RenameColumnMigration, log: false) - assert [1] == PoolRepo.all from p in "rename_col_migration", select: p.was_renamed - :ok = down(PoolRepo, num, RenameColumnMigration, log: false) - end - - @tag :rename_table - test "rename table", %{migration_number: num} do - assert :ok == up(PoolRepo, num, RenameMigration, log: false) - assert :ok == down(PoolRepo, num, RenameMigration, log: false) - end - - @tag :prefix - test "prefix", %{migration_number: num} do - assert :ok == up(PoolRepo, num, PrefixMigration, log: false) - assert :ok == down(PoolRepo, num, PrefixMigration, log: false) - end - - @tag :alter_primary_key - test "alter primary key", %{migration_number: num} do - assert :ok == up(PoolRepo, num, AlterPrimaryKeyMigration, log: false) - assert :ok == down(PoolRepo, num, AlterPrimaryKeyMigration, log: false) - end - - @tag :add_column_if_not_exists - @tag :remove_column_if_exists - test "add if not exists and remove if exists does not raise on failure", %{migration_number: num} do - assert :ok == up(PoolRepo, num, NoErrorOnConditionalColumnMigration, log: false) - assert :ok == down(PoolRepo, num, NoErrorOnConditionalColumnMigration, log: false) - end - - @tag :add_column_if_not_exists - test "add column if not exists", %{migration_number: num} do - assert :ok == up(PoolRepo, num, AddColumnIfNotExistsMigration, log: false) - assert [2] == PoolRepo.all from p in "add_col_if_not_exists_migration", select: p.to_be_added - :ok = down(PoolRepo, num, AddColumnIfNotExistsMigration, log: false) - end - - @tag :remove_column_if_exists - test "remove column when exists", %{migration_number: num} do - assert :ok == up(PoolRepo, num, DropColumnIfExistsMigration, log: false) - assert catch_error(PoolRepo.all from p in "drop_col_if_exists_migration", select: p.to_be_removed) - :ok = down(PoolRepo, num, DropColumnIfExistsMigration, 
log: false) - end -end diff --git a/integration_test/exqlite/ecto_sql/migrator.exs b/integration_test/exqlite/ecto_sql/migrator.exs deleted file mode 100644 index 8668d15a..00000000 --- a/integration_test/exqlite/ecto_sql/migrator.exs +++ /dev/null @@ -1,243 +0,0 @@ -ecto_sql = Mix.Project.deps_paths()[:ecto_sql] -Code.require_file "#{ecto_sql}/integration_test/support/file_helpers.exs", __DIR__ - -defmodule Ecto.Integration.MigratorTest do - use Ecto.Integration.Case - - import Support.FileHelpers - import ExUnit.CaptureLog - import Ecto.Migrator - - alias Ecto.Integration.{TestRepo, PoolRepo} - alias Ecto.Migration.SchemaMigration - - setup config do - Process.register(self(), config.test) - PoolRepo.delete_all(SchemaMigration) - :ok - end - - defmodule AnotherSchemaMigration do - use Ecto.Migration - - def change do - execute TestRepo.create_prefix("bad_schema_migrations"), - TestRepo.drop_prefix("bad_schema_migrations") - - create table(:schema_migrations, prefix: "bad_schema_migrations") do - add :version, :string - add :inserted_at, :integer - end - end - end - - defmodule BrokenLinkMigration do - use Ecto.Migration - - def change do - Task.start_link(fn -> raise "oops" end) - Process.sleep(:infinity) - end - end - - defmodule GoodMigration do - use Ecto.Migration - - def up do - create table(:good_migration) - end - - def down do - drop table(:good_migration) - end - end - - defmodule BadMigration do - use Ecto.Migration - - def change do - execute "CREATE WHAT" - end - end - - test "migrations up and down" do - assert migrated_versions(PoolRepo) == [] - assert up(PoolRepo, 31, GoodMigration, log: false) == :ok - - [migration] = PoolRepo.all(SchemaMigration) - assert migration.version == 31 - assert migration.inserted_at - - assert migrated_versions(PoolRepo) == [31] - assert up(PoolRepo, 31, GoodMigration, log: false) == :already_up - assert migrated_versions(PoolRepo) == [31] - assert down(PoolRepo, 32, GoodMigration, log: false) == :already_down - assert migrated_versions(PoolRepo) == [31] - assert down(PoolRepo, 31, GoodMigration, log: false) == :ok - assert migrated_versions(PoolRepo) == [] - end - - @tag :prefix - test "does not commit migration if insert into schema migration fails" do - # First we create a new schema migration table in another prefix - assert up(PoolRepo, 33, AnotherSchemaMigration, log: false) == :ok - assert migrated_versions(PoolRepo) == [33] - - catch_error(up(PoolRepo, 34, GoodMigration, log: false, prefix: "bad_schema_migrations")) - catch_error(PoolRepo.all("good_migration")) - catch_error(PoolRepo.all("good_migration", prefix: "bad_schema_migrations")) - - assert down(PoolRepo, 33, AnotherSchemaMigration, log: false) == :ok - end - - test "bad execute migration" do - assert catch_error(up(PoolRepo, 31, BadMigration, log: false)) - end - - test "broken link migration" do - Process.flag(:trap_exit, true) - - assert capture_log(fn -> - {:ok, pid} = Task.start_link(fn -> up(PoolRepo, 31, BrokenLinkMigration, log: false) end) - assert_receive {:EXIT, ^pid, _} - end) =~ "oops" - - assert capture_log(fn -> - catch_exit(up(PoolRepo, 31, BrokenLinkMigration, log: false)) - end) =~ "oops" - end - - test "run up to/step migration", config do - in_tmp fn path -> - create_migration(47, config) - create_migration(48, config) - - assert [47] = run(PoolRepo, path, :up, step: 1, log: false) - assert count_entries() == 1 - - assert [48] = run(PoolRepo, path, :up, to: 48, log: false) - end - end - - test "run down to/step migration", config do - in_tmp fn path -> - 
migrations = [ - create_migration(49, config), - create_migration(50, config), - ] - - assert [49, 50] = run(PoolRepo, path, :up, all: true, log: false) - purge migrations - - assert [50] = run(PoolRepo, path, :down, step: 1, log: false) - purge migrations - - assert count_entries() == 1 - assert [50] = run(PoolRepo, path, :up, to: 50, log: false) - end - end - - test "runs all migrations", config do - in_tmp fn path -> - migrations = [ - create_migration(53, config), - create_migration(54, config), - ] - - assert [53, 54] = run(PoolRepo, path, :up, all: true, log: false) - assert [] = run(PoolRepo, path, :up, all: true, log: false) - purge migrations - - assert [54, 53] = run(PoolRepo, path, :down, all: true, log: false) - purge migrations - - assert count_entries() == 0 - assert [53, 54] = run(PoolRepo, path, :up, all: true, log: false) - end - end - - test "does not commit half transactions on bad syntax", config do - in_tmp fn path -> - migrations = [ - create_migration(64, config), - create_migration("65_+", config) - ] - - assert_raise SyntaxError, fn -> - run(PoolRepo, path, :up, all: true, log: false) - end - - refute_received {:up, _} - assert count_entries() == 0 - purge migrations - end - end - - @tag :lock_for_migrations - test "raises when connection pool is too small" do - config = Application.fetch_env!(:ecto_sql, PoolRepo) - config = Keyword.merge(config, pool_size: 1) - Application.put_env(:ecto_sql, __MODULE__.SingleConnectionRepo, config) - - defmodule SingleConnectionRepo do - use Ecto.Repo, otp_app: :ecto_sql, adapter: PoolRepo.__adapter__() - end - - {:ok, _pid} = SingleConnectionRepo.start_link() - - in_tmp fn path -> - exception_message = ~r/Migrations failed to run because the connection pool size is less than 2/ - - assert_raise Ecto.MigrationError, exception_message, fn -> - run(SingleConnectionRepo, path, :up, all: true, log: false) - end - end - end - - test "does not raise when connection pool is too small but there is no lock" do - config = Application.fetch_env!(:ecto_sql, PoolRepo) - config = Keyword.merge(config, pool_size: 1, migration_lock: nil) - Application.put_env(:ecto_sql, __MODULE__.SingleConnectionNoLockRepo, config) - - defmodule SingleConnectionNoLockRepo do - use Ecto.Repo, otp_app: :ecto_sql, adapter: PoolRepo.__adapter__() - end - - {:ok, _pid} = SingleConnectionNoLockRepo.start_link() - - in_tmp fn path -> - run(SingleConnectionNoLockRepo, path, :up, all: true, log: false) - end - end - - defp count_entries() do - PoolRepo.aggregate(SchemaMigration, :count, :version) - end - - defp create_migration(num, config) do - module = Module.concat(__MODULE__, "Migration#{num}") - - File.write! 
"#{num}_migration_#{num}.exs", """ - defmodule #{module} do - use Ecto.Migration - - def up do - send #{inspect config.test}, {:up, #{inspect num}} - end - - def down do - send #{inspect config.test}, {:down, #{inspect num}} - end - end - """ - - module - end - - defp purge(modules) do - Enum.each(List.wrap(modules), fn m -> - :code.delete m - :code.purge m - end) - end -end diff --git a/integration_test/exqlite/test_helper.exs b/integration_test/exqlite/test_helper.exs deleted file mode 100644 index e9330ee2..00000000 --- a/integration_test/exqlite/test_helper.exs +++ /dev/null @@ -1,120 +0,0 @@ -Logger.configure(level: :info) - -Application.put_env(:ecto, :primary_key_type, :id) -Application.put_env(:ecto, :async_integration_tests, false) - -ecto = Mix.Project.deps_paths()[:ecto] -ecto_sql = Mix.Project.deps_paths()[:ecto_sql] - -Code.require_file("#{ecto_sql}/integration_test/support/repo.exs", __DIR__) - -alias Ecto.Integration.TestRepo - -Application.put_env(:exqlite, TestRepo, - adapter: Ecto.Adapters.Exqlite, - database: "/tmp/exqlite_integration_test.db", - journal_mode: :wal, - cache_size: -64000, - temp_store: :memory, - pool: Ecto.Adapters.SQL.Sandbox, - pool_size: 5, - show_sensitive_data_on_connection_error: true -) - -# Pool repo for non-async tests -alias Ecto.Integration.PoolRepo - -Application.put_env(:exqlite, PoolRepo, - adapter: Ecto.Adapters.Exqlite, - database: "/tmp/exqlite_integration_pool_test.db", - journal_mode: :wal, - cache_size: -64000, - temp_store: :memory, - pool_size: 5, - show_sensitive_data_on_connection_error: true -) - -# needed since some of the integration tests rely on fetching env from :ecto_sql -Application.put_env(:ecto_sql, TestRepo, Application.get_env(:exqlite, TestRepo)) -Application.put_env(:ecto_sql, PoolRepo, Application.get_env(:exqlite, PoolRepo)) - -defmodule Ecto.Integration.PoolRepo do - use Ecto.Integration.Repo, otp_app: :exqlite, adapter: Ecto.Adapters.Exqlite -end - -Code.require_file "#{ecto}/integration_test/support/schemas.exs", __DIR__ -Code.require_file "#{ecto_sql}/integration_test/support/migration.exs", __DIR__ - -defmodule Ecto.Integration.Case do - use ExUnit.CaseTemplate - - setup do - :ok = Ecto.Adapters.SQL.Sandbox.checkout(TestRepo) - end -end - -{:ok, _} = Ecto.Adapters.Exqlite.ensure_all_started(TestRepo.config(), :temporary) - -# Load up the repository, start it, and run migrations -_ = Ecto.Adapters.Exqlite.storage_down(TestRepo.config()) -:ok = Ecto.Adapters.Exqlite.storage_up(TestRepo.config()) - -_ = Ecto.Adapters.Exqlite.storage_down(PoolRepo.config()) -:ok = Ecto.Adapters.Exqlite.storage_up(PoolRepo.config()) - -{:ok, _} = TestRepo.start_link() -{:ok, _pid} = PoolRepo.start_link() - -# migrate the pool repo -case Ecto.Migrator.migrated_versions(PoolRepo) do - [] -> - :ok = Ecto.Migrator.up(PoolRepo, 0, Ecto.Integration.Migration, log: false) - - _ -> - :ok = Ecto.Migrator.down(PoolRepo, 0, Ecto.Integration.Migration, log: false) - :ok = Ecto.Migrator.up(PoolRepo, 0, Ecto.Integration.Migration, log: false) -end - -:ok = Ecto.Migrator.up(TestRepo, 0, Ecto.Integration.Migration, log: false) -Ecto.Adapters.SQL.Sandbox.mode(TestRepo, :manual) -Process.flag(:trap_exit, true) - -ExUnit.start( - exclude: [ - :delete_with_join, - :right_join, - # SQLite does not have an array type - :array_type, - :transaction_isolation, - :insert_cell_wise_defaults, - :insert_select, - # sqlite does not support microsecond precision, only millisecond - :microsecond_precision, - # sqlite supports FKs, but does not return sufficient 
data
-    # for ecto to support matching on a given constraint violation name
-    # which is what most of the tests validate
-    :foreign_key_constraint,
-    # SQLite built with -DSQLITE_LIKE_DOESNT_MATCH_BLOBS=1
-    # does not support using LIKE on BLOB types
-    :like_match_blob,
-    # SQLite will return a string for schemaless map types as
-    # Ecto does not have enough information to call the associated loader
-    # that converts the string JSON representation into a map
-    :map_type_schemaless,
-
-    # right now lock_for_migrations() effectively does nothing, because SQLite
-    # is single-writer, so there isn't really a need for us to lock anything.
-    # ecto assumes all implementing adapters need >= 2 connections for
-    # migrations, which is not true for SQLite
-    :lock_for_migrations,
-
-    # Migrations we don't support
-    :prefix,
-    :add_column_if_not_exists,
-    :remove_column_if_exists,
-    :alter_primary_key,
-    :alter_foreign_key,
-    :assigns_id_type,
-    :modify_column
-  ]
-)
diff --git a/lib/ecto/adapters/exqlite.ex b/lib/ecto/adapters/exqlite.ex
deleted file mode 100644
index 556fb3db..00000000
--- a/lib/ecto/adapters/exqlite.ex
+++ /dev/null
@@ -1,256 +0,0 @@
-defmodule Ecto.Adapters.Exqlite do
-  use Ecto.Adapters.SQL,
-    driver: :exqlite
-
-  @behaviour Ecto.Adapter.Storage
-  @behaviour Ecto.Adapter.Structure
-
-  alias Ecto.Adapters.Exqlite.Codec
-
-  @impl Ecto.Adapter.Storage
-  def storage_down(options) do
-    db_path = Keyword.fetch!(options, :database)
-
-    with :ok <- File.rm(db_path) do
-      File.rm(db_path <> "-shm")
-      File.rm(db_path <> "-wal")
-      :ok
-    else
-      _ -> {:error, :already_down}
-    end
-  end
-
-  @impl Ecto.Adapter.Storage
-  def storage_status(options) do
-    db_path = Keyword.fetch!(options, :database)
-
-    if File.exists?(db_path) do
-      :up
-    else
-      :down
-    end
-  end
-
-  @impl Ecto.Adapter.Storage
-  def storage_up(options) do
-    options
-    |> Keyword.get(:database)
-    |> storage_up_with_path()
-  end
-
-  @impl Ecto.Adapter.Migration
-  def supports_ddl_transaction?(), do: false
-
-  @impl Ecto.Adapter.Migration
-  def lock_for_migrations(_meta, query, _options, fun) do
-    fun.(query)
-  end
-
-  @impl Ecto.Adapter.Structure
-  def structure_dump(default, config) do
-    path = config[:dump_path] || Path.join(default, "structure.sql")
-
-    with {:ok, contents} <- dump_schema(config),
-         {:ok, versions} <- dump_versions(config) do
-      File.mkdir_p!(Path.dirname(path))
-      File.write!(path, contents <> versions)
-      {:ok, path}
-    else
-      err -> err
-    end
-  end
-
-  @impl Ecto.Adapter.Structure
-  def structure_load(default, config) do
-    path = config[:dump_path] || Path.join(default, "structure.sql")
-
-    case run_with_cmd("sqlite3", [config[:database], ".read #{path}"]) do
-      {_output, 0} -> {:ok, path}
-      {output, _} -> {:error, output}
-    end
-  end
-
-  ##
-  ## Loaders
-  ##
-
-  @impl Ecto.Adapter
-  def loaders(:boolean, type) do
-    [&Codec.bool_decode/1, type]
-  end
-
-  @impl Ecto.Adapter
-  def loaders(:naive_datetime_usec, type) do
-    [&Codec.naive_datetime_decode/1, type]
-  end
-
-  @impl Ecto.Adapter
-  def loaders(:utc_datetime_usec, type) do
-    [&Codec.datetime_decode/1, type]
-  end
-
-  @impl Ecto.Adapter
-  def loaders(:utc_datetime, type) do
-    [&Codec.datetime_decode/1, type]
-  end
-
-  @impl Ecto.Adapter
-  def loaders(:naive_datetime, type) do
-    [&Codec.naive_datetime_decode/1, type]
-  end
-
-  @impl Ecto.Adapter
-  def loaders(:datetime, type) do
-    [&Codec.datetime_decode/1, type]
-  end
-
-  @impl Ecto.Adapter
-  def loaders(:date, type) do
-    [&Codec.date_decode/1, type]
-  end
-
-  @impl Ecto.Adapter
-  def loaders({:map, _}, type) do
-    [&Codec.json_decode/1, &Ecto.Type.embedded_load(type, &1, :json)]
-  end
-
-  @impl Ecto.Adapter
-  def loaders({:array, _}, type) do
-    [&Codec.json_decode/1, type]
-  end
-
-  @impl Ecto.Adapter
-  def loaders(:map, type) do
-    [&Codec.json_decode/1, type]
-  end
-
-  @impl Ecto.Adapter
-  def loaders(:float, type) do
-    [&Codec.float_decode/1, type]
-  end
-
-  @impl Ecto.Adapter
-  def loaders(:decimal, type) do
-    [&Codec.decimal_decode/1, type]
-  end
-
-  # When we have, e.g., a max(created_date) function, Ecto does not truly
-  # know the return type, hence :maybe.
-  # See Ecto.Query.Planner.collect_fields.
-  @impl Ecto.Adapter
-  def loaders({:maybe, :naive_datetime}, type) do
-    [&Codec.naive_datetime_decode/1, type]
-  end
-
-  @impl Ecto.Adapter
-  def loaders(_, type) do
-    [type]
-  end
-
-  ##
-  ## Dumpers
-  ##
-
-  @impl Ecto.Adapter
-  def dumpers(:binary, type) do
-    [type, &Codec.blob_encode/1]
-  end
-
-  @impl Ecto.Adapter
-  def dumpers(:boolean, type) do
-    [type, &Codec.bool_encode/1]
-  end
-
-  @impl Ecto.Adapter
-  def dumpers(:decimal, type) do
-    [type, &Codec.decimal_encode/1]
-  end
-
-  @impl Ecto.Adapter
-  def dumpers(:time, type) do
-    [type, &Codec.time_encode/1]
-  end
-
-  @impl Ecto.Adapter
-  def dumpers(:naive_datetime, type) do
-    [type, &Codec.naive_datetime_encode/1]
-  end
-
-  @impl Ecto.Adapter
-  def dumpers({:array, _}, type) do
-    [type, &Codec.json_encode/1]
-  end
-
-  @impl Ecto.Adapter
-  def dumpers({:map, _}, type) do
-    [&Ecto.Type.embedded_dump(type, &1, :json), &Codec.json_encode/1]
-  end
-
-  @impl Ecto.Adapter
-  def dumpers(:map, type) do
-    [type, &Codec.json_encode/1]
-  end
-
-  @impl Ecto.Adapter
-  def dumpers(_primitive, type) do
-    [type]
-  end
-
-  ##
-  ## HELPERS
-  ##
-
-  defp storage_up_with_path(nil) do
-    raise ArgumentError,
-          """
-          No SQLite database path specified. Please check the configuration for your Repo.
-          Your config/*.exs file should have something like this in it:
-
-            config :my_app, MyApp.Repo,
-              adapter: Ecto.Adapters.Exqlite,
-              database: "/path/to/sqlite/database"
-          """
-  end
-
-  defp storage_up_with_path(db_path) do
-    if File.exists?(db_path) do
-      {:error, :already_up}
-    else
-      db_path |> Path.dirname() |> File.mkdir_p!()
-      {:ok, db} = Exqlite.Sqlite3.open(db_path)
-      :ok = Exqlite.Sqlite3.close(db)
-    end
-  end
-
-  defp dump_versions(config) do
-    table = config[:migration_source] || "schema_migrations"
-
-    # The `.dump` command also returns CREATE TABLE, which would clash with the
-    # CREATE TABLE we already run in dump_schema. So we set the mode to insert,
-    # which makes every SELECT statement emit its results as INSERT statements
-    # instead of pure text data.
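-    # For example (illustrative, not taken from this codebase): with
-    # `.mode insert schema_migrations`, the subsequent SELECT prints each row
-    # as a statement along the lines of
-    #
-    #   INSERT INTO schema_migrations VALUES(20210317210533,'2021-03-17 21:05:33');
-    #
-    # The exact formatting depends on the installed sqlite3 CLI version.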
-    case run_with_cmd("sqlite3", [
-      config[:database],
-      ".mode insert #{table}",
-      "SELECT * FROM #{table}"
-    ]) do
-      {output, 0} -> {:ok, output}
-      {output, _} -> {:error, output}
-    end
-  end
-
-  defp dump_schema(config) do
-    case run_with_cmd("sqlite3", [config[:database], ".schema"]) do
-      {output, 0} -> {:ok, output}
-      {output, _} -> {:error, output}
-    end
-  end
-
-  defp run_with_cmd(cmd, args) do
-    unless System.find_executable(cmd) do
-      raise "could not find executable `#{cmd}` in path, " <>
-              "please guarantee it is available before running ecto commands"
-    end
-
-    System.cmd(cmd, args, stderr_to_stdout: true)
-  end
-end
diff --git a/lib/ecto/adapters/exqlite/codec.ex b/lib/ecto/adapters/exqlite/codec.ex
deleted file mode 100644
index 500d76a6..00000000
--- a/lib/ecto/adapters/exqlite/codec.ex
+++ /dev/null
@@ -1,98 +0,0 @@
-defmodule Ecto.Adapters.Exqlite.Codec do
-  def bool_decode(nil), do: {:ok, nil}
-  def bool_decode(0), do: {:ok, false}
-  def bool_decode("0"), do: {:ok, false}
-  def bool_decode("FALSE"), do: {:ok, false}
-  def bool_decode(1), do: {:ok, true}
-  def bool_decode("1"), do: {:ok, true}
-  def bool_decode("TRUE"), do: {:ok, true}
-  def bool_decode(_), do: :error
-
-  def json_decode(nil), do: {:ok, nil}
-
-  def json_decode(x) when is_binary(x) do
-    Application.get_env(:exqlite, :json_library, Jason).decode(x)
-  end
-
-  def json_decode(_), do: :error
-
-  def float_decode(nil), do: {:ok, nil}
-  def float_decode(%Decimal{} = decimal), do: {:ok, Decimal.to_float(decimal)}
-  def float_decode(x) when is_integer(x), do: {:ok, x / 1}
-  def float_decode(_), do: :error
-
-  def decimal_decode(nil), do: {:ok, nil}
-
-  def decimal_decode(x) when is_float(x) do
-    try do
-      {:ok, Decimal.from_float(x)}
-    rescue
-      Decimal.Error -> :error
-    end
-  end
-
-  def decimal_decode(x) when is_binary(x) or is_integer(x) do
-    try do
-      {:ok, Decimal.new(x)}
-    rescue
-      Decimal.Error -> :error
-    end
-  end
-
-  def decimal_decode(_), do: :error
-
-  def datetime_decode(nil), do: {:ok, nil}
-
-  def datetime_decode(val) do
-    # TODO: Should we be preserving the timezone? SQLite3 stores everything
-    # shifted to UTC. sqlite_ecto2 used a custom field type "TEXT_DATETIME"
-    # to preserve the original string inserted. But I don't know if that
-    # is desirable or not.
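-    # As an illustration of the current behavior:
-    #
-    #   DateTime.from_iso8601("2021-02-28T10:00:00+02:00")
-    #   #=> {:ok, ~U[2021-02-28 08:00:00Z], 7200}
-    #
-    # i.e. the parsed value is already shifted to UTC, and the original
-    # offset is discarded below.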
- # - # @warmwaffles 2021-02-28 - case DateTime.from_iso8601(val) do - {:ok, dt, _offset} -> {:ok, dt} - _ -> :error - end - end - - def naive_datetime_decode(nil), do: {:ok, nil} - - def naive_datetime_decode(val) do - case NaiveDateTime.from_iso8601(val) do - {:ok, dt} -> {:ok, dt} - _ -> :error - end - end - - def date_decode(nil), do: {:ok, nil} - - def date_decode(val) do - case Date.from_iso8601(val) do - {:ok, d} -> {:ok, d} - _ -> :error - end - end - - def json_encode(value) do - Application.get_env(:exqlite, :json_library, Jason).encode(value) - end - - def blob_encode(value), do: {:ok, {:blob, value}} - - def bool_encode(false), do: {:ok, 0} - def bool_encode(true), do: {:ok, 1} - - def decimal_encode(%Decimal{} = x) do - {:ok, Decimal.to_string(x, :normal)} - end - # def decimal_encode(x), do: {:ok, x} - - def time_encode(value) do - {:ok, value} - end - - def naive_datetime_encode(value) do - {:ok, NaiveDateTime.to_iso8601(value)} - end -end diff --git a/lib/ecto/adapters/exqlite/connection.ex b/lib/ecto/adapters/exqlite/connection.ex deleted file mode 100644 index ed23a675..00000000 --- a/lib/ecto/adapters/exqlite/connection.ex +++ /dev/null @@ -1,1704 +0,0 @@ -defmodule Ecto.Adapters.Exqlite.Connection do - @behaviour Ecto.Adapters.SQL.Connection - - alias Ecto.Migration.Constraint - alias Ecto.Migration.Index - alias Ecto.Migration.Reference - alias Ecto.Migration.Table - alias Ecto.Query.BooleanExpr - alias Ecto.Query.JoinExpr - alias Ecto.Query.QueryExpr - alias Ecto.Query.WithExpr - - import Ecto.Adapters.Exqlite.DataType - - @parent_as __MODULE__ - @connect_buffer 50 - - def sleep(opts) do - :timer.sleep(:rand.uniform(@connect_buffer)) - opts - end - - defp default_opts(opts) do - # todo: we may want to consider wrapping any provided :configure - # with our custom connection buffering logic - opts - |> Keyword.put_new(:configure, {__MODULE__, :sleep, []}) - end - - def start_link(opts) do - opts = default_opts(opts) - DBConnection.start_link(Exqlite.Connection, opts) - end - - @impl true - def child_spec(options) do - {:ok, _} = Application.ensure_all_started(:db_connection) - options = default_opts(options) - DBConnection.child_spec(Exqlite.Connection, options) - end - - @impl true - def prepare_execute(conn, name, sql, params, options) do - query = Exqlite.Query.build(name: name, statement: sql) - - case DBConnection.prepare_execute(conn, query, params, options) do - {:ok, _, _} = ok -> ok - {:error, %Exqlite.Error{}} = error -> error - {:error, err} -> raise err - end - end - - @impl true - def execute(conn, %Exqlite.Query{ref: ref} = cached, params, options) - when ref != nil do - DBConnection.execute(conn, cached, params, options) - end - - @impl true - def execute( - conn, - %Exqlite.Query{statement: statement, ref: nil}, - params, - options - ) do - execute(conn, statement, params, options) - end - - @impl true - def execute(conn, sql, params, options) when is_binary(sql) or is_list(sql) do - query = Exqlite.Query.build(name: "", statement: IO.iodata_to_binary(sql)) - - case DBConnection.prepare_execute(conn, query, params, options) do - {:ok, %Exqlite.Query{}, result} -> {:ok, result} - {:error, %Exqlite.Error{}} = error -> error - {:error, err} -> raise err - end - end - - @impl true - def execute(conn, query, params, options) do - case DBConnection.execute(conn, query, params, options) do - {:ok, _} = ok -> ok - {:error, %ArgumentError{} = err} -> {:reset, err} - {:error, %Exqlite.Error{}} = error -> error - {:error, err} -> raise err - end - end - - @impl 
true
  def query(conn, sql, params, options) do
-    query = Exqlite.Query.build(statement: IO.iodata_to_binary(sql))
-
-    case DBConnection.execute(conn, query, params, options) do
-      {:ok, _, result} -> {:ok, result}
-      other -> other
-    end
-  end
-
-  @impl true
-  def stream(conn, sql, params, options) do
-    query = Exqlite.Query.build(statement: sql)
-    DBConnection.stream(conn, query, params, options)
-  end
-
-  # we want to return the name of the underlying index that caused
-  # the constraint error, but in SQLite as far as I can tell there
-  # is no way to do this, so we name the index according to ecto
-  # convention, even if technically it _could_ have a different name
-  defp constraint_name_hack(constraint) do
-    if String.contains?(constraint, ", ") do
-      # "a.b, a.c" -> a_b_c_index
-      constraint
-      |> String.split(", ")
-      |> Enum.with_index()
-      |> Enum.map(fn {table_col, idx} ->
-        case idx do
-          0 -> table_col |> String.replace(".", "_")
-          _ -> table_col |> String.split(".") |> List.last()
-        end
-      end)
-      |> Enum.concat(["index"])
-      |> Enum.join("_")
-    else
-      constraint
-      |> String.split(".")
-      |> Enum.concat(["index"])
-      |> Enum.join("_")
-    end
-  end
-
-  @impl true
-  def to_constraints(%Exqlite.Error{message: "UNIQUE constraint failed: " <> constraint}, _opts) do
-    [unique: constraint_name_hack(constraint)]
-  end
-
-  def to_constraints(%Exqlite.Error{message: "FOREIGN KEY constraint failed"}, _opts) do
-    # unfortunately we have no other data from SQLite
-    [foreign_key: nil]
-  end
-
-  def to_constraints(_, _), do: []
-
-  ##
-  ## Queries
-  ##
-
-  @impl true
-  def all(%Ecto.Query{lock: lock}) when lock != nil do
-    raise ArgumentError, "locks are not supported by SQLite3"
-  end
-
-  @impl true
-  def all(query, as_prefix \\ []) do
-    sources = create_names(query, as_prefix)
-
-    cte = cte(query, sources)
-    from = from(query, sources)
-    select = select(query, sources)
-    join = join(query, sources)
-    where = where(query, sources)
-    group_by = group_by(query, sources)
-    having = having(query, sources)
-    window = window(query, sources)
-    combinations = combinations(query)
-    order_by = order_by(query, sources)
-    limit = limit(query, sources)
-    offset = offset(query, sources)
-
-    [
-      cte,
-      select,
-      from,
-      join,
-      where,
-      group_by,
-      having,
-      window,
-      combinations,
-      order_by,
-      limit,
-      offset
-    ]
-  end
-
-  @impl true
-  def update_all(query, prefix \\ nil) do
-    %{from: %{source: source}} = query
-
-    sources = create_names(query, [])
-    cte = cte(query, sources)
-    {from, name} = get_source(query, sources, 0, source)
-
-    fields =
-      if prefix do
-        update_fields(:on_conflict, query, sources)
-      else
-        update_fields(:update, query, sources)
-      end
-
-    # TODO: Add support for `UPDATE OR ROLLBACK`-style conflict clauses
-
-    {join, wheres} = using_join(query, :update_all, "FROM", sources)
-    prefix = prefix || ["UPDATE ", from, " AS ", name, " SET "]
-    where = where(%{query | wheres: wheres ++ query.wheres}, sources)
-
-    [
-      cte,
-      prefix,
-      fields,
-      join,
-      where,
-      returning(query, sources)
-    ]
-  end
-
-  @impl true
-  def delete_all(%Ecto.Query{joins: [_ | _]}) do
-    # TODO: It is supported but not in the traditional sense
-    raise ArgumentError, "JOINS are not supported on DELETE statements by SQLite"
-  end
-
-  @impl true
-  def delete_all(query) do
-    sources = create_names(query, [])
-    cte = cte(query, sources)
-
-    from = from(query, sources)
-    where = where(query, sources)
-
-    [
-      cte,
-      "DELETE",
-      from,
-      where,
-      returning(query, sources)
-    ]
-  end
-
-  @impl true
-  def insert(prefix, table, header, rows, on_conflict,
returning) do
-    insert(prefix, table, header, rows, on_conflict, returning, [])
-  end
-
-  def insert(prefix, table, [], [[]], on_conflict, returning, []) do
-    [
-      "INSERT INTO ",
-      quote_table(prefix, table),
-      insert_as(on_conflict),
-      " DEFAULT VALUES",
-      returning(returning)
-    ]
-  end
-  def insert(prefix, table, header, rows, on_conflict, returning, _placeholders) do
-    fields = quote_names(header)
-
-    [
-      "INSERT INTO ",
-      quote_table(prefix, table),
-      insert_as(on_conflict),
-      " (",
-      fields,
-      ") ",
-      insert_all(rows),
-      on_conflict(on_conflict, header),
-      returning(returning)
-    ]
-  end
-
-  @impl true
-  def update(prefix, table, fields, filters, returning) do
-    fields = intersperse_map(fields, ", ", &[quote_name(&1), " = ?"])
-
-    filters =
-      intersperse_map(filters, " AND ", fn
-        {field, nil} ->
-          [quote_name(field), " IS NULL"]
-
-        {field, _value} ->
-          [quote_name(field), " = ?"]
-      end)
-
-    [
-      "UPDATE ",
-      quote_table(prefix, table),
-      " SET ",
-      fields,
-      " WHERE ",
-      filters,
-      returning(returning)
-    ]
-  end
-
-  @impl true
-  def delete(prefix, table, filters, returning) do
-    filters =
-      intersperse_map(filters, " AND ", fn
-        {field, nil} ->
-          [quote_name(field), " IS NULL"]
-
-        {field, _value} ->
-          [quote_name(field), " = ?"]
-      end)
-
-    [
-      "DELETE FROM ",
-      quote_table(prefix, table),
-      " WHERE ",
-      filters,
-      returning(returning)
-    ]
-  end
-
-  @impl true
-  def explain_query(conn, query, params, opts) do
-    case query(conn, build_explain_query(query), params, opts) do
-      {:ok, %Exqlite.Result{} = result} ->
-        {:ok, Ecto.Adapters.SQL.format_table(result)}
-
-      error ->
-        error
-    end
-  end
-
-  ##
-  ## DDL
-  ##
-
-  @impl true
-  def execute_ddl({_command, %Table{options: keyword}, _}) when keyword != nil do
-    raise ArgumentError, "SQLite3 adapter does not support keyword lists in :options"
-  end
-
-  @impl true
-  def execute_ddl({:create, %Table{} = table, columns}) do
-    {table, composite_pk_def} = composite_pk_definition(table, columns)
-    composite_fk_defs = composite_fk_definitions(table, columns)
-
-    [
-      [
-        "CREATE TABLE ",
-        quote_table(table.prefix, table.name),
-        ?\s,
-        ?(,
-        column_definitions(table, columns),
-        composite_pk_def,
-        composite_fk_defs,
-        ?),
-        options_expr(table.options)
-      ]
-    ]
-  end
-
-  @impl true
-  def execute_ddl({:create_if_not_exists, %Table{} = table, columns}) do
-    {table, composite_pk_def} = composite_pk_definition(table, columns)
-    composite_fk_defs = composite_fk_definitions(table, columns)
-
-    [
-      [
-        "CREATE TABLE IF NOT EXISTS ",
-        quote_table(table.prefix, table.name),
-        ?\s,
-        ?(,
-        column_definitions(table, columns),
-        composite_pk_def,
-        composite_fk_defs,
-        ?),
-        options_expr(table.options)
-      ]
-    ]
-  end
-
-  @impl true
-  def execute_ddl({:drop, %Table{} = table}) do
-    [
-      [
-        "DROP TABLE ",
-        quote_table(table.prefix, table.name)
-      ]
-    ]
-  end
-
-  @impl true
-  def execute_ddl({:drop_if_exists, %Table{} = table}) do
-    [
-      [
-        "DROP TABLE IF EXISTS ",
-        quote_table(table.prefix, table.name)
-      ]
-    ]
-  end
-
-  @impl true
-  def execute_ddl({:alter, %Table{} = table, changes}) do
-    Enum.map(changes, fn change ->
-      [
-        "ALTER TABLE ",
-        quote_table(table.prefix, table.name),
-        ?\s,
-        column_change(table, change)
-      ]
-    end)
-  end
-
-  @impl true
-  def execute_ddl({:create, %Index{} = index}) do
-    fields = intersperse_map(index.columns, ", ", &index_expr/1)
-
-    [
-      [
-        "CREATE ",
-        if_do(index.unique, "UNIQUE "),
-        "INDEX ",
-        quote_name(index.name),
-        " ON ",
-        quote_table(index.prefix, index.table),
-        " (",
-        fields,
-        ?),
-        if_do(index.where, [" WHERE ", to_string(index.where)])
-      ]
-    ]
-  end
-
-  @impl true
-  def execute_ddl({:create_if_not_exists, %Index{} = index}) do
-    fields = intersperse_map(index.columns, ", ", &index_expr/1)
-
-    [
-      [
-        "CREATE ",
-        if_do(index.unique, "UNIQUE "),
-        "INDEX IF NOT EXISTS ",
-        quote_name(index.name),
-        " ON ",
-        quote_table(index.prefix, index.table),
-        " (",
-        fields,
-        ?),
-        if_do(index.where, [" WHERE ", to_string(index.where)])
-      ]
-    ]
-  end
-
-  @impl true
-  def execute_ddl({:drop, %Index{} = index}) do
-    [
-      [
-        "DROP INDEX ",
-        quote_table(index.prefix, index.name)
-      ]
-    ]
-  end
-
-  @impl true
-  def execute_ddl({:drop_if_exists, %Index{} = index}) do
-    [
-      [
-        "DROP INDEX IF EXISTS ",
-        quote_table(index.prefix, index.name)
-      ]
-    ]
-  end
-
-  @impl true
-  def execute_ddl({:rename, %Table{} = current_table, %Table{} = new_table}) do
-    [
-      [
-        "ALTER TABLE ",
-        quote_table(current_table.prefix, current_table.name),
-        " RENAME TO ",
-        quote_table(nil, new_table.name)
-      ]
-    ]
-  end
-
-  @impl true
-  def execute_ddl({:rename, %Table{} = current_table, old_col, new_col}) do
-    [
-      [
-        "ALTER TABLE ",
-        quote_table(current_table.prefix, current_table.name),
-        " RENAME COLUMN ",
-        quote_name(old_col),
-        " TO ",
-        quote_name(new_col)
-      ]
-    ]
-  end
-
-  @impl true
-  def execute_ddl({:create, %Constraint{check: check}}) when is_binary(check) do
-    raise ArgumentError, "SQLite3 adapter does not support check constraints"
-  end
-
-  @impl true
-  def execute_ddl({:create, %Constraint{exclude: exclude}}) when is_binary(exclude) do
-    raise ArgumentError, "SQLite3 adapter does not support exclusion constraints"
-  end
-
-  @impl true
-  def execute_ddl({:create, %Constraint{}}) do
-    raise ArgumentError, "ALTER TABLE with constraints not supported by SQLite3"
-  end
-
-  @impl true
-  def execute_ddl({:drop, %Constraint{}}) do
-    raise ArgumentError, "ALTER TABLE with constraints not supported by SQLite3"
-  end
-
-  @impl true
-  def execute_ddl({:drop_if_exists, %Constraint{}}) do
-    raise ArgumentError, "SQLite3 adapter does not support constraints"
-  end
-
-  @impl true
-  def execute_ddl(string) when is_binary(string), do: [string]
-
-  @impl true
-  def execute_ddl(keyword) when is_list(keyword) do
-    raise ArgumentError, "SQLite3 adapter does not support keyword lists in execute"
-  end
-
-  @impl true
-  def ddl_logs(_), do: []
-
-  @impl true
-  def table_exists_query(table) do
-    {"SELECT name FROM sqlite_master WHERE type='table' AND name=? LIMIT 1", [table]}
-  end
-
-  def build_explain_query(query) do
-    IO.iodata_to_binary(["EXPLAIN ", query])
-  end
-
-  ##
-  ## Query generation
-  ##
-
-  defp on_conflict({:raise, _, []}, _header), do: []
-
-  defp on_conflict({:nothing, _, targets}, _header) do
-    [" ON CONFLICT ", conflict_target(targets) | "DO NOTHING"]
-  end
-
-  defp on_conflict({:replace_all, _, {:constraint, _}}, _header) do
-    raise ArgumentError, "Upsert in SQLite3 does not support ON CONSTRAINT"
-  end
-
-  defp on_conflict({:replace_all, _, []}, _header) do
-    raise ArgumentError, "Upsert in SQLite3 requires :conflict_target"
-  end
-
-  defp on_conflict({:replace_all, _, targets}, header) do
-    [" ON CONFLICT ", conflict_target(targets), "DO " | replace(header)]
-  end
-
-  defp on_conflict({fields, _, targets}, _header) when is_list(fields) do
-    [" ON CONFLICT ", conflict_target(targets), "DO " | replace(fields)]
-  end
-
-  defp on_conflict({query, _, targets}, _header) do
-    [" ON CONFLICT ", conflict_target(targets), "DO " | update_all(query, "UPDATE SET ")]
-  end
-
-  defp conflict_target([]), do: ""
-
-  defp conflict_target(targets) do
-    [?(, intersperse_map(targets, ?,, &quote_name/1), ?), ?\s]
-  end
-
-  defp replace(fields) do
-    [
-      "UPDATE SET "
-      | intersperse_map(fields, ?,, fn field ->
-          quoted = quote_name(field)
-          [quoted, " = ", "EXCLUDED." | quoted]
-        end)
-    ]
-  end
-
-  def insert_all(rows), do: insert_all(rows, 1)
-
-  def insert_all(%Ecto.Query{} = query, _counter) do
-    [?(, all(query), ?)]
-  end
-
-  def insert_all(rows, counter) do
-    [
-      "VALUES ",
-      intersperse_reduce(
-        rows,
-        ?,,
-        counter,
-        fn row, counter ->
-          {row, counter} = insert_each(row, counter)
-          {[?(, row, ?)], counter}
-        end
-      )
-      |> elem(0)
-    ]
-  end
-
-  def insert_each(values, counter) do
-    intersperse_reduce(values, ?,, counter, fn
-      nil, _counter ->
-        raise ArgumentError,
-              "Cell-wise default values are not supported on INSERT statements by SQLite3"
-
-      _, counter ->
-        # TODO: Should we have cell wise value support?
-        # Essentially ``?1 ?2 ?3`` instead of ``? ? ?``
-        # {['?' 
| Integer.to_string(counter)], counter + 1} - {['?'], counter + 1} - end) - end - - defp insert_as({%{sources: sources}, _, _}) do - {_expr, name, _schema} = create_name(sources, 0, []) - [" AS " | name] - end - defp insert_as({_, _, _}) do - [] - end - - binary_ops = [ - ==: " = ", - !=: " != ", - <=: " <= ", - >=: " >= ", - <: " < ", - >: " > ", - +: " + ", - -: " - ", - *: " * ", - /: " / ", - and: " AND ", - or: " OR ", - like: " LIKE " - ] - - @binary_ops Keyword.keys(binary_ops) - - Enum.map(binary_ops, fn {op, str} -> - def handle_call(unquote(op), 2), do: {:binary_op, unquote(str)} - end) - - def handle_call(fun, _arity), do: {:fun, Atom.to_string(fun)} - - def distinct(nil, _sources, _query), do: [] - def distinct(%QueryExpr{expr: true}, _sources, _query), do: "DISTINCT " - def distinct(%QueryExpr{expr: false}, _sources, _query), do: [] - - def distinct(%QueryExpr{expr: expression}, _sources, query) - when is_list(expression) do - raise Ecto.QueryError, - query: query, - message: "DISTINCT with multiple columns is not supported by SQLite3" - end - - def select(%{select: %{fields: fields}, distinct: distinct} = query, sources) do - ["SELECT ", distinct(distinct, sources, query) | select_fields(fields, sources, query)] - end - - defp select_fields([], _sources, _query), do: "1" - - defp select_fields(fields, sources, query) do - intersperse_map(fields, ", ", fn - {:&, _, [idx]} -> - case elem(sources, idx) do - {source, _, nil} -> - raise Ecto.QueryError, - query: query, - message: """ - SQLite3 does not support selecting all fields from #{source} \ - without a schema. Please specify a schema or specify exactly \ - which fields you want to select\ - """ - - {_, source, _} -> - source - end - - {key, value} -> - [expr(value, sources, query), " AS ", quote_name(key)] - - value -> - expr(value, sources, query) - end) - end - - def from(%{from: %{source: source}} = query, sources) do - {from, name} = get_source(query, sources, 0, source) - - [ - " FROM ", - from, - " AS ", - name - ] - end - - def cte( - %{with_ctes: %WithExpr{recursive: recursive, queries: [_ | _] = queries}} = - query, - sources - ) do - recursive_opt = if recursive, do: "RECURSIVE ", else: "" - ctes = intersperse_map(queries, ", ", &cte_expr(&1, sources, query)) - - [ - "WITH ", - recursive_opt, - ctes, - " " - ] - end - - def cte(%{with_ctes: _}, _), do: [] - - defp cte_expr({name, cte}, sources, query) do - [ - quote_name(name), - " AS ", - cte_query(cte, sources, query) - ] - end - - defp cte_query(%Ecto.Query{} = query, _, _), do: ["(", all(query), ")"] - - defp cte_query(%QueryExpr{expr: expression}, sources, query), - do: expr(expression, sources, query) - - defp update_fields(type, %{updates: updates} = query, sources) do - fields = - for( - %{expr: expression} <- updates, - {op, kw} <- expression, - {key, value} <- kw, - do: update_op(op, update_key(type, key, query, sources), value, sources, query) - ) - - Enum.intersperse(fields, ", ") - end - - defp update_key(_kind, key, _query, _sources) do - quote_name(key) - end - - defp update_op(:set, quoted_key, value, sources, query) do - [ - quoted_key, - " = " | expr(value, sources, query) - ] - end - - defp update_op(:inc, quoted_key, value, sources, query) do - [ - quoted_key, - " = ", - quoted_key, - " + " | expr(value, sources, query) - ] - end - - defp update_op(command, _quoted_key, _value, _sources, query) do - raise Ecto.QueryError, - query: query, - message: "Unknown update operation #{inspect(command)} for SQLite3" - end - - defp using_join(%{joins: 
[]}, _kind, _prefix, _sources), do: {[], []} - - defp using_join(%{joins: joins} = query, _kind, prefix, sources) do - froms = - intersperse_map(joins, ", ", fn - %JoinExpr{qual: _qual, ix: ix, source: source} -> - {join, name} = get_source(query, sources, ix, source) - [join, " AS " | name] - - # This is a holdover from sqlite_ecto2. According to the sqlite3 - # documentation, all of the join types are allowed. - # - # %JoinExpr{qual: qual} -> - # raise Ecto.QueryError, - # query: query, - # message: - # "SQLite3 adapter supports only inner joins on #{kind}, got: `#{qual}`" - end) - - wheres = - for %JoinExpr{on: %QueryExpr{expr: value} = query_expr} <- joins, - value != true, - do: query_expr |> Map.put(:__struct__, BooleanExpr) |> Map.put(:op, :and) - - {[?\s, prefix, ?\s | froms], wheres} - end - - def join(%{joins: []}, _sources), do: [] - - def join(%{joins: joins} = query, sources) do - Enum.map(joins, fn - %JoinExpr{ - on: %QueryExpr{expr: expression}, - qual: qual, - ix: ix, - source: source - } -> - {join, name} = get_source(query, sources, ix, source) - - [ - join_qual(qual, query), - join, - " AS ", - name, - join_on(qual, expression, sources, query) - ] - end) - end - - defp join_on(:cross, true, _sources, _query), do: [] - - defp join_on(_qual, expression, sources, query), - do: [" ON " | expr(expression, sources, query)] - - defp join_qual(:inner, _), do: " INNER JOIN " - defp join_qual(:left, _), do: " LEFT OUTER JOIN " - defp join_qual(:right, _), do: " RIGHT OUTER JOIN " - defp join_qual(:full, _), do: " FULL OUTER JOIN " - defp join_qual(:cross, _), do: " CROSS JOIN " - - defp join_qual(mode, query) do - raise Ecto.QueryError, - query: query, - message: "join `#{inspect(mode)}` not supported by SQLite3" - end - - def where(%{wheres: wheres} = query, sources) do - boolean(" WHERE ", wheres, sources, query) - end - - def having(%{havings: havings} = query, sources) do - boolean(" HAVING ", havings, sources, query) - end - - def group_by(%{group_bys: []}, _sources), do: [] - - def group_by(%{group_bys: group_bys} = query, sources) do - [ - " GROUP BY " - | intersperse_map(group_bys, ", ", fn %QueryExpr{expr: expression} -> - intersperse_map(expression, ", ", &expr(&1, sources, query)) - end) - ] - end - - def window(%{windows: []}, _sources), do: [] - - def window(%{windows: windows} = query, sources) do - [ - " WINDOW " - | intersperse_map(windows, ", ", fn {name, %{expr: kw}} -> - [quote_name(name), " AS " | window_exprs(kw, sources, query)] - end) - ] - end - - defp window_exprs(kw, sources, query) do - [?(, intersperse_map(kw, ?\s, &window_expr(&1, sources, query)), ?)] - end - - defp window_expr({:partition_by, fields}, sources, query) do - ["PARTITION BY " | intersperse_map(fields, ", ", &expr(&1, sources, query))] - end - - defp window_expr({:order_by, fields}, sources, query) do - ["ORDER BY " | intersperse_map(fields, ", ", &order_by_expr(&1, sources, query))] - end - - defp window_expr({:frame, {:fragment, _, _} = fragment}, sources, query) do - expr(fragment, sources, query) - end - - def order_by(%{order_bys: []}, _sources), do: [] - - def order_by(%{order_bys: order_bys} = query, sources) do - [ - " ORDER BY " - | intersperse_map(order_bys, ", ", fn %QueryExpr{expr: expression} -> - intersperse_map(expression, ", ", &order_by_expr(&1, sources, query)) - end) - ] - end - - defp order_by_expr({dir, expression}, sources, query) do - str = expr(expression, sources, query) - - case dir do - :asc -> - str - - :desc -> - [str | " DESC"] - - _ -> - raise Ecto.QueryError, - 
query: query, - message: "#{dir} is not supported in ORDER BY in SQLite3" - end - end - - def limit(%{limit: nil}, _sources), do: [] - - def limit(%{limit: %QueryExpr{expr: expression}} = query, sources) do - [" LIMIT " | expr(expression, sources, query)] - end - - def offset(%{offset: nil}, _sources), do: [] - - def offset(%{offset: %QueryExpr{expr: expression}} = query, sources) do - [" OFFSET " | expr(expression, sources, query)] - end - - defp combinations(%{combinations: combinations}) do - Enum.map(combinations, &combination/1) - end - - defp combination({:union, query}), do: [" UNION ", all(query)] - defp combination({:union_all, query}), do: [" UNION ALL ", all(query)] - defp combination({:except, query}), do: [" EXCEPT ", all(query)] - defp combination({:intersect, query}), do: [" INTERSECT ", all(query)] - - defp combination({:except_all, query}) do - raise Ecto.QueryError, - query: query, - message: "SQLite3 does not support EXCEPT ALL" - end - - defp combination({:intersect_all, query}) do - raise Ecto.QueryError, - query: query, - message: "SQLite3 does not support INTERSECT ALL" - end - - def lock(query, _sources) do - raise Ecto.QueryError, - query: query, - message: "SQLite3 does not support locks" - end - - defp boolean(_name, [], _sources, _query), do: [] - - defp boolean(name, [%{expr: expression, op: op} | query_exprs], sources, query) do - [ - name, - Enum.reduce(query_exprs, {op, paren_expr(expression, sources, query)}, fn - %BooleanExpr{expr: expression, op: op}, {op, acc} -> - {op, [acc, operator_to_boolean(op) | paren_expr(expression, sources, query)]} - - %BooleanExpr{expr: expression, op: op}, {_, acc} -> - {op, - [ - ?(, - acc, - ?), - operator_to_boolean(op) | paren_expr(expression, sources, query) - ]} - end) - |> elem(1) - ] - end - - defp operator_to_boolean(:and), do: " AND " - defp operator_to_boolean(:or), do: " OR " - - defp parens_for_select([first_expr | _] = expression) do - if is_binary(first_expr) and String.match?(first_expr, ~r/^\s*select/i) do - [?(, expression, ?)] - else - expression - end - end - - defp paren_expr(expression, sources, query) do - [?(, expr(expression, sources, query), ?)] - end - - ## - ## Expression generation - ## - - def expr({:^, [], [_ix]}, _sources, _query) do - '?' - end - - def expr( - {{:., _, [{:parent_as, _, [{:&, _, [idx]}]}, field]}, _, []}, - _sources, - query - ) - when is_atom(field) do - {_, name, _} = elem(query.aliases[@parent_as], idx) - [name, ?. | quote_name(field)] - end - - def expr({{:., _, [{:&, _, [idx]}, field]}, _, []}, sources, _query) - when is_atom(field) do - {_, name, _} = elem(sources, idx) - [name, ?. 
| quote_name(field)] - end - - def expr({:&, _, [idx]}, sources, _query) do - {_, source, _} = elem(sources, idx) - source - end - - def expr({:in, _, [_left, []]}, _sources, _query) do - "0" - end - - def expr({:in, _, [left, right]}, sources, query) when is_list(right) do - args = intersperse_map(right, ?,, &expr(&1, sources, query)) - [expr(left, sources, query), " IN (", args, ?)] - end - - def expr({:in, _, [_, {:^, _, [_, 0]}]}, _sources, _query) do - "0" - end - - def expr({:in, _, [left, {:^, _, [_, len]}]}, sources, query) do - args = Enum.intersperse(List.duplicate(??, len), ?,) - [expr(left, sources, query), " IN (", args, ?)] - end - - def expr({:in, _, [left, %Ecto.SubQuery{} = subquery]}, sources, query) do - [expr(left, sources, query), " IN ", expr(subquery, sources, query)] - end - - def expr({:in, _, [left, right]}, sources, query) do - [expr(left, sources, query), " IN (SELECT value FROM JSON_EACH(", expr(right, sources, query), ?), ?)] - end - - def expr({:is_nil, _, [arg]}, sources, query) do - [expr(arg, sources, query) | " IS NULL"] - end - - def expr({:not, _, [expression]}, sources, query) do - ["NOT (", expr(expression, sources, query), ?)] - end - - def expr({:filter, _, [agg, filter]}, sources, query) do - aggregate = expr(agg, sources, query) - [aggregate, " FILTER (WHERE ", expr(filter, sources, query), ?)] - end - - def expr(%Ecto.SubQuery{query: query}, sources, _query) do - query = put_in(query.aliases[@parent_as], sources) - [?(, all(query, subquery_as_prefix(sources)), ?)] - end - - def expr({:fragment, _, [kw]}, _sources, query) - when is_list(kw) or tuple_size(kw) == 3 do - raise Ecto.QueryError, - query: query, - message: "SQLite3 adapter does not support keyword or interpolated fragments" - end - - def expr({:fragment, _, parts}, sources, query) do - parts - |> Enum.map(fn - {:raw, part} -> part - {:expr, expression} -> expr(expression, sources, query) - end) - |> parens_for_select - end - - def expr({:datetime_add, _, [datetime, count, interval]}, sources, query) do - [ - "CAST (", - "strftime('%Y-%m-%d %H:%M:%f000Z'", - ",", - expr(datetime, sources, query), - ",", - interval(count, interval, sources), - ") AS TEXT_DATETIME)" - ] - end - - def expr({:date_add, _, [date, count, interval]}, sources, query) do - [ - "CAST (", - "strftime('%Y-%m-%d'", - ",", - expr(date, sources, query), - ",", - interval(count, interval, sources), - ") AS TEXT_DATE)" - ] - end - - def expr({:ilike, _, [_, _]}, _sources, query) do - raise Ecto.QueryError, - query: query, - message: "ilike is not supported by SQLite3" - end - - def expr({:over, _, [agg, name]}, sources, query) when is_atom(name) do - [expr(agg, sources, query), " OVER " | quote_name(name)] - end - - def expr({:over, _, [agg, kw]}, sources, query) do - [expr(agg, sources, query), " OVER " | window_exprs(kw, sources, query)] - end - - def expr({:{}, _, elems}, sources, query) do - [?(, intersperse_map(elems, ?,, &expr(&1, sources, query)), ?)] - end - - def expr({:count, _, []}, _sources, _query), do: "count(*)" - - def expr({:json_extract_path, _, [expr, path]}, sources, query) do - path = - Enum.map(path, fn - binary when is_binary(binary) -> - [?., escape_json_key(binary)] - - integer when is_integer(integer) -> - "[#{integer}]" - end) - - ["json_extract(", expr(expr, sources, query), ", '$", path, "')"] - end - - def expr({fun, _, args}, sources, query) when is_atom(fun) and is_list(args) do - {modifier, args} = - case args do - [rest, :distinct] -> {"DISTINCT ", [rest]} - _ -> {[], args} - end - - 
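# Dispatch note: handle_call/2 is generated above from the binary_ops - # keyword list, so e.g. handle_call(:==, 2) yields {:binary_op, " = "}, - # while anything unknown, such as handle_call(:lower, 1), falls back to - # {:fun, "lower"} and renders as a plain function call. - 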
case handle_call(fun, length(args)) do - {:binary_op, op} -> - [left, right] = args - [op_to_binary(left, sources, query), op | op_to_binary(right, sources, query)] - - {:fun, fun} -> - [fun, ?(, modifier, intersperse_map(args, ", ", &expr(&1, sources, query)), ?)] - end - end - - def expr(list, _sources, query) when is_list(list) do - raise Ecto.QueryError, - query: query, - message: "Array type is not supported by SQLite3" - end - - def expr(%Decimal{} = decimal, _sources, _query) do - Decimal.to_string(decimal, :normal) - end - - def expr(%Ecto.Query.Tagged{value: binary, type: :binary}, _sources, _query) - when is_binary(binary) do - hex = Base.encode16(binary, case: :lower) - [?x, ?', hex, ?'] - end - - def expr(%Ecto.Query.Tagged{value: other, type: type}, sources, query) - when type in [:decimal, :float] do - ["(", expr(other, sources, query), " + 0)"] - end - - def expr(%Ecto.Query.Tagged{value: other, type: type}, sources, query) do - ["CAST(", expr(other, sources, query), " AS ", column_type(type, query), ?)] - end - - def expr(nil, _sources, _query), do: "NULL" - def expr(true, _sources, _query), do: "1" - def expr(false, _sources, _query), do: "0" - - def expr(literal, _sources, _query) when is_binary(literal) do - [?', escape_string(literal), ?'] - end - - def expr(literal, _sources, _query) when is_integer(literal) do - Integer.to_string(literal) - end - - def expr(literal, _sources, _query) when is_float(literal) do - # Unsure if SQLite3 supports float casting - ["(0 + ", Float.to_string(literal), ?)] - end - - def interval(_, "microsecond", _sources) do - raise ArgumentError, - "SQLite does not support microsecond precision in datetime intervals" - end - - def interval(count, "millisecond", sources) do - "(#{expr(count, sources, nil)} / 1000.0) || ' seconds'" - end - - def interval(count, "week", sources) do - "(#{expr(count, sources, nil)} * 7) || ' days'" - end - - def interval(count, interval, sources) do - "#{expr(count, sources, nil)} || ' #{interval}'" - end - - defp op_to_binary({op, _, [_, _]} = expression, sources, query) - when op in @binary_ops do - paren_expr(expression, sources, query) - end - - defp op_to_binary({:is_nil, _, [_]} = expression, sources, query) do - paren_expr(expression, sources, query) - end - - defp op_to_binary(expression, sources, query) do - expr(expression, sources, query) - end - - def create_names(query) do - create_names(query, []) - end - - def create_names(%{sources: sources}, as_prefix) do - create_names(sources, 0, tuple_size(sources), as_prefix) |> List.to_tuple() - end - - def create_names(sources, pos, limit, as_prefix) when pos < limit do - [ - create_name(sources, pos, as_prefix) - | create_names(sources, pos + 1, limit, as_prefix) - ] - end - - def create_names(_sources, pos, pos, as_prefix) do - [as_prefix] - end - - defp subquery_as_prefix(sources) do - [?s | :erlang.element(tuple_size(sources), sources)] - end - - def create_name(sources, pos, as_prefix) do - case elem(sources, pos) do - {:fragment, _, _} -> - {nil, as_prefix ++ [?f | Integer.to_string(pos)], nil} - - {table, schema, prefix} -> - name = as_prefix ++ [create_alias(table) | Integer.to_string(pos)] - {quote_table(prefix, table), name, schema} - - %Ecto.SubQuery{} -> - {nil, as_prefix ++ [?s | Integer.to_string(pos)], nil} - end - end - - def create_alias(<<first, _rest::binary>>) - when first in ?a..?z - when first in ?A..?Z do - first - end - - def create_alias(_) do - ?t - end - - defp column_definitions(table, columns) do - intersperse_map(columns, ", ", 
&column_definition(table, &1)) - end - - defp column_definition(table, {:add, name, %Reference{} = ref, opts}) do - [ - quote_name(name), - ?\s, - column_type(ref.type, opts), - column_options(table, ref.type, opts), - reference_expr(ref, table, name) - ] - end - - defp column_definition(table, {:add, name, type, opts}) do - [ - quote_name(name), - ?\s, - column_type(type, opts), - column_options(table, type, opts) - ] - end - - defp column_change(table, {:add, name, %Reference{} = ref, opts}) do - [ - "ADD COLUMN ", - quote_name(name), - ?\s, - column_type(ref.type, opts), - column_options(table, ref.type, opts), - reference_expr(ref, table, name) - ] - end - - # If we are adding a DATETIME column with the NOT NULL constraint, SQLite - # will force us to give it a DEFAULT value. The only default value - # that makes sense is CURRENT_TIMESTAMP, but when adding a column to a - # table, defaults must be constant values. - # - # Therefore the best option is just to remove the NOT NULL constraint when - # we add new datetime columns. - defp column_change(table, {:add, name, type, opts}) - when type in [:utc_datetime, :naive_datetime] do - opts = Keyword.delete(opts, :null) - - [ - "ADD COLUMN ", - quote_name(name), - ?\s, - column_type(type, opts), - column_options(table, type, opts) - ] - end - - defp column_change(table, {:add, name, type, opts}) do - [ - "ADD COLUMN ", - quote_name(name), - ?\s, - column_type(type, opts), - column_options(table, type, opts) - ] - end - - defp column_change(_table, {:modify, _name, _type, _opts}) do - raise ArgumentError, "ALTER COLUMN not supported by SQLite3" - end - - defp column_change(table, {:remove, name, _type, _opts}) do - column_change(table, {:remove, name}) - end - - defp column_change(_table, {:remove, name}) do - [ - "DROP COLUMN ", - quote_name(name) - ] - end - - defp column_change(_table, _) do - raise ArgumentError, "Not supported by SQLite3" - end - - defp column_options(table, type, opts) do - default = Keyword.fetch(opts, :default) - null = Keyword.get(opts, :null) - pk = table.primary_key != :composite and Keyword.get(opts, :primary_key, false) - - column_options(default, type, null, pk) - end - - defp column_options(_default, :serial, _, true) do - " PRIMARY KEY AUTOINCREMENT" - end - - defp column_options(default, type, null, pk) do - [default_expr(default, type), null_expr(null), pk_expr(pk)] - end - - defp null_expr(false), do: " NOT NULL" - defp null_expr(true), do: " NULL" - defp null_expr(_), do: [] - - defp default_expr({:ok, nil}, _type) do - " DEFAULT NULL" - end - - defp default_expr({:ok, literal}, _type) when is_binary(literal) do - [ - " DEFAULT '", - escape_string(literal), - ?' - ] - end - - defp default_expr({:ok, literal}, _type) - when is_number(literal) or is_boolean(literal) do - [ - " DEFAULT ", - to_string(literal) - ] - end - - defp default_expr({:ok, {:fragment, expression}}, _type) do - [ - " DEFAULT ", - expression - ] - end - - defp default_expr({:ok, value}, _type) when is_map(value) or is_list(value) do - library = Application.get_env(:exqlite, :json_library, Jason) - expression = IO.iodata_to_binary(library.encode_to_iodata!(value)) - - [ - " DEFAULT ", - ?(, - ?', - escape_string(expression), - ?', - ?) 
- ] - end - - defp default_expr(:error, _type), do: [] - - defp index_expr(literal) when is_binary(literal), do: literal - defp index_expr(literal), do: quote_name(literal) - - defp pk_expr(true), do: " PRIMARY KEY" - defp pk_expr(_), do: [] - - defp options_expr(nil), do: [] - - defp options_expr(keyword) when is_list(keyword) do - raise ArgumentError, "SQLite3 adapter does not support keyword lists in :options" - end - - defp options_expr(options), do: [?\s, to_string(options)] - - # composite FK is handled at table level - defp reference_expr(%Reference{with: [_]}, _table, _name), do: [] - - defp reference_expr(%Reference{} = ref, table, name) do - [ - " CONSTRAINT ", - reference_name(ref, table, name), - " REFERENCES ", - quote_table(ref.prefix || table.prefix, ref.table), - ?(, - quote_name(ref.column), - ?), - reference_on_delete(ref.on_delete), - reference_on_update(ref.on_update) - ] - end - - defp reference_name(%Reference{name: nil}, table, column) do - quote_name("#{table.name}_#{column}_fkey") - end - - defp reference_name(%Reference{name: name}, _table, _column) do - quote_name(name) - end - - defp reference_on_delete(:nilify_all), do: " ON DELETE SET NULL" - defp reference_on_delete(:default_all), do: " ON DELETE SET DEFAULT" - defp reference_on_delete(:delete_all), do: " ON DELETE CASCADE" - defp reference_on_delete(:restrict), do: " ON DELETE RESTRICT" - defp reference_on_delete(_), do: [] - - defp reference_on_update(:nilify_all), do: " ON UPDATE SET NULL" - defp reference_on_update(:default_all), do: " ON UPDATE SET DEFAULT" - defp reference_on_update(:update_all), do: " ON UPDATE CASCADE" - defp reference_on_update(:restrict), do: " ON UPDATE RESTRICT" - defp reference_on_update(_), do: [] - - defp returning(%{select: nil}, _sources), do: [] - defp returning(%{select: %{fields: fields}} = query, sources) do - [ - " RETURNING " | select_fields(fields, sources, query) - ] - end - - defp returning([]), do: [] - - defp returning(returning) do - [ - " RETURNING " | quote_names(returning) - ] - end - - ## - ## Helpers - ## - - defp composite_pk_definition(%Table{} = table, columns) do - pks = - Enum.reduce(columns, [], fn {_, name, _, opts}, pk_acc -> - case Keyword.get(opts, :primary_key, false) do - true -> [name | pk_acc] - false -> pk_acc - end - end) - - if length(pks) > 1 do - composite_pk_expr = pks |> Enum.reverse() |> Enum.map_join(", ", "e_name/1) - - { - %{table | primary_key: :composite}, - ", PRIMARY KEY (" <> composite_pk_expr <> ")" - } - else - {table, ""} - end - end - - defp composite_fk_definitions(%Table{} = table, columns) do - composite_fk_cols = columns - |> Enum.filter(fn c -> - case c do - {_op, _name, %Reference{with: [_]}, _opts} -> true - _ -> false - end - end) - - Enum.map(composite_fk_cols, &composite_fk_definition(table, &1)) - end - - defp composite_fk_definition(table, {_op, name, ref, _opts}) do - {current_columns, reference_columns} = Enum.unzip([{name, ref.column} | ref.with]) - - [ - ", FOREIGN KEY (", - quote_names(current_columns), - ") REFERENCES ", - quote_table(ref.prefix || table.prefix, ref.table), - ?(, - quote_names(reference_columns), - ?), - reference_on_delete(ref.on_delete), - reference_on_update(ref.on_update) - ] - - end - - defp get_source(query, sources, ix, source) do - {expression, name, _schema} = elem(sources, ix) - {expression || expr(source, sources, query), name} - end - - defp quote_names(names), do: intersperse_map(names, ?,, "e_name/1) - - def quote_name(name), do: quote_entity(name) - - def quote_table(table), 
do: quote_entity(table) - - defp quote_table(nil, name), do: quote_entity(name) - defp quote_table(prefix, name), do: [quote_entity(prefix), ?., quote_entity(name)] - - defp quote_entity(val) when is_atom(val) do - quote_entity(Atom.to_string(val)) - end - - defp quote_entity(val), do: [val] - - defp intersperse_map(list, separator, mapper, acc \\ []) - - defp intersperse_map([], _separator, _mapper, acc) do - acc - end - - defp intersperse_map([elem], _separator, mapper, acc) do - [acc | mapper.(elem)] - end - - defp intersperse_map([elem | rest], separator, mapper, acc) do - intersperse_map(rest, separator, mapper, [acc, mapper.(elem), separator]) - end - - defp intersperse_reduce(list, separator, user_acc, reducer, acc \\ []) - - defp intersperse_reduce([], _separator, user_acc, _reducer, acc), - do: {acc, user_acc} - - defp intersperse_reduce([item], _separator, user_acc, reducer, acc) do - {item, user_acc} = reducer.(item, user_acc) - {[acc | item], user_acc} - end - - defp intersperse_reduce([item | rest], separator, user_acc, reducer, acc) do - {item, user_acc} = reducer.(item, user_acc) - intersperse_reduce(rest, separator, user_acc, reducer, [acc, item, separator]) - end - - defp if_do(condition, value) do - if condition, do: value, else: [] - end - - defp escape_string(value) when is_binary(value) do - value - |> :binary.replace("'", "''", [:global]) - |> :binary.replace("\\", "\\\\", [:global]) - end - - defp escape_json_key(value) when is_binary(value) do - value - |> escape_string() - |> :binary.replace("\"", "\\\"", [:global]) - end -end diff --git a/lib/ecto/adapters/exqlite/data_type.ex b/lib/ecto/adapters/exqlite/data_type.ex deleted file mode 100644 index 42e80f55..00000000 --- a/lib/ecto/adapters/exqlite/data_type.ex +++ /dev/null @@ -1,48 +0,0 @@ -defmodule Ecto.Adapters.Exqlite.DataType do - # Simple column types. Note that we ignore options like :size, :precision, - # etc. because columns do not have types, and SQLite will not coerce any - # stored value. Thus, "strings" are all text and "numerics" have arbitrary - # precision regardless of the declared column type. Decimals are the - # only exception. - - @spec column_type(atom(), Keyword.t()) :: String.t() - def column_type(:id, _opts), do: "INTEGER" - def column_type(:serial, _opts), do: "INTEGER" - def column_type(:bigserial, _opts), do: "INTEGER" - def column_type(:bigint, _opts), do: "INTEGER" - # TODO: We should make this configurable - def column_type(:binary_id, _opts), do: "TEXT" - def column_type(:string, _opts), do: "TEXT" - def column_type(:float, _opts), do: "NUMERIC" - def column_type(:binary, _opts), do: "BLOB" - # TODO: We should make this configurable - # SQLite3 does not support uuid - def column_type(:uuid, _opts), do: "TEXT" - def column_type(:map, _opts), do: "JSON" - def column_type(:array, _opts), do: "JSON" - def column_type({:map, _}, _opts), do: "JSON" - def column_type({:array, _}, _opts), do: "JSON" - def column_type(:utc_datetime, _opts), do: "TEXT_DATETIME" - def column_type(:utc_datetime_usec, _opts), do: "TEXT_DATETIME" - def column_type(:naive_datetime, _opts), do: "TEXT_DATETIME" - def column_type(:naive_datetime_usec, _opts), do: "TEXT_DATETIME" - def column_type(:decimal, nil), do: "DECIMAL" - - def column_type(:decimal, opts) do - # We only store precision and scale for DECIMAL. 
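- # For example, column_type(:decimal, precision: 10, scale: 2) renders as - # "DECIMAL(10,2)", while omitting :precision falls back to plain "DECIMAL" - # regardless of :scale.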
- precision = Keyword.get(opts, :precision) - scale = Keyword.get(opts, :scale, 0) - - if precision do - "DECIMAL(#{precision},#{scale})" - else - "DECIMAL" - end - end - - def column_type(type, _) do - type - |> Atom.to_string() - |> String.upcase() - end -end diff --git a/mix.exs b/mix.exs index ba77e7c5..aa99f8af 100644 --- a/mix.exs +++ b/mix.exs @@ -31,19 +31,10 @@ defmodule Exqlite.MixProject do defp deps do [ {:db_connection, "~> 2.1"}, - {:decimal, "~> 2.0"}, - {:ecto_sql, "~> 3.5.4"}, - {:ecto, "~> 3.5.8"}, {:elixir_make, "~> 0.6", runtime: false}, {:ex_doc, "~> 0.23.0", only: [:dev], runtime: false}, {:jason, ">= 0.0.0", only: [:test, :docs]}, {:temp, "~> 0.4", only: [:test]}, - - # Benchmarks - {:benchee, "~> 0.11.0", only: :bench}, - {:benchee_json, "~> 0.4.0", only: :bench}, - {:postgrex, "~> 0.15.0", only: :bench}, - {:myxql, "~> 0.4.0", only: :bench} ] end diff --git a/mix.lock b/mix.lock index 8e22e2c9..a27096aa 100644 --- a/mix.lock +++ b/mix.lock @@ -1,23 +1,12 @@ %{ - "benchee": {:hex, :benchee, "0.11.0", "cf96e328ff5d69838dd89c21a9db22716bfcc6ef772e9d9dddf7ba622102722d", [:mix], [{:deep_merge, "~> 0.1", [hex: :deep_merge, repo: "hexpm", optional: false]}], "hexpm", "c345e090e0a61bf33e0385aa3ad394fcb7d863e313bc3fca522e390c7f39166e"}, - "benchee_json": {:hex, :benchee_json, "0.4.0", "59d3277829bd1dca8373cdb20b916cb435c2647be235d09963fc0959db908c36", [:mix], [{:benchee, "~> 0.10", [hex: :benchee, repo: "hexpm", optional: false]}, {:poison, ">= 1.4.0", [hex: :poison, repo: "hexpm", optional: false]}], "hexpm", "71a3edb6a30708de2a01368aa8f288e1c0ed7897b125adc396ce7c2c7245b1e7"}, "connection": {:hex, :connection, "1.1.0", "ff2a49c4b75b6fb3e674bfc5536451607270aac754ffd1bdfe175abe4a6d7a68", [:mix], [], "hexpm", "722c1eb0a418fbe91ba7bd59a47e28008a189d47e37e0e7bb85585a016b2869c"}, "db_connection": {:hex, :db_connection, "2.3.1", "4c9f3ed1ef37471cbdd2762d6655be11e38193904d9c5c1c9389f1b891a3088e", [:mix], [{:connection, "~> 1.0", [hex: :connection, repo: "hexpm", optional: false]}], "hexpm", "abaab61780dde30301d840417890bd9f74131041afd02174cf4e10635b3a63f5"}, - "decimal": {:hex, :decimal, "2.0.0", "a78296e617b0f5dd4c6caf57c714431347912ffb1d0842e998e9792b5642d697", [:mix], [], "hexpm", "34666e9c55dea81013e77d9d87370fe6cb6291d1ef32f46a1600230b1d44f577"}, - "deep_merge": {:hex, :deep_merge, "0.2.0", "c1050fa2edf4848b9f556fba1b75afc66608a4219659e3311d9c9427b5b680b3", [:mix], [], "hexpm", "e3bf435a54ed27b0ba3a01eb117ae017988804e136edcbe8a6a14c310daa966e"}, "earmark_parser": {:hex, :earmark_parser, "1.4.12", "b245e875ec0a311a342320da0551da407d9d2b65d98f7a9597ae078615af3449", [:mix], [], "hexpm", "711e2cc4d64abb7d566d43f54b78f7dc129308a63bc103fbd88550d2174b3160"}, - "ecto": {:hex, :ecto, "3.5.8", "8ebf12be6016cb99313348ba7bb4612f4114b9a506d6da79a2adc7ef449340bc", [:mix], [{:decimal, "~> 1.6 or ~> 2.0", [hex: :decimal, repo: "hexpm", optional: false]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: true]}, {:telemetry, "~> 0.4", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "ea0be182ea8922eb7742e3ae8e71b67ee00ae177de1bf76210299a5f16ba4c77"}, - "ecto_sql": {:hex, :ecto_sql, "3.5.4", "a9e292c40bd79fff88885f95f1ecd7b2516e09aa99c7dd0201aa84c54d2358e4", [:mix], [{:db_connection, "~> 2.2", [hex: :db_connection, repo: "hexpm", optional: false]}, {:ecto, "~> 3.5.0", [hex: :ecto, repo: "hexpm", optional: false]}, {:myxql, "~> 0.3.0 or ~> 0.4.0", [hex: :myxql, repo: "hexpm", optional: true]}, {:postgrex, "~> 0.15.0 or ~> 1.0", [hex: :postgrex, repo: "hexpm", optional: 
true]}, {:tds, "~> 2.1.1", [hex: :tds, repo: "hexpm", optional: true]}, {:telemetry, "~> 0.4.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "1fff1a28a898d7bbef263f1f3ea425b04ba9f33816d843238c84eff883347343"}, "elixir_make": {:hex, :elixir_make, "0.6.2", "7dffacd77dec4c37b39af867cedaabb0b59f6a871f89722c25b28fcd4bd70530", [:mix], [], "hexpm", "03e49eadda22526a7e5279d53321d1cced6552f344ba4e03e619063de75348d9"}, - "esqlite": {:hex, :esqlite, "0.4.1", "ba5d0bab6b9c8432ffe1bf12fee8e154a50f1c3c40eadc3a9c870c23ca94d961", [:rebar3], [], "hexpm", "3584ca33172f4815ce56e96eed9835f5d8c987a9000fbc8c376c86acef8bf965"}, "ex_doc": {:hex, :ex_doc, "0.23.0", "a069bc9b0bf8efe323ecde8c0d62afc13d308b1fa3d228b65bca5cf8703a529d", [:mix], [{:earmark_parser, "~> 1.4.0", [hex: :earmark_parser, repo: "hexpm", optional: false]}, {:makeup_elixir, "~> 0.14", [hex: :makeup_elixir, repo: "hexpm", optional: false]}], "hexpm", "f5e2c4702468b2fd11b10d39416ddadd2fcdd173ba2a0285ebd92c39827a5a16"}, "jason": {:hex, :jason, "1.2.2", "ba43e3f2709fd1aa1dce90aaabfd039d000469c05c56f0b8e31978e03fa39052", [:mix], [{:decimal, "~> 1.0 or ~> 2.0", [hex: :decimal, repo: "hexpm", optional: true]}], "hexpm", "18a228f5f0058ee183f29f9eae0805c6e59d61c3b006760668d8d18ff0d12179"}, "makeup": {:hex, :makeup, "1.0.5", "d5a830bc42c9800ce07dd97fa94669dfb93d3bf5fcf6ea7a0c67b2e0e4a7f26c", [:mix], [{:nimble_parsec, "~> 0.5 or ~> 1.0", [hex: :nimble_parsec, repo: "hexpm", optional: false]}], "hexpm", "cfa158c02d3f5c0c665d0af11512fed3fba0144cf1aadee0f2ce17747fba2ca9"}, "makeup_elixir": {:hex, :makeup_elixir, "0.15.1", "b5888c880d17d1cc3e598f05cdb5b5a91b7b17ac4eaf5f297cb697663a1094dd", [:mix], [{:makeup, "~> 1.0", [hex: :makeup, repo: "hexpm", optional: false]}, {:nimble_parsec, "~> 1.1", [hex: :nimble_parsec, repo: "hexpm", optional: false]}], "hexpm", "db68c173234b07ab2a07f645a5acdc117b9f99d69ebf521821d89690ae6c6ec8"}, - "myxql": {:hex, :myxql, "0.4.5", "49784e6a3e4fc33088cc9004948ef255ee698b0d7b533fb1fa453cc99a3f9972", [:mix], [{:db_connection, "~> 2.0", [hex: :db_connection, repo: "hexpm", optional: false]}, {:decimal, "~> 1.6 or ~> 2.0", [hex: :decimal, repo: "hexpm", optional: false]}, {:geo, "~> 3.3", [hex: :geo, repo: "hexpm", optional: true]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: true]}], "hexpm", "40a6166ab0a54f44a6e2c437aed6360ce51ce7f779557ae30d1cc4c4b4e7ad13"}, "nimble_parsec": {:hex, :nimble_parsec, "1.1.0", "3a6fca1550363552e54c216debb6a9e95bd8d32348938e13de5eda962c0d7f89", [:mix], [], "hexpm", "08eb32d66b706e913ff748f11694b17981c0b04a33ef470e33e11b3d3ac8f54b"}, - "poison": {:hex, :poison, "4.0.1", "bcb755a16fac91cad79bfe9fc3585bb07b9331e50cfe3420a24bcc2d735709ae", [:mix], [], "hexpm", "ba8836feea4b394bb718a161fc59a288fe0109b5006d6bdf97b6badfcf6f0f25"}, - "postgrex": {:hex, :postgrex, "0.15.8", "f5e782bbe5e8fa178d5e3cd1999c857dc48eda95f0a4d7f7bd92a50e84a0d491", [:mix], [{:connection, "~> 1.0", [hex: :connection, repo: "hexpm", optional: false]}, {:db_connection, "~> 2.1", [hex: :db_connection, repo: "hexpm", optional: false]}, {:decimal, "~> 1.5 or ~> 2.0", [hex: :decimal, repo: "hexpm", optional: false]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: true]}], "hexpm", "698fbfacea34c4cf22c8281abeb5cf68d99628d541874f085520ab3b53d356fe"}, - "telemetry": {:hex, :telemetry, "0.4.2", "2808c992455e08d6177322f14d3bdb6b625fbcfd233a73505870d8738a2f4599", [:rebar3], [], "hexpm", "2d1419bd9dda6a206d7b5852179511722e2b18812310d304620c7bd92a13fcef"}, "temp": {:hex, :temp, "0.4.7", 
"2c78482cc2294020a4bc0c95950b907ff386523367d4e63308a252feffbea9f2", [:mix], [], "hexpm", "6af19e7d6a85a427478be1021574d1ae2a1e1b90882586f06bde76c63cd03e0d"}, } diff --git a/test/ecto/adapters/exqlite/codec_test.exs b/test/ecto/adapters/exqlite/codec_test.exs deleted file mode 100644 index d04ec0a2..00000000 --- a/test/ecto/adapters/exqlite/codec_test.exs +++ /dev/null @@ -1,83 +0,0 @@ -defmodule Ecto.Adapters.Exqlite.CodecTest do - use ExUnit.Case, async: true - - alias Ecto.Adapters.Exqlite.Codec - - describe ".bool_decode/1" do - test "0" do - {:ok, false} = Codec.bool_decode(0) - {:ok, false} = Codec.bool_decode("0") - end - - test "FALSE" do - {:ok, false} = Codec.bool_decode("FALSE") - end - - test "1" do - {:ok, true} = Codec.bool_decode(1) - {:ok, true} = Codec.bool_decode("1") - end - - test "TRUE" do - {:ok, true} = Codec.bool_decode("TRUE") - end - end - - describe ".json_decode/1" do - test "nil" do - {:ok, nil} = Codec.json_decode(nil) - end - - test "valid json" do - {:ok, %{}} = Codec.json_decode("{}") - {:ok, []} = Codec.json_decode("[]") - {:ok, %{"foo" => 1}} = Codec.json_decode(~s|{"foo":1}|) - end - - test "handles malformed json" do - {:error, _} = Codec.json_decode("") - {:error, _} = Codec.json_decode(" ") - {:error, _} = Codec.json_decode("{") - {:error, _} = Codec.json_decode("[") - end - end - - describe ".float_decode/1" do - test "nil" do - {:ok, nil} = Codec.float_decode(nil) - end - - test "integer" do - {:ok, 1.0} = Codec.float_decode(1) - {:ok, 2.0} = Codec.float_decode(2) - end - - test "Decimal" do - {:ok, 1.0} = Codec.float_decode(Decimal.new("1.0")) - end - end - - describe ".decimal_decode/1" do - test "nil" do - {:ok, nil} = Codec.decimal_decode(nil) - end - - test "string" do - decimal = Decimal.new("1") - {:ok, ^decimal} = Codec.decimal_decode("1") - - decimal = Decimal.new("1.0") - {:ok, ^decimal} = Codec.decimal_decode("1.0") - end - - test "integer" do - decimal = Decimal.new("2") - {:ok, ^decimal} = Codec.decimal_decode(2) - end - - test "float" do - decimal = Decimal.from_float(1.2) - {:ok, ^decimal} = Codec.decimal_decode(1.2) - end - end -end diff --git a/test/ecto/adapters/exqlite/connection_test.exs b/test/ecto/adapters/exqlite/connection_test.exs deleted file mode 100644 index efbbfdc6..00000000 --- a/test/ecto/adapters/exqlite/connection_test.exs +++ /dev/null @@ -1,2745 +0,0 @@ -defmodule Ecto.Adapters.Exqlite.ConnectionTest do - use ExUnit.Case - - alias Ecto.Adapters.Exqlite.Connection - alias Ecto.Adapters.Exqlite - # alias Ecto.Migration.Table - - import Ecto.Query - import Ecto.Migration, only: [table: 1, table: 2, index: 2, index: 3, constraint: 3] - alias Ecto.Migration.Reference - - defmodule Comment do - use Ecto.Schema - - schema "comments" do - field(:content, :string) - end - end - - defmodule Post do - use Ecto.Schema - - schema "posts" do - field(:title, :string) - field(:content, :string) - has_many(:comments, Comment) - end - end - - # TODO: Let's rename these or make them more concrete and less terse so that - # tests are easier to read and understand what is happening. 
- # @warmwaffles 2021-03-11 - defmodule Schema3 do - use Ecto.Schema - - schema "schema3" do - field(:binary, :binary) - end - end - - defmodule Schema do - use Ecto.Schema - - schema "schema" do - field(:x, :integer) - field(:y, :integer) - field(:z, :integer) - field(:meta, :map) - - has_many(:comments, Ecto.Adapters.Exqlite.ConnectionTest.Schema2, - references: :x, - foreign_key: :z - ) - - has_one(:permalink, Ecto.Adapters.Exqlite.ConnectionTest.Schema3, - references: :y, - foreign_key: :id - ) - end - end - - defmodule Schema2 do - use Ecto.Schema - - schema "schema2" do - belongs_to(:post, Ecto.Adapters.Exqlite.ConnectionTest.Schema, - references: :x, - foreign_key: :z - ) - end - end - - defp plan(query, operation \\ :all) do - {query, _params} = Ecto.Adapter.Queryable.plan_query(operation, Exqlite, query) - query - end - - defp all(query) do - query - |> Connection.all() - |> IO.iodata_to_binary() - end - - defp update_all(query) do - query - |> Connection.update_all() - |> IO.iodata_to_binary() - end - - defp delete_all(query) do - query - |> Connection.delete_all() - |> IO.iodata_to_binary() - end - - defp execute_ddl(query) do - query - |> Connection.execute_ddl() - |> Enum.map(&IO.iodata_to_binary/1) - end - - defp insert(prefix, table, header, rows, on_conflict, returning, placeholders \\ []) do - prefix - |> Connection.insert(table, header, rows, on_conflict, returning, placeholders) - |> IO.iodata_to_binary() - end - - defp delete(prefix, table, filter, returning) do - prefix - |> Connection.delete(table, filter, returning) - |> IO.iodata_to_binary() - end - - test "from" do - query = Schema |> select([r], r.x) |> plan() - assert all(query) == ~s{SELECT s0.x FROM schema AS s0} - end - - test "ignores from with hints" do - query = - Schema - |> from(hints: ["USE INDEX FOO", "USE INDEX BAR"]) - |> select([r], r.x) - |> plan() - - assert all(query) == ~s{SELECT s0.x FROM schema AS s0} - end - - test "from without schema" do - query = - "posts" - |> select([r], r.x) - |> plan() - - assert all(query) == ~s{SELECT p0.x FROM posts AS p0} - - query = - "posts" - |> select([r], fragment("?", r)) - |> plan() - - assert all(query) == ~s{SELECT p0 FROM posts AS p0} - - query = - "Posts" - |> select([:x]) - |> plan() - - assert all(query) == ~s{SELECT P0.x FROM Posts AS P0} - - query = - "0posts" - |> select([:x]) - |> plan() - - assert all(query) == ~s{SELECT t0.x FROM 0posts AS t0} - - assert_raise( - Ecto.QueryError, - ~r"SQLite3 does not support selecting all fields from posts without a schema", - fn -> - from(p in "posts", select: p) |> plan() |> all() - end - ) - end - - test "from with subquery" do - query = - "posts" - |> select([r], %{x: r.x, y: r.y}) - |> subquery() - |> select([r], r.x) - |> plan() - - assert all(query) == """ - SELECT s0.x \ - FROM (SELECT sp0.x AS x, sp0.y AS y FROM posts AS sp0) AS s0\ - """ - - query = - "posts" - |> select([r], %{x: r.x, z: r.y}) - |> subquery() - |> select([r], r) - |> plan() - - assert all(query) == - """ - SELECT s0.x, s0.z \ - FROM (SELECT sp0.x AS x, sp0.y AS z FROM posts AS sp0) AS s0\ - """ - - query = - "posts" - |> select([r], %{x: r.x, z: r.y}) - |> subquery() - |> select([r], r) - |> subquery() - |> select([r], r) - |> plan() - - assert all(query) == - """ - SELECT s0.x, s0.z \ - FROM (\ - SELECT ss0.x AS x, ss0.z AS z \ - FROM (\ - SELECT ssp0.x AS x, ssp0.y AS z \ - FROM posts AS ssp0\ - ) AS ss0\ - ) AS s0\ - """ - end - - test "common table expression" do - iteration_query = - "categories" - |> join(:inner, [c], t in 
"tree", on: t.id == c.parent_id) - |> select([c, t], %{id: c.id, depth: fragment("? + 1", t.depth)}) - - cte_query = - "categories" - |> where([c], is_nil(c.parent_id)) - |> select([c], %{id: c.id, depth: fragment("1")}) - |> union_all(^iteration_query) - - query = - Schema - |> recursive_ctes(true) - |> with_cte("tree", as: ^cte_query) - |> join(:inner, [r], t in "tree", on: t.id == r.category_id) - |> select([r, t], %{x: r.x, category_id: t.id, depth: type(t.depth, :integer)}) - |> plan() - - assert all(query) == - """ - WITH RECURSIVE tree AS \ - (SELECT c0.id AS id, 1 AS depth FROM categories AS c0 WHERE (c0.parent_id IS NULL) \ - UNION ALL \ - SELECT c0.id, t1.depth + 1 FROM categories AS c0 \ - INNER JOIN tree AS t1 ON t1.id = c0.parent_id) \ - SELECT s0.x, t1.id, CAST(t1.depth AS INTEGER) \ - FROM schema AS s0 \ - INNER JOIN tree AS t1 ON t1.id = s0.category_id\ - """ - end - - test "reference common table in union" do - comments_scope_query = - "comments" - |> where([c], is_nil(c.deleted_at)) - |> select([c], %{entity_id: c.entity_id, text: c.text}) - - posts_query = - "posts" - |> join(:inner, [p], c in "comments_scope", on: c.entity_id == p.guid) - |> select([p, c], [p.title, c.text]) - - videos_query = - "videos" - |> join(:inner, [v], c in "comments_scope", on: c.entity_id == v.guid) - |> select([v, c], [v.title, c.text]) - - query = - posts_query - |> union_all(^videos_query) - |> with_cte("comments_scope", as: ^comments_scope_query) - |> plan() - - assert all(query) == - """ - WITH comments_scope AS (\ - SELECT c0.entity_id AS entity_id, c0.text AS text \ - FROM comments AS c0 WHERE (c0.deleted_at IS NULL)) \ - SELECT p0.title, c1.text \ - FROM posts AS p0 \ - INNER JOIN comments_scope AS c1 ON c1.entity_id = p0.guid \ - UNION ALL \ - SELECT v0.title, c1.text \ - FROM videos AS v0 \ - INNER JOIN comments_scope AS c1 ON c1.entity_id = v0.guid\ - """ - end - - @raw_sql_cte """ - SELECT * FROM categories WHERE c.parent_id IS NULL \ - UNION ALL \ - SELECT * FROM categories AS c, category_tree AS ct WHERE ct.id = c.parent_id\ - """ - - test "fragment common table expression" do - query = - Schema - |> recursive_ctes(true) - |> with_cte("tree", as: fragment(@raw_sql_cte)) - |> join(:inner, [p], c in "tree", on: c.id == p.category_id) - |> select([r], r.x) - |> plan() - - assert all(query) == - """ - WITH RECURSIVE tree AS (#{@raw_sql_cte}) \ - SELECT s0.x \ - FROM schema AS s0 \ - INNER JOIN tree AS t1 ON t1.id = s0.category_id\ - """ - end - - test "common table expression update_all" do - cte_query = - from( - x in Schema, - order_by: [asc: :id], - limit: 10, - select: %{id: x.id} - ) - - query = - Schema - |> with_cte("target_rows", as: ^cte_query) - |> join(:inner, [row], target in "target_rows", on: target.id == row.id) - |> update(set: [x: 123]) - |> plan(:update_all) - - assert update_all(query) == - """ - WITH target_rows AS \ - (SELECT s0.id AS id FROM schema AS s0 ORDER BY s0.id LIMIT 10) \ - UPDATE schema AS s0 \ - SET x = 123 \ - FROM target_rows AS t1 \ - WHERE (t1.id = s0.id)\ - """ - end - - test "common table expression delete_all" do - cte_query = from(x in Schema, order_by: [asc: :id], limit: 10, select: %{id: x.id}) - - query = - Schema - |> with_cte("target_rows", as: ^cte_query) - |> plan(:delete_all) - - # TODO: This is valid in sqlite - # https://sqlite.org/lang_delete.html - assert delete_all(query) == - """ - WITH target_rows AS \ - (SELECT s0.id AS id FROM schema AS s0 ORDER BY s0.id LIMIT 10) \ - DELETE \ - FROM schema AS s0\ - """ - end - - test "select" 
do - query = - Schema - |> select([r], {r.x, r.y}) - |> plan() - - assert all(query) == ~s{SELECT s0.x, s0.y FROM schema AS s0} - - query = - Schema - |> select([r], [r.x, r.y]) - |> plan() - - assert all(query) == ~s{SELECT s0.x, s0.y FROM schema AS s0} - - query = - Schema - |> select([r], struct(r, [:x, :y])) - |> plan() - - assert all(query) == ~s{SELECT s0.x, s0.y FROM schema AS s0} - end - - test "aggregates" do - query = - Schema - |> select(count()) - |> plan() - - assert all(query) == ~s{SELECT count(*) FROM schema AS s0} - end - - test "aggregate filters" do - query = Schema |> select([r], count(r.x) |> filter(r.x > 10)) |> plan() - assert all(query) == ~s{SELECT count(s0.x) FILTER (WHERE s0.x > 10) FROM schema AS s0} - - query = Schema |> select([r], count(r.x) |> filter(r.x > 10 and r.x < 50)) |> plan() - assert all(query) == ~s{SELECT count(s0.x) FILTER (WHERE (s0.x > 10) AND (s0.x < 50)) FROM schema AS s0} - - query = Schema |> select([r], count() |> filter(r.x > 10)) |> plan() - assert all(query) == ~s{SELECT count(*) FILTER (WHERE s0.x > 10) FROM schema AS s0} - end - - test "distinct" do - query = - Schema - |> distinct([r], true) - |> select([r], {r.x, r.y}) - |> plan() - - assert all(query) == ~s{SELECT DISTINCT s0.x, s0.y FROM schema AS s0} - - query = - Schema - |> distinct([r], false) - |> select([r], {r.x, r.y}) - |> plan() - - assert all(query) == ~s{SELECT s0.x, s0.y FROM schema AS s0} - - query = - Schema - |> distinct(true) - |> select([r], {r.x, r.y}) - |> plan() - - assert all(query) == ~s{SELECT DISTINCT s0.x, s0.y FROM schema AS s0} - - query = - Schema - |> distinct(false) - |> select([r], {r.x, r.y}) - |> plan() - - assert all(query) == ~s{SELECT s0.x, s0.y FROM schema AS s0} - - assert_raise( - Ecto.QueryError, - ~r"DISTINCT with multiple columns is not supported by SQLite3", - fn -> - Schema - |> distinct([r], [r.x, r.y]) - |> select([r], {r.x, r.y}) - |> plan() - |> all() - end - ) - end - - test "coalesce" do - query = - Schema - |> select([s], coalesce(s.x, 5)) - |> plan() - - assert all(query) == ~s{SELECT coalesce(s0.x, 5) FROM schema AS s0} - end - - test "where" do - query = - Schema - |> where([r], r.x == 42) - |> where([r], r.y != 43) - |> select([r], r.x) - |> plan() - - assert all(query) == - ~s{SELECT s0.x FROM schema AS s0 WHERE (s0.x = 42) AND (s0.y != 43)} - - query = - Schema - |> where([r], {r.x, r.y} > {1, 2}) - |> select([r], r.x) - |> plan() - - assert all(query) == ~s{SELECT s0.x FROM schema AS s0 WHERE ((s0.x,s0.y) > (1,2))} - end - - test "or_where" do - query = - Schema - |> or_where([r], r.x == 42) - |> or_where([r], r.y != 43) - |> select([r], r.x) - |> plan() - - assert all(query) == - ~s{SELECT s0.x FROM schema AS s0 WHERE (s0.x = 42) OR (s0.y != 43)} - - query = - Schema - |> or_where([r], r.x == 42) - |> or_where([r], r.y != 43) - |> where([r], r.z == 44) - |> select([r], r.x) - |> plan() - - assert all(query) == - ~s{SELECT s0.x FROM schema AS s0 WHERE ((s0.x = 42) OR (s0.y != 43)) AND (s0.z = 44)} - end - - test "order by" do - query = - Schema - |> order_by([r], r.x) - |> select([r], r.x) - |> plan() - - assert all(query) == ~s{SELECT s0.x FROM schema AS s0 ORDER BY s0.x} - - query = - Schema - |> order_by([r], [r.x, r.y]) - |> select([r], r.x) - |> plan() - - assert all(query) == ~s{SELECT s0.x FROM schema AS s0 ORDER BY s0.x, s0.y} - - query = - Schema - |> order_by([r], asc: r.x, desc: r.y) - |> select([r], r.x) - |> plan() - - assert all(query) == ~s{SELECT s0.x FROM schema AS s0 ORDER BY s0.x, s0.y DESC} - - query = - 
Schema - |> order_by([r], []) - |> select([r], r.x) - |> plan() - - assert all(query) == ~s{SELECT s0.x FROM schema AS s0} - - for dir <- [:asc_nulls_first, :asc_nulls_last, :desc_nulls_first, :desc_nulls_last] do - assert_raise( - Ecto.QueryError, - ~r"#{dir} is not supported in ORDER BY in SQLite3", - fn -> - Schema - |> order_by([r], [{^dir, r.x}]) - |> select([r], r.x) - |> plan() - |> all() - end - ) - end - end - - test "union and union all" do - base_query = - Schema - |> select([r], r.x) - |> order_by(fragment("rand")) - |> offset(10) - |> limit(5) - - union_query1 = - Schema - |> select([r], r.y) - |> order_by([r], r.y) - |> offset(20) - |> limit(40) - - union_query2 = - Schema - |> select([r], r.z) - |> order_by([r], r.z) - |> offset(30) - |> limit(60) - - query = - base_query - |> union(^union_query1) - |> union(^union_query2) - |> plan() - - assert all(query) == - """ - SELECT s0.x FROM schema AS s0 \ - UNION SELECT s0.y FROM schema AS s0 ORDER BY s0.y LIMIT 40 OFFSET 20 \ - UNION SELECT s0.z FROM schema AS s0 ORDER BY s0.z LIMIT 60 OFFSET 30 \ - ORDER BY rand LIMIT 5 OFFSET 10\ - """ - - query = - base_query - |> union_all(^union_query1) - |> union_all(^union_query2) - |> plan() - - assert all(query) == - """ - SELECT s0.x FROM schema AS s0 \ - UNION ALL SELECT s0.y FROM schema AS s0 ORDER BY s0.y LIMIT 40 OFFSET 20 \ - UNION ALL SELECT s0.z FROM schema AS s0 ORDER BY s0.z LIMIT 60 OFFSET 30 \ - ORDER BY rand LIMIT 5 OFFSET 10\ - """ - end - - test "except and except all" do - base_query = - Schema - |> select([r], r.x) - |> order_by(fragment("rand")) - |> offset(10) - |> limit(5) - - except_query1 = - Schema - |> select([r], r.y) - |> order_by([r], r.y) - |> offset(20) - |> limit(40) - - except_query2 = - Schema - |> select([r], r.z) - |> order_by([r], r.z) - |> offset(30) - |> limit(60) - - query = - base_query - |> except(^except_query1) - |> except(^except_query2) - |> plan() - - assert all(query) == - """ - SELECT s0.x FROM schema AS s0 \ - EXCEPT SELECT s0.y FROM schema AS s0 ORDER BY s0.y LIMIT 40 OFFSET 20 \ - EXCEPT SELECT s0.z FROM schema AS s0 ORDER BY s0.z LIMIT 60 OFFSET 30 \ - ORDER BY rand LIMIT 5 OFFSET 10\ - """ - - assert_raise( - Ecto.QueryError, - fn -> - base_query - |> except_all(^except_query1) - |> except_all(^except_query2) - |> plan() - |> all() - end - ) - end - - test "intersect and intersect all" do - base_query = - Schema - |> select([r], r.x) - |> order_by(fragment("rand")) - |> offset(10) - |> limit(5) - - intersect_query1 = - Schema - |> select([r], r.y) - |> order_by([r], r.y) - |> offset(20) - |> limit(40) - - intersect_query2 = - Schema - |> select([r], r.z) - |> order_by([r], r.z) - |> offset(30) - |> limit(60) - - query = - base_query - |> intersect(^intersect_query1) - |> intersect(^intersect_query2) - |> plan() - - assert all(query) == - """ - SELECT s0.x FROM schema AS s0 \ - INTERSECT SELECT s0.y FROM schema AS s0 ORDER BY s0.y LIMIT 40 OFFSET 20 \ - INTERSECT SELECT s0.z FROM schema AS s0 ORDER BY s0.z LIMIT 60 OFFSET 30 \ - ORDER BY rand LIMIT 5 OFFSET 10\ - """ - - assert_raise( - Ecto.QueryError, - fn -> - base_query - |> intersect_all(^intersect_query1) - |> intersect_all(^intersect_query2) - |> plan() - |> all() - end - ) - end - - test "limit and offset" do - query = - Schema - |> limit([r], 3) - |> select([], true) - |> plan() - - assert all(query) == ~s{SELECT 1 FROM schema AS s0 LIMIT 3} - - query = - Schema - |> offset([r], 5) - |> select([], true) - |> plan() - - assert all(query) == ~s{SELECT 1 FROM schema AS s0 OFFSET 5} 
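- # Note: SQLite itself only accepts OFFSET together with a LIMIT clause, - # so the bare "OFFSET 5" asserted above would be rejected by the engine; - # the combined "LIMIT 3 OFFSET 5" form below is the one SQLite executes.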
- - query = - Schema - |> offset([r], 5) - |> limit([r], 3) - |> select([], true) - |> plan() - - assert all(query) == ~s{SELECT 1 FROM schema AS s0 LIMIT 3 OFFSET 5} - end - - test "lock" do - assert_raise( - ArgumentError, - "locks are not supported by SQLite3", - fn -> - Schema - |> lock("LOCK IN SHARE MODE") - |> select([], true) - |> plan() - |> all() - end - ) - - assert_raise( - ArgumentError, - "locks are not supported by SQLite3", - fn -> - Schema - |> lock([p], fragment("UPDATE on ?", p)) - |> select([], true) - |> plan() - |> all() - end - ) - end - - test "string escape" do - query = - "schema" - |> where(foo: "'\\ ") - |> select([], true) - |> plan() - - assert all(query) == ~s{SELECT 1 FROM schema AS s0 WHERE (s0.foo = '''\\\\ ')} - - query = - "schema" - |> where(foo: "'") - |> select([], true) - |> plan() - - assert all(query) == ~s{SELECT 1 FROM schema AS s0 WHERE (s0.foo = '''')} - end - - test "binary ops" do - query = - Schema - |> select([r], r.x == 2) - |> plan() - - assert all(query) == ~s{SELECT s0.x = 2 FROM schema AS s0} - - query = - Schema - |> select([r], r.x != 2) - |> plan() - - assert all(query) == ~s{SELECT s0.x != 2 FROM schema AS s0} - - query = - Schema - |> select([r], r.x <= 2) - |> plan() - - assert all(query) == ~s{SELECT s0.x <= 2 FROM schema AS s0} - - query = - Schema - |> select([r], r.x >= 2) - |> plan() - - assert all(query) == ~s{SELECT s0.x >= 2 FROM schema AS s0} - - query = - Schema - |> select([r], r.x < 2) - |> plan() - - assert all(query) == ~s{SELECT s0.x < 2 FROM schema AS s0} - - query = - Schema - |> select([r], r.x > 2) - |> plan() - - assert all(query) == ~s{SELECT s0.x > 2 FROM schema AS s0} - - query = - Schema - |> select([r], r.x + 2) - |> plan() - - assert all(query) == ~s{SELECT s0.x + 2 FROM schema AS s0} - end - - test "is_nil" do - query = - Schema - |> select([r], is_nil(r.x)) - |> plan() - - assert all(query) == ~s{SELECT s0.x IS NULL FROM schema AS s0} - - query = - Schema - |> select([r], not is_nil(r.x)) - |> plan() - - assert all(query) == ~s{SELECT NOT (s0.x IS NULL) FROM schema AS s0} - - query = - "schema" - |> select([r], r.x == is_nil(r.y)) - |> plan() - - assert all(query) == ~s{SELECT s0.x = (s0.y IS NULL) FROM schema AS s0} - end - - test "order_by and types" do - query = - "schema3" - |> order_by([e], type(fragment("?", e.binary), ^:decimal)) - |> select(true) - |> plan() - - assert all(query) == "SELECT 1 FROM schema3 AS s0 ORDER BY (s0.binary + 0)" - end - - test "fragments" do - query = - Schema - |> select([r], fragment("now")) - |> plan() - - assert all(query) == ~s{SELECT now FROM schema AS s0} - - query = - Schema - |> select([r], fragment("fun(?)", r)) - |> plan() - - assert all(query) == ~s{SELECT fun(s0) FROM schema AS s0} - - query = - Schema - |> select([r], fragment("lcase(?)", r.x)) - |> plan() - - assert all(query) == ~s{SELECT lcase(s0.x) FROM schema AS s0} - - query = - Schema - |> select([r], r.x) - |> where([], fragment(~s|? = "query\\?"|, ^10)) - |> plan() - - assert all(query) == ~s|SELECT s0.x FROM schema AS s0 WHERE (? = "query?")| - - value = 13 - - query = - Schema - |> select([r], fragment("lcase(?, ?)", r.x, ^value)) - |> plan() - - assert all(query) == ~s{SELECT lcase(s0.x, ?) 
FROM schema AS s0} - - assert_raise( - Ecto.QueryError, - fn -> - Schema - |> select([], fragment(title: 2)) - |> plan() - |> all() - end - ) - end - - test "literals" do - query = - "schema" - |> where(foo: true) - |> select([], true) - |> plan() - - assert all(query) == ~s{SELECT 1 FROM schema AS s0 WHERE (s0.foo = 1)} - - query = - "schema" - |> where(foo: false) - |> select([], true) - |> plan() - - assert all(query) == ~s{SELECT 1 FROM schema AS s0 WHERE (s0.foo = 0)} - - query = - "schema" - |> where(foo: "abc") - |> select([], true) - |> plan() - - assert all(query) == ~s{SELECT 1 FROM schema AS s0 WHERE (s0.foo = 'abc')} - - query = - "schema" - |> where(foo: 123) - |> select([], true) - |> plan() - - assert all(query) == ~s{SELECT 1 FROM schema AS s0 WHERE (s0.foo = 123)} - - query = - "schema" - |> where(foo: 123.0) - |> select([], true) - |> plan() - - assert all(query) == ~s{SELECT 1 FROM schema AS s0 WHERE (s0.foo = (0 + 123.0))} - end - - # TODO: We need to determine what format to store the UUID. Is it Text or binary 16? - # Are we going for readability or for compactness? - test "tagged type" do - query = - Schema - |> select([], type(^"601d74e4-a8d3-4b6e-8365-eddb4c893327", Ecto.UUID)) - |> plan() - - assert all(query) == ~s{SELECT CAST(? AS TEXT) FROM schema AS s0} - end - - test "string type" do - query = - Schema - |> select([], type(^"test", :string)) - |> plan() - - assert all(query) == ~s{SELECT CAST(? AS TEXT) FROM schema AS s0} - end - - test "json_extract_path" do - query = Schema |> select([s], json_extract_path(s.meta, [0, 1])) |> plan() - assert all(query) == ~s{SELECT json_extract(s0.meta, '$[0][1]') FROM schema AS s0} - - query = Schema |> select([s], json_extract_path(s.meta, ["a", "b"])) |> plan() - assert all(query) == ~s{SELECT json_extract(s0.meta, '$.a.b') FROM schema AS s0} - - query = Schema |> select([s], json_extract_path(s.meta, ["'a"])) |> plan() - assert all(query) == ~s{SELECT json_extract(s0.meta, '$.''a') FROM schema AS s0} - - query = Schema |> select([s], json_extract_path(s.meta, ["\"a"])) |> plan() - assert all(query) == ~s{SELECT json_extract(s0.meta, '$.\\"a') FROM schema AS s0} - - query = Schema |> select([s], s.meta["author"]["name"]) |> plan() - assert all(query) == ~s{SELECT json_extract(s0.meta, '$.author.name') FROM schema AS s0} - end - - test "nested expressions" do - z = 123 - - query = - (r in Schema) - |> from([]) - |> select([r], (r.x > 0 and r.y > ^(-z)) or true) - |> plan() - - assert all(query) == ~s{SELECT ((s0.x > 0) AND (s0.y > ?)) OR 1 FROM schema AS s0} - end - - test "in expression" do - query = - Schema - |> select([e], 1 in [1, e.x, 3]) - |> plan() - - assert all(query) == ~s{SELECT 1 IN (1,s0.x,3) FROM schema AS s0} - - query = - Schema - |> select([e], 1 in ^[]) - |> plan() - - assert all(query) == ~s{SELECT 0 FROM schema AS s0} - - query = - Schema - |> select([e], 1 in ^[1, 2, 3]) - |> plan() - - assert all(query) == ~s{SELECT 1 IN (?,?,?) FROM schema AS s0} - - query = - Schema - |> select([e], 1 in [1, ^2, 3]) - |> plan() - - assert all(query) == ~s{SELECT 1 IN (1,?,3) FROM schema AS s0} - - query = - Schema - |> select([e], e.x == ^0 or e.x in ^[1, 2, 3] or e.x == ^4) - |> plan() - - assert all(query) == - ~s{SELECT ((s0.x = ?) OR s0.x IN (?,?,?)) OR (s0.x = ?) 
FROM schema AS s0} - - query = - Schema - |> select([e], e in [1, 2, 3]) - |> plan() - - assert all(query) == "SELECT s0 IN (SELECT value FROM JSON_EACH('[1,2,3]')) FROM schema AS s0" - end - - test "in subquery" do - posts = - "posts" - |> where(title: ^"hello") - |> select([p], p.id) - |> subquery() - - query = - "comments" - |> where([c], c.post_id in subquery(posts)) - |> select([c], c.x) - |> plan() - - assert all(query) == - """ - SELECT c0.x FROM comments AS c0 \ - WHERE (c0.post_id IN (SELECT sp0.id FROM posts AS sp0 WHERE (sp0.title = ?)))\ - """ - - posts = - "posts" - |> where(title: parent_as(:comment).subtitle) - |> select([p], p.id) - |> subquery() - - query = - "comments" - |> from(as: :comment) - |> where([c], c.post_id in subquery(posts)) - |> select([c], c.x) - |> plan() - - assert all(query) == - """ - SELECT c0.x FROM comments AS c0 \ - WHERE (c0.post_id IN (SELECT sp0.id FROM posts AS sp0 WHERE (sp0.title = c0.subtitle)))\ - """ - end - - test "having" do - query = - Schema - |> having([p], p.x == p.x) - |> select([p], p.x) - |> plan() - - assert all(query) == ~s{SELECT s0.x FROM schema AS s0 HAVING (s0.x = s0.x)} - - query = - Schema - |> having([p], p.x == p.x) - |> having([p], p.y == p.y) - |> select([p], [p.y, p.x]) - |> plan() - - assert all(query) == - """ - SELECT s0.y, s0.x \ - FROM schema AS s0 \ - HAVING (s0.x = s0.x) \ - AND (s0.y = s0.y)\ - """ - end - - test "or_having" do - query = - Schema - |> or_having([p], p.x == p.x) - |> select([p], p.x) - |> plan() - - assert all(query) == ~s{SELECT s0.x FROM schema AS s0 HAVING (s0.x = s0.x)} - - query = - Schema - |> or_having([p], p.x == p.x) - |> or_having([p], p.y == p.y) - |> select([p], [p.y, p.x]) - |> plan() - - assert all(query) == - """ - SELECT s0.y, s0.x \ - FROM schema AS s0 \ - HAVING (s0.x = s0.x) \ - OR (s0.y = s0.y)\ - """ - end - - test "group by" do - query = - Schema - |> group_by([r], r.x) - |> select([r], r.x) - |> plan() - - assert all(query) == ~s{SELECT s0.x FROM schema AS s0 GROUP BY s0.x} - - query = - Schema - |> group_by([r], 2) - |> select([r], r.x) - |> plan() - - assert all(query) == ~s{SELECT s0.x FROM schema AS s0 GROUP BY 2} - - query = - Schema - |> group_by([r], [r.x, r.y]) - |> select([r], r.x) - |> plan() - - assert all(query) == ~s{SELECT s0.x FROM schema AS s0 GROUP BY s0.x, s0.y} - - query = - Schema - |> group_by([r], []) - |> select([r], r.x) - |> plan() - - assert all(query) == ~s{SELECT s0.x FROM schema AS s0} - end - - test "interpolated values" do - cte1 = - "schema1" - |> select([m], %{id: m.id, smth: ^true}) - |> where([], fragment("?", ^1)) - - union = - "schema1" - |> select([m], {m.id, ^true}) - |> where([], fragment("?", ^5)) - - union_all = - "schema2" - |> select([m], {m.id, ^false}) - |> where([], fragment("?", ^6)) - - query = - Schema - |> with_cte("cte1", as: ^cte1) - |> with_cte("cte2", as: fragment("SELECT * FROM schema WHERE ?", ^2)) - |> select([m], {m.id, ^0}) - |> join(:inner, [], Schema2, on: fragment("?", ^true)) - |> join(:inner, [], Schema2, on: fragment("?", ^false)) - |> where([], fragment("?", ^true)) - |> where([], fragment("?", ^false)) - |> having([], fragment("?", ^true)) - |> having([], fragment("?", ^false)) - |> group_by([], fragment("?", ^3)) - |> group_by([], fragment("?", ^4)) - |> union(^union) - |> union_all(^union_all) - |> order_by([], fragment("?", ^7)) - |> limit([], ^8) - |> offset([], ^9) - |> plan() - - assert all(query) == - """ - WITH cte1 AS (SELECT s0.id AS id, ? 
-             cte2 AS (SELECT * FROM schema WHERE ?) \
-             SELECT s0.id, ? FROM schema AS s0 INNER JOIN schema2 AS s1 ON ? \
-             INNER JOIN schema2 AS s2 ON ? WHERE (?) AND (?) \
-             GROUP BY ?, ? HAVING (?) AND (?) \
-             UNION SELECT s0.id, ? FROM schema1 AS s0 WHERE (?) \
-             UNION ALL SELECT s0.id, ? FROM schema2 AS s0 WHERE (?) \
-             ORDER BY ? LIMIT ? OFFSET ?\
-             """
-  end
-
-  test "fragments allow ? to be escaped with backslash" do
-    query =
-      (e in "schema")
-      |> from(
-        where: fragment(~s|? = "query\\?"|, e.start_time),
-        select: true
-      )
-      |> plan()
-
-    result = ~s|SELECT 1 FROM schema AS s0 WHERE (s0.start_time = "query?")|
-
-    assert all(query) == result
-  end
-
-  ##
-  ## *_all
-  ##
-
-  test "update all" do
-    query =
-      (m in Schema)
-      |> from(update: [set: [x: 0]])
-      |> plan(:update_all)
-
-    assert update_all(query) == ~s{UPDATE schema AS s0 SET x = 0}
-
-    query =
-      (m in Schema)
-      |> from(update: [set: [x: 0], inc: [y: 1, z: -3]])
-      |> plan(:update_all)
-
-    # TODO: should probably be "y = s0.y + 1"
-    # table-name.column-name is not allowed on the left hand side of SET
-    # but is allowed on right hand side, and we should err towards being more explicit
-    assert update_all(query) ==
-             """
-             UPDATE schema AS s0 \
-             SET \
-             x = 0, \
-             y = y + 1, \
-             z = z + -3\
-             """
-
-    query =
-      (e in Schema)
-      |> from(where: e.x == 123, update: [set: [x: 0]])
-      |> plan(:update_all)
-
-    assert update_all(query) ==
-             """
-             UPDATE schema AS s0 \
-             SET x = 0 \
-             WHERE (s0.x = 123)\
-             """
-
-    query =
-      (m in Schema)
-      |> from(update: [set: [x: ^0]])
-      |> plan(:update_all)
-
-    assert update_all(query) == ~s|UPDATE schema AS s0 SET x = ?|
-
-    query =
-      Schema
-      |> join(:inner, [p], q in Schema2, on: p.x == q.z)
-      |> update([_], set: [x: 0])
-      |> plan(:update_all)
-
-    assert update_all(query) ==
-             """
-             UPDATE schema AS s0 \
-             SET \
-             x = 0 \
-             FROM schema2 AS s1 \
-             WHERE (s0.x = s1.z)\
-             """
-
-    query =
-      (e in Schema)
-      |> from(
-        where: e.x == 123,
-        update: [set: [x: 0]],
-        join: q in Schema2,
-        on: e.x == q.z
-      )
-      |> plan(:update_all)
-
-    assert update_all(query) ==
-             """
-             UPDATE schema AS s0 \
-             SET x = 0 \
-             FROM schema2 AS s1 \
-             WHERE (s0.x = s1.z) \
-             AND (s0.x = 123)\
-             """
-
-    query =
-      from(
-        p in Post,
-        where: p.title == ^"foo",
-        select: p.content,
-        update: [set: [title: "bar"]]
-      )
-      |> plan(:update_all)
-
-    assert update_all(query) ==
-             """
-             UPDATE posts AS p0 \
-             SET title = 'bar' \
-             WHERE (p0.title = ?) \
-             RETURNING p0.content\
-             """
-  end
-
-  test "update all with prefix" do
-    query =
-      (m in Schema)
-      |> from(update: [set: [x: 0]])
-      |> Map.put(:prefix, "prefix")
-      |> plan(:update_all)
-
-    assert update_all(query) == ~s{UPDATE prefix.schema AS s0 SET x = 0}
-
-    query =
-      (m in Schema)
-      |> from(prefix: "first", update: [set: [x: 0]])
-      |> Map.put(:prefix, "prefix")
-      |> plan(:update_all)
-
-    assert update_all(query) == ~s{UPDATE first.schema AS s0 SET x = 0}
-  end
-
-  test "update all with returning" do
-    query =
-      from(p in Post, update: [set: [title: "foo"]])
-      |> select([p], p)
-      |> plan(:update_all)
-
-    assert update_all(query) ==
-             """
-             UPDATE posts AS p0 \
-             SET title = 'foo' \
-             RETURNING p0.id, p0.title, p0.content\
-             """
-
-    query =
-      from(m in Schema, update: [set: [x: ^1]])
-      |> where([m], m.x == ^2)
-      |> select([m], m.x == ^3)
-      |> plan(:update_all)
-
-    assert update_all(query) ==
-             """
-             UPDATE schema AS s0 \
-             SET x = ? \
-             WHERE (s0.x = ?) \
-             RETURNING s0.x = ?\
-             """
-  end
-
-  test "delete all" do
-    query =
-      Schema
-      |> Ecto.Queryable.to_query()
-      |> plan()
-
-    assert delete_all(query) == ~s{DELETE FROM schema AS s0}
-
-    query =
-      (e in Schema)
-      |> from(where: e.x == 123)
-      |> plan()
-
-    assert delete_all(query) == ~s{DELETE FROM schema AS s0 WHERE (s0.x = 123)}
-
-    query =
-      (e in Schema)
-      |> from(where: e.x == 123, select: e.x)
-      |> plan()
-
-    assert delete_all(query) ==
-             """
-             DELETE FROM schema AS s0 \
-             WHERE (s0.x = 123) RETURNING s0.x\
-             """
-  end
-
-  test "delete all with returning" do
-    query = Post |> Ecto.Queryable.to_query() |> select([m], m) |> plan()
-
-    assert delete_all(query) ==
-             """
-             DELETE FROM posts AS p0 \
-             RETURNING p0.id, p0.title, p0.content\
-             """
-  end
-
-  test "delete all with prefix" do
-    query =
-      Schema
-      |> Ecto.Queryable.to_query()
-      |> Map.put(:prefix, "prefix")
-      |> plan()
-
-    assert delete_all(query) == ~s{DELETE FROM prefix.schema AS s0}
-
-    query =
-      Schema
-      |> from(prefix: "first")
-      |> Map.put(:prefix, "prefix")
-      |> plan()
-
-    assert delete_all(query) == ~s{DELETE FROM first.schema AS s0}
-  end
-
-  ##
-  ## Partitions and windows
-  ##
-
-  describe "windows" do
-    test "one window" do
-      query =
-        Schema
-        |> select([r], r.x)
-        |> windows([r], w: [partition_by: r.x])
-        |> plan()
-
-      assert all(query) ==
-               """
-               SELECT s0.x \
-               FROM schema AS s0 WINDOW w AS (PARTITION BY s0.x)\
-               """
-    end
-
-    test "two windows" do
-      query =
-        Schema
-        |> select([r], r.x)
-        |> windows([r], w1: [partition_by: r.x], w2: [partition_by: r.y])
-        |> plan()
-
-      assert all(query) ==
-               """
-               SELECT s0.x \
-               FROM schema AS s0 WINDOW w1 AS (PARTITION BY s0.x), \
-               w2 AS (PARTITION BY s0.y)\
-               """
-    end
-
-    test "count over window" do
-      query =
-        Schema
-        |> windows([r], w: [partition_by: r.x])
-        |> select([r], count(r.x) |> over(:w))
-        |> plan()
-
-      assert all(query) ==
-               """
-               SELECT count(s0.x) OVER w \
-               FROM schema AS s0 WINDOW w AS (PARTITION BY s0.x)\
-               """
-    end
-
-    test "count over all" do
-      query =
-        Schema
-        |> select([r], count(r.x) |> over)
-        |> plan()
-
-      assert all(query) == ~s{SELECT count(s0.x) OVER () FROM schema AS s0}
-    end
-
-    test "row_number over all" do
-      query =
-        Schema
-        |> select(row_number |> over)
-        |> plan()
-
-      assert all(query) == ~s{SELECT row_number() OVER () FROM schema AS s0}
-    end
-
-    test "nth_value over all" do
-      query =
-        Schema
-        |> select([r], nth_value(r.x, 42) |> over)
-        |> plan()
-
-      assert all(query) ==
-               """
-               SELECT nth_value(s0.x, 42) OVER () \
-               FROM schema AS s0\
-               """
-    end
-
-    test "lag/2 over all" do
-      query =
-        Schema
-        |> select([r], lag(r.x, 42) |> over)
-        |> plan()
-
-      assert all(query) == ~s{SELECT lag(s0.x, 42) OVER () FROM schema AS s0}
-    end
-
-    test "custom aggregation over all" do
-      query =
-        Schema
-        |> select([r], fragment("custom_function(?)", r.x) |> over)
-        |> plan()
-
-      assert all(query) ==
-               """
-               SELECT custom_function(s0.x) OVER () \
-               FROM schema AS s0\
-               """
-    end
-
-    test "partition by and order by on window" do
-      query =
-        Schema
-        |> windows([r], w: [partition_by: [r.x, r.z], order_by: r.x])
-        |> select([r], r.x)
-        |> plan()
-
-      assert all(query) ==
-               """
-               SELECT s0.x \
-               FROM schema AS s0 WINDOW w AS (PARTITION BY s0.x, s0.z ORDER BY s0.x)\
-               """
-    end
-
-    test "partition by and order by on over" do
-      query =
-        Schema
-        |> select([r], count(r.x) |> over(partition_by: [r.x, r.z], order_by: r.x))
-        |> plan()
-
-      assert all(query) ==
-               """
-               SELECT count(s0.x) OVER (PARTITION BY s0.x, s0.z ORDER BY s0.x) \
-               FROM schema AS s0\
-               """
-    end
-
-    test "frame clause" do
-      query =
-        Schema
-        |> select(
-          [r],
-          count(r.x)
-          |> over(
-            partition_by: [r.x, r.z],
-            order_by: r.x,
-            frame: fragment("ROWS BETWEEN 2 PRECEDING AND 2 FOLLOWING")
-          )
-        )
-        |> plan()
-
-      assert all(query) ==
-               """
-               SELECT count(s0.x) OVER (\
-               PARTITION BY s0.x, \
-               s0.z \
-               ORDER BY s0.x \
-               ROWS BETWEEN 2 PRECEDING AND 2 FOLLOWING\
-               ) \
-               FROM schema AS s0\
-               """
-    end
-  end
-
-  ##
-  ## Joins
-  ##
-
-  test "join" do
-    query =
-      Schema
-      |> join(:inner, [p], q in Schema2, on: p.x == q.z)
-      |> select([], true)
-      |> plan()
-
-    assert all(query) ==
-             """
-             SELECT 1 \
-             FROM schema AS s0 \
-             INNER JOIN schema2 AS s1 ON s0.x = s1.z\
-             """
-
-    query =
-      Schema
-      |> join(:inner, [p], q in Schema2, on: p.x == q.z)
-      |> join(:inner, [], Schema, on: true)
-      |> select([], true)
-      |> plan()
-
-    assert all(query) ==
-             """
-             SELECT 1 FROM schema AS s0 INNER JOIN schema2 AS s1 ON s0.x = s1.z \
-             INNER JOIN schema AS s2 ON 1\
-             """
-  end
-
-  test "join ignores hints" do
-    query =
-      Schema
-      |> join(:inner, [p], q in Schema2, hints: ["USE INDEX FOO", "USE INDEX BAR"])
-      |> select([], true)
-      |> plan()
-
-    assert all(query) ==
-             """
-             SELECT 1 \
-             FROM schema AS s0 \
-             INNER JOIN schema2 AS s1 ON 1\
-             """
-  end
-
-  test "join with nothing bound" do
-    query =
-      Schema
-      |> join(:inner, [], q in Schema2, on: q.z == q.z)
-      |> select([], true)
-      |> plan()
-
-    assert all(query) ==
-             """
-             SELECT 1 \
-             FROM schema AS s0 \
-             INNER JOIN schema2 AS s1 ON s1.z = s1.z\
-             """
-  end
-
-  test "join without schema" do
-    query =
-      "posts"
-      |> join(:inner, [p], q in "comments", on: p.x == q.z)
-      |> select([], true)
-      |> plan()
-
-    assert all(query) ==
-             """
-             SELECT 1 \
-             FROM posts AS p0 \
-             INNER JOIN comments AS c1 ON p0.x = c1.z\
-             """
-  end
-
-  test "join with subquery" do
-    posts =
-      "posts"
-      |> where(title: ^"hello")
-      |> select([r], %{x: r.x, y: r.y})
-      |> subquery()
-
-    query =
-      "comments"
-      |> join(:inner, [c], p in subquery(posts), on: true)
-      |> select([_, p], p.x)
-      |> plan()
-
-    assert all(query) ==
-             """
-             SELECT s1.x FROM comments AS c0 \
-             INNER JOIN (\
-             SELECT sp0.x AS x, sp0.y AS y \
-             FROM posts AS sp0 \
-             WHERE (sp0.title = ?)\
-             ) AS s1 ON 1\
-             """
-
-    posts =
-      "posts"
-      |> where(title: ^"hello")
-      |> select([r], %{x: r.x, z: r.y})
-      |> subquery()
-
-    query =
-      "comments"
-      |> join(:inner, [c], p in subquery(posts), on: true)
-      |> select([_, p], p)
-      |> plan()
-
-    assert all(query) ==
-             """
-             SELECT s1.x, s1.z FROM comments AS c0 \
-             INNER JOIN (\
-             SELECT sp0.x AS x, sp0.y AS z \
-             FROM posts AS sp0 \
-             WHERE (sp0.title = ?)\
-             ) AS s1 ON 1\
-             """
-
-    posts =
-      "posts"
-      |> where(title: parent_as(:comment).subtitle)
-      |> select([r], r.title)
-      |> subquery()
-
-    query =
-      "comments"
-      |> from(as: :comment)
-      |> join(:inner, [c], p in subquery(posts))
-      |> select([_, p], p)
-      |> plan()
-
-    assert all(query) ==
-             """
-             SELECT s1.title \
-             FROM comments AS c0 \
-             INNER JOIN (\
-             SELECT sp0.title AS title \
-             FROM posts AS sp0 \
-             WHERE (sp0.title = c0.subtitle)\
-             ) AS s1 ON 1\
-             """
-  end
-
-  test "join with prefix" do
-    query =
-      Schema
-      |> join(:inner, [p], q in Schema2, on: p.x == q.z)
-      |> select([], true)
-      |> Map.put(:prefix, "prefix")
-      |> plan()
-
-    assert all(query) ==
-             """
-             SELECT 1 \
-             FROM prefix.schema AS s0 \
-             INNER JOIN prefix.schema2 AS s1 ON s0.x = s1.z\
-             """
-
-    query =
-      Schema
-      |> from(prefix: "first")
-      |> join(:inner, [p], q in Schema2, on: p.x == q.z, prefix: "second")
-      |> select([], true)
-      |> Map.put(:prefix, "prefix")
-      |> plan()
-
-    assert all(query) ==
-             """
-             SELECT 1 \
-             FROM first.schema AS s0 \
-             INNER JOIN second.schema2 AS s1 ON s0.x = s1.z\
-             """
-  end
-
-  test "join with fragment" do
-    query =
-      Schema
-      |> join(
-        :inner,
-        [p],
-        q in fragment(
-          "SELECT * FROM schema2 AS s2 WHERE s2.id = ? AND s2.field = ?",
-          p.x,
-          ^10
-        )
-      )
-      |> select([p], {p.id, ^0})
-      |> where([p], p.id > 0 and p.id < ^100)
-      |> plan()
-
-    assert all(query) ==
-             """
-             SELECT s0.id, ? \
-             FROM schema AS s0 \
-             INNER JOIN \
-             (\
-             SELECT * \
-             FROM schema2 AS s2 \
-             WHERE s2.id = s0.x AND s2.field = ?\
-             ) AS f1 ON 1 \
-             WHERE ((s0.id > 0) AND (s0.id < ?))\
-             """
-  end
-
-  test "join with fragment and on defined" do
-    query =
-      Schema
-      |> join(:inner, [p], q in fragment("SELECT * FROM schema2"), on: q.id == p.id)
-      |> select([p], {p.id, ^0})
-      |> plan()
-
-    assert all(query) ==
-             """
-             SELECT s0.id, ? \
-             FROM schema AS s0 \
-             INNER JOIN \
-             (SELECT * FROM schema2) AS f1 ON f1.id = s0.id\
-             """
-  end
-
-  test "join with query interpolation" do
-    inner = Ecto.Queryable.to_query(Schema2)
-
-    query =
-      (p in Schema)
-      |> from(left_join: c in ^inner, select: {p.id, c.id})
-      |> plan()
-
-    assert all(query) ==
-             """
-             SELECT s0.id, s1.id \
-             FROM schema AS s0 \
-             LEFT OUTER JOIN schema2 AS s1 ON 1\
-             """
-  end
-
-  test "cross join" do
-    query =
-      (p in Schema)
-      |> from(cross_join: c in Schema2, select: {p.id, c.id})
-      |> plan()
-
-    assert all(query) ==
-             """
-             SELECT s0.id, s1.id \
-             FROM schema AS s0 \
-             CROSS JOIN schema2 AS s1\
-             """
-  end
-
-  test "join produces correct bindings" do
-    query = from(p in Schema, join: c in Schema2, on: true)
-    query = from(p in query, join: c in Schema2, on: true, select: {p.id, c.id})
-    query = plan(query)
-
-    assert all(query) ==
-             """
-             SELECT s0.id, s2.id \
-             FROM schema AS s0 \
-             INNER JOIN schema2 AS s1 ON 1 \
-             INNER JOIN schema2 AS s2 ON 1\
-             """
-  end
-
-  describe "query interpolation parameters" do
-    test "self join on subquery" do
-      subquery = select(Schema, [r], %{x: r.x, y: r.y})
-
-      query =
-        subquery
-        |> join(:inner, [c], p in subquery(subquery))
-        |> plan()
-        |> all()
-
-      assert query ==
-               """
-               SELECT s0.x, s0.y \
-               FROM schema AS s0 \
-               INNER JOIN (SELECT ss0.x AS x, ss0.y AS y FROM schema AS ss0) \
-               AS s1 ON 1\
-               """
-    end
-
-    test "self join on subquery with fragment" do
-      subquery = select(Schema, [r], %{string: fragment("downcase(?)", ^"string")})
-
-      query =
-        subquery
-        |> join(:inner, [c], p in subquery(subquery))
-        |> plan()
-        |> all()
-
-      assert query ==
-               """
-               SELECT downcase(?) \
-               FROM schema AS s0 \
-               INNER JOIN (SELECT downcase(?) AS string FROM schema AS ss0) \
-               AS s1 ON 1\
-               """
-    end
-
-    test "join on subquery with simple select" do
-      subquery = select(Schema, [r], %{x: ^999, w: ^888})
-
-      query =
-        Schema
-        |> select([r], %{y: ^666})
-        |> join(:inner, [c], p in subquery(subquery))
-        |> where([a, b], a.x == ^111)
-        |> plan()
-        |> all()
-
-      assert query ==
-               """
-               SELECT ? \
-               FROM schema AS s0 \
-               INNER JOIN (SELECT ? AS x, ? AS w FROM schema AS ss0) AS s1 ON 1 \
-               WHERE (s0.x = ?)\
-               """
-    end
-  end
-
-  ##
-  ## Associations
-  ##
-
-  test "association join belongs_to" do
-    query =
-      Schema2
-      |> join(:inner, [c], p in assoc(c, :post))
-      |> select([], true)
-      |> plan()
-
-    assert all(query) ==
-             """
-             SELECT 1 \
-             FROM schema2 AS s0 \
-             INNER JOIN schema AS s1 ON s1.x = s0.z\
-             """
-  end
-
-  test "association join has_many" do
-    query =
-      Schema
-      |> join(:inner, [p], c in assoc(p, :comments))
-      |> select([], true)
-      |> plan()
-
-    assert all(query) ==
-             """
-             SELECT 1 \
-             FROM schema AS s0 \
-             INNER JOIN schema2 AS s1 ON s1.z = s0.x\
-             """
-  end
-
-  test "association join has_one" do
-    query =
-      Schema
-      |> join(:inner, [p], pp in assoc(p, :permalink))
-      |> select([], true)
-      |> plan()
-
-    assert all(query) ==
-             """
-             SELECT 1 \
-             FROM schema AS s0 \
-             INNER JOIN schema3 AS s1 ON s1.id = s0.y\
-             """
-  end
-
-  ##
-  ## Schema based
-  ##
-
-  test "insert" do
-    query = insert(nil, "schema", [:x, :y], [[:x, :y]], {:raise, [], []}, [])
-    assert query == ~s{INSERT INTO schema (x,y) VALUES (?,?)}
-
-    assert_raise(
-      ArgumentError,
-      "Cell-wise default values are not supported on INSERT statements by SQLite3",
-      fn ->
-        insert(
-          nil,
-          "schema",
-          [:x, :y],
-          [[:x, :y], [nil, :z]],
-          {:raise, [], []},
-          []
-        )
-      end
-    )
-
-    query = insert(nil, "schema", [], [[]], {:raise, [], []}, [])
-    assert query == ~s{INSERT INTO schema DEFAULT VALUES}
-
-    query = insert("prefix", "schema", [], [[]], {:raise, [], []}, [])
-    assert query == ~s{INSERT INTO prefix.schema DEFAULT VALUES}
-
-    query = insert(nil, "schema", [:x, :y], [[:x, :y]], {:raise, [], []}, [:id])
-    assert query == ~s{INSERT INTO schema (x,y) VALUES (?,?) RETURNING id}
-
-    assert_raise(
-      ArgumentError,
-      "Cell-wise default values are not supported on INSERT statements by SQLite3",
-      fn ->
-        insert(nil, "schema", [:x, :y], [[:x, :y], [nil, :z]], {:raise, [], []}, [:id])
-      end
-    )
-  end
-
-  test "insert with on conflict" do
-    # These tests are adapted from the Postgres Adaptor
-
-    # For :nothing
-    query = insert(nil, "schema", [:x, :y], [[:x, :y]], {:nothing, [], []}, [])
-
-    assert query ==
-             """
-             INSERT INTO schema (x,y) \
-             VALUES (?,?) \
-             ON CONFLICT DO NOTHING\
-             """
-
-    query = insert(nil, "schema", [:x, :y], [[:x, :y]], {:nothing, [], [:x, :y]}, [])
-
-    assert query ==
-             """
-             INSERT INTO schema (x,y) \
-             VALUES (?,?) \
-             ON CONFLICT (x,y) DO NOTHING\
-             """
-
-    # For :update
-    # update =
-    #   from("schema", update: [set: [z: "foo"]])
-    #   |> plan(:update_all)
-    #   |> all()
-    # query = insert(nil, "schema", [:x, :y], [[:x, :y]], {update, [], [:x, :y]}, [:z])
-    # assert query == ~s{INSERT INTO schema (x,y) VALUES (?,?) ON CONFLICT (x,y) DO UPDATE SET z = 'foo'}
-
-    # update =
-    #   from("schema", update: [set: [z: ^"foo"]], where: [w: true])
-    #   |> plan(:update_all)
-    #   |> all()
-    # query = insert(nil, "schema", [:x, :y], [[:x, :y]], {update, [], [:x, :y]}, [:z])
-    # assert query = ~s{INSERT INTO schema (x,y) VALUES (?,?) ON CONFLICT (x,y) DO UPDATE SET z = ? WHERE (schema.w = 1)}
-
-    # update =
-    #   from("schema", update: [set: [z: "foo"]])
-    #   |> plan(:update_all)
-    #   |> all()
-    # query = insert(nil, "schema", [:x, :y], [[:x, :y]], {update, [], [:x, :y]}, [:z])
-    # assert query = ~s{INSERT INTO schema (x,y) VALUES (?,?) ON CONFLICT (x,y) DO UPDATE SET z = 'foo'}
-
-    # update =
-    #   from("schema", update: [set: [z: ^"foo"]], where: [w: true])
-    #   |> plan(:update_all)
-    #   |> all()
-    # query = insert(nil, "schema", [:x, :y], [[:x, :y]], {update, [], [:x, :y]}, [:z])
-    # assert query = ~s{INSERT INTO schema (x,y) VALUES (?,?) ON CONFLICT (x,y) DO UPDATE SET z = ? WHERE (schema.w = 1)}
-
-    # For :replace_all
-    assert_raise(
-      ArgumentError,
-      "Upsert in SQLite3 requires :conflict_target",
-      fn ->
-        conflict_target = []
-
-        insert(
-          nil,
-          "schema",
-          [:x, :y],
-          [[:x, :y]],
-          {:replace_all, [], conflict_target},
-          []
-        )
-      end
-    )
-
-    assert_raise(
-      ArgumentError,
-      "Upsert in SQLite3 does not support ON CONSTRAINT",
-      fn ->
-        insert(
-          nil,
-          "schema",
-          [:x, :y],
-          [[:x, :y]],
-          {:replace_all, [], {:constraint, :foo}},
-          []
-        )
-      end
-    )
-
-    query = insert(nil, "schema", [:x, :y], [[:x, :y]], {:replace_all, [], [:id]}, [])
-
-    assert query ==
-             """
-             INSERT INTO schema (x,y) \
-             VALUES (?,?) \
-             ON CONFLICT (id) \
-             DO UPDATE SET x = EXCLUDED.x,y = EXCLUDED.y\
-             """
-  end
-
-  test "insert with query" do
-    select_query = from("schema", select: [:id]) |> plan(:all)
-
-    assert_raise(
-      ArgumentError,
-      "Cell-wise default values are not supported on INSERT statements by SQLite3",
-      fn ->
-        insert(
-          nil,
-          "schema",
-          [:x, :y, :z],
-          [[:x, {select_query, 2}, :z], [nil, nil, {select_query, 1}]],
-          {:raise, [], []},
-          []
-        )
-      end
-    )
-  end
-
-  test "insert with query as rows" do
-    query = from(s in "schema", select: %{foo: fragment("3"), bar: s.bar}) |> plan(:all)
-    query = insert(nil, "schema", [:foo, :bar], query, {:raise, [], []}, [])
-
-    assert query == ~s{INSERT INTO schema (foo,bar) (SELECT 3, s0.bar FROM schema AS s0)}
-  end
-
-  # test "update" do
-  #   query = update(nil, "schema", [:x, :y], [:id], [])
-  #   assert query == ~s{UPDATE schema SET x = ?, y = ? WHERE id = ?}
-  #
-  #   query = update(nil, "schema", [:x, :y], [:id], [])
-  #   assert query == ~s{UPDATE schema SET x = ?, y = ? WHERE id = ?}
-  #
-  #   query = update("prefix", "schema", [:x, :y], [:id], [])
-  #   assert query == ~s{UPDATE prefix.schema SET x = ?, y = ? WHERE id = ?}
-  # end
-
-  test "delete" do
-    query = delete(nil, "schema", [x: 1, y: 2], [])
-    assert query == ~s{DELETE FROM schema WHERE x = ? AND y = ?}
-
-    query = delete("prefix", "schema", [x: 1, y: 2], [])
-    assert query == ~s{DELETE FROM prefix.schema WHERE x = ? AND y = ?}
-
-    query = delete(nil, "schema", [x: nil, y: 1], [])
-    assert query == ~s{DELETE FROM schema WHERE x IS NULL AND y = ?}
-  end
-
-  ##
-  ## DDL
-  ##
-
-  test "executing a string during migration" do
-    assert execute_ddl("example") == ["example"]
-  end
-
-  test "create table" do
-    create =
-      {:create, table(:posts),
-       [
-         {:add, :name, :string, [default: "Untitled", size: 20, null: false]},
-         {:add, :token, :binary, [size: 20, null: false]},
-         {:add, :price, :numeric,
-          [precision: 8, scale: 2, default: {:fragment, "expr"}]},
-         {:add, :on_hand, :integer, [default: 0, null: true]},
-         {:add, :likes, :integer, [default: 0, null: false]},
-         {:add, :published_at, :datetime, [null: true]},
-         {:add, :is_active, :boolean, [default: true]}
-       ]}
-
-    assert execute_ddl(create) == [
-             """
-             CREATE TABLE posts (\
-             name TEXT DEFAULT 'Untitled' NOT NULL, \
-             token BLOB NOT NULL, \
-             price NUMERIC DEFAULT expr, \
-             on_hand INTEGER DEFAULT 0 NULL, \
-             likes INTEGER DEFAULT 0 NOT NULL, \
-             published_at DATETIME NULL, \
-             is_active BOOLEAN DEFAULT true\
-             )\
-             """
-           ]
-  end
-
-  test "create empty table" do
-    create = {:create, table(:posts), []}
-
-    assert execute_ddl(create) == ["CREATE TABLE posts ()"]
-  end
-
-  test "create table with prefix" do
-    create =
-      {:create, table(:posts, prefix: :foo),
-       [{:add, :category_0, %Reference{table: :categories}, []}]}
-
-    assert execute_ddl(create) == [
-             """
-             CREATE TABLE foo.posts (\
-             category_0 INTEGER CONSTRAINT posts_category_0_fkey REFERENCES foo.categories(id)\
-             )\
-             """
-           ]
-  end
-
-  test "create table with references" do
-    create =
-      {:create, table(:posts),
-       [
-         {:add, :id, :serial, [primary_key: true]},
-         {:add, :category_0, %Reference{table: :categories}, []},
-         {:add, :category_1, %Reference{table: :categories, name: :foo_bar}, []},
-         {:add, :category_2, %Reference{table: :categories, on_delete: :nothing}, []},
-         {:add, :category_3, %Reference{table: :categories, on_delete: :delete_all},
-          [null: false]},
-         {:add, :category_4, %Reference{table: :categories, on_delete: :nilify_all},
-          []},
-         {:add, :category_5,
-          %Reference{table: :categories, prefix: :foo, on_delete: :nilify_all}, []},
-         {:add, :category_6,
-          %Reference{table: :categories, with: [here: :there], on_delete: :nilify_all},
-          []},
-         {:add, :category_7,
-          %Reference{table: :tags, with: [that: :this], on_delete: :nilify_all}, []}
-       ]}
-
-    assert execute_ddl(create) == [
-             """
-             CREATE TABLE posts (\
-             id INTEGER PRIMARY KEY AUTOINCREMENT, \
-             category_0 INTEGER CONSTRAINT posts_category_0_fkey REFERENCES categories(id), \
-             category_1 INTEGER CONSTRAINT foo_bar REFERENCES categories(id), \
-             category_2 INTEGER CONSTRAINT posts_category_2_fkey REFERENCES categories(id), \
-             category_3 INTEGER NOT NULL CONSTRAINT posts_category_3_fkey REFERENCES categories(id) ON DELETE CASCADE, \
-             category_4 INTEGER CONSTRAINT posts_category_4_fkey REFERENCES categories(id) ON DELETE SET NULL, \
-             category_5 INTEGER CONSTRAINT posts_category_5_fkey REFERENCES foo.categories(id) ON DELETE SET NULL, \
-             category_6 INTEGER, \
-             category_7 INTEGER, \
-             FOREIGN KEY (category_6,here) REFERENCES categories(id,there) ON DELETE SET NULL, \
-             FOREIGN KEY (category_7,that) REFERENCES tags(id,this) ON DELETE SET NULL\
-             )\
-             """
-           ]
-  end
-
-  test "create table with options" do
-    assert_raise(
-      ArgumentError,
-      "SQLite3 adapter does not support keyword lists in :options",
-      fn ->
-        {:create, table(:posts, options: "WITH FOO=BAR"),
-         [{:add, :id, :serial, [primary_key: true]}, {:add, :created_at, :datetime, []}]}
-        |> execute_ddl()
-      end
-    )
-  end
-
-  test "create table with composite key" do
-    create =
-      {:create, table(:posts),
-       [
-         {:add, :a, :integer, [primary_key: true]},
-         {:add, :b, :integer, [primary_key: true]},
-         {:add, :name, :string, []}
-       ]}
-
-    assert execute_ddl(create) == [
-             """
-             CREATE TABLE posts (\
-             a INTEGER, \
-             b INTEGER, \
-             name TEXT, \
-             PRIMARY KEY (a, b)\
-             )\
-             """
-           ]
-  end
-
-  test "create table with a map column, and a map default with values" do
-    create =
-      {:create, table(:posts),
-       [
-         {:add, :a, :map, [default: %{foo: "bar", baz: "boom"}]}
-       ]}
-
-    assert execute_ddl(create) == [
-             """
-             CREATE TABLE posts (a JSON DEFAULT ('{\"baz\":\"boom\",\"foo\":\"bar\"}'))\
-             """
-           ]
-  end
-
-  test "create table with time columns" do
-    create =
-      {:create, table(:posts),
-       [{:add, :published_at, :time, [precision: 3]}, {:add, :submitted_at, :time, []}]}
-
-    assert execute_ddl(create) == [
-             """
-             CREATE TABLE posts (\
-             published_at TIME, \
-             submitted_at TIME\
-             )\
-             """
-           ]
-  end
-
-  test "create table with utc_datetime columns" do
-    create =
-      {:create, table(:posts),
-       [
-         {:add, :published_at, :utc_datetime, [precision: 3]},
-         {:add, :submitted_at, :utc_datetime, []}
-       ]}
-
-    assert execute_ddl(create) == [
-             """
-             CREATE TABLE posts (\
-             published_at TEXT_DATETIME, \
-             submitted_at TEXT_DATETIME\
-             )\
-             """
-           ]
-  end
-
-  test "create table with naive_datetime columns" do
-    create =
-      {:create, table(:posts),
-       [
-         {:add, :published_at, :naive_datetime, [precision: 3]},
-         {:add, :submitted_at, :naive_datetime, []}
-       ]}
-
-    assert execute_ddl(create) == [
-             "CREATE TABLE posts (published_at TEXT_DATETIME, submitted_at TEXT_DATETIME)"
-           ]
-  end
-
-  test "create table with an unsupported type" do
-    assert_raise(
-      ArgumentError,
-      "argument error",
-      fn ->
-        {:create, table(:posts),
-         [
-           {:add, :a, {:a, :b, :c}, [default: %{}]}
-         ]}
-        |> execute_ddl()
-      end
-    )
-  end
-
-  test "drop table" do
-    drop = {:drop, table(:posts)}
-
-    assert execute_ddl(drop) == [~s|DROP TABLE posts|]
-  end
-
-  test "drop table with prefixes" do
-    drop = {:drop, table(:posts, prefix: :foo)}
-
-    assert execute_ddl(drop) == [~s|DROP TABLE foo.posts|]
-  end
-
-  test "drop constraint" do
-    assert_raise(
-      ArgumentError,
-      ~r/ALTER TABLE with constraints not supported by SQLite3/,
-      fn ->
-        execute_ddl(
-          {:drop, constraint(:products, "price_must_be_positive", prefix: :foo)}
-        )
-      end
-    )
-  end
-
-  test "drop_if_exists constraint" do
-    assert_raise(
-      ArgumentError,
-      ~r/SQLite3 adapter does not support constraints/,
-      fn ->
-        execute_ddl(
-          {:drop_if_exists,
-           constraint(:products, "price_must_be_positive", prefix: :foo)}
-        )
-      end
-    )
-  end
-
-  test "alter table" do
-    alter =
-      {:alter, table(:posts),
-       [
-         {:add, :title, :string, [default: "Untitled", size: 100, null: false]},
-         {:add, :author_id, %Reference{table: :author}, []}
-       ]}
-
-    assert execute_ddl(alter) == [
-             """
-             ALTER TABLE posts \
-             ADD COLUMN title TEXT DEFAULT 'Untitled' NOT NULL\
-             """,
-             """
-             ALTER TABLE posts \
-             ADD COLUMN author_id INTEGER CONSTRAINT posts_author_id_fkey REFERENCES author(id)\
-             """
-           ]
-  end
-
-  test "alter table with datetime not null" do
-    alter =
-      {:alter, table(:posts),
-       [
-         {:add, :title, :string, [default: "Untitled", size: 100, null: false]},
-         {:add, :when, :utc_datetime, [null: false]}
-       ]}
-
-    assert execute_ddl(alter) == [
-             """
-             ALTER TABLE posts \
-             ADD COLUMN title TEXT DEFAULT 'Untitled' NOT NULL\
-             """,
-             """
-             ALTER TABLE posts \
-             ADD COLUMN when TEXT_DATETIME\
-             """
-           ]
-  end
-
-  test "alter table with prefix" do
-    alter =
-      {:alter, table(:posts, prefix: :foo),
-       [
-         {:add, :title, :string, [default: "Untitled", size: 100, null: false]},
-         {:add, :author_id, %Reference{table: :author}, []}
-       ]}
-
-    assert execute_ddl(alter) == [
-             """
-             ALTER TABLE foo.posts \
-             ADD COLUMN title TEXT DEFAULT 'Untitled' NOT NULL\
-             """,
-             """
-             ALTER TABLE foo.posts \
-             ADD COLUMN author_id INTEGER \
-             CONSTRAINT posts_author_id_fkey REFERENCES foo.author(id)\
-             """
-           ]
-  end
-
-  test "alter column errors for :modify column" do
-    assert_raise(
-      ArgumentError,
-      "ALTER COLUMN not supported by SQLite3",
-      fn ->
-        {:alter, table(:posts),
-         [
-           {:modify, :price, :numeric, [precision: 8, scale: 2]}
-         ]}
-        |> execute_ddl()
-      end
-    )
-  end
-
-  test "alter table removes column" do
-    alteration = {
-      :alter,
-      table(:posts),
-      [{:remove, :price, :numeric, [precision: 8, scale: 2]}]
-    }
-
-    assert execute_ddl(alteration) == [
-             """
-             ALTER TABLE posts \
-             DROP COLUMN price\
-             """
-           ]
-  end
-
-  test "alter table with primary key" do
-    alter = {:alter, table(:posts), [{:add, :my_pk, :serial, [primary_key: true]}]}
-
-    assert execute_ddl(alter) == [
-             """
-             ALTER TABLE posts \
-             ADD COLUMN my_pk INTEGER PRIMARY KEY AUTOINCREMENT\
-             """
-           ]
-  end
-
-  test "create index" do
-    create = {:create, index(:posts, [:category_id, :permalink])}
-
-    assert execute_ddl(create) ==
-             [
-               """
-               CREATE INDEX posts_category_id_permalink_index \
-               ON posts (category_id, permalink)\
-               """
-             ]
-
-    create = {:create, index(:posts, ["lower(permalink)"], name: "posts$main")}
-
-    assert execute_ddl(create) == [
-             """
-             CREATE INDEX posts$main ON posts (lower(permalink))\
-             """
-           ]
-  end
-
-  test "create index if not exists" do
-    create = {:create_if_not_exists, index(:posts, [:category_id, :permalink])}
-    query = execute_ddl(create)
-
-    assert query == [
-             """
-             CREATE INDEX IF NOT EXISTS posts_category_id_permalink_index \
-             ON posts (category_id, permalink)\
-             """
-           ]
-  end
-
-  test "create index with prefix" do
-    create = {:create, index(:posts, [:category_id, :permalink], prefix: :foo)}
-
-    assert execute_ddl(create) == [
-             """
-             CREATE INDEX posts_category_id_permalink_index \
-             ON foo.posts (category_id, permalink)\
-             """
-           ]
-
-    create =
-      {:create, index(:posts, ["lower(permalink)"], name: "posts$main", prefix: :foo)}
-
-    assert execute_ddl(create) == [
-             """
-             CREATE INDEX posts$main ON foo.posts (lower(permalink))\
-             """
-           ]
-  end
-
-  test "create index with comment" do
-    create =
-      {:create,
-       index(:posts, [:category_id, :permalink], prefix: :foo, comment: "comment")}
-
-    assert execute_ddl(create) == [
-             """
-             CREATE INDEX posts_category_id_permalink_index \
-             ON foo.posts (category_id, permalink)\
-             """
-           ]
-
-    # NOTE: Comments are not supported by SQLite. DDL query generator will ignore them.
-  end
-
-  test "create unique index" do
-    create = {:create, index(:posts, [:permalink], unique: true)}
-
-    assert execute_ddl(create) == [
-             """
-             CREATE UNIQUE INDEX posts_permalink_index \
-             ON posts (permalink)\
-             """
-           ]
-  end
-
-  test "create unique index if not exists" do
-    create = {:create_if_not_exists, index(:posts, [:permalink], unique: true)}
-    query = execute_ddl(create)
-
-    assert query == [
-             """
-             CREATE UNIQUE INDEX IF NOT EXISTS posts_permalink_index \
-             ON posts (permalink)\
-             """
-           ]
-  end
-
-  test "create unique index with condition" do
-    create = {:create, index(:posts, [:permalink], unique: true, where: "public IS 1")}
-
-    assert execute_ddl(create) == [
-             """
-             CREATE UNIQUE INDEX posts_permalink_index \
-             ON posts (permalink) WHERE public IS 1\
-             """
-           ]
-
-    create = {:create, index(:posts, [:permalink], unique: true, where: :public)}
-
-    assert execute_ddl(create) == [
-             """
-             CREATE UNIQUE INDEX posts_permalink_index \
-             ON posts (permalink) WHERE public\
-             """
-           ]
-  end
-
-  test "create index concurrently" do
-    # NOTE: SQLite doesn't support CONCURRENTLY, so this isn't included in generated SQL.
-    create = {:create, index(:posts, [:permalink], concurrently: true)}
-
-    assert execute_ddl(create) == [
-             ~s|CREATE INDEX posts_permalink_index ON posts (permalink)|
-           ]
-  end
-
-  test "create unique index concurrently" do
-    # NOTE: SQLite doesn't support CONCURRENTLY, so this isn't included in generated SQL.
-    create = {:create, index(:posts, [:permalink], concurrently: true, unique: true)}
-
-    assert execute_ddl(create) == [
-             ~s|CREATE UNIQUE INDEX posts_permalink_index ON posts (permalink)|
-           ]
-  end
-
-  test "create an index using a different type" do
-    # NOTE: SQLite doesn't support USING, so this isn't included in generated SQL.
-    create = {:create, index(:posts, [:permalink], using: :hash)}
-
-    assert execute_ddl(create) == [
-             ~s|CREATE INDEX posts_permalink_index ON posts (permalink)|
-           ]
-  end
-
-  test "drop index" do
-    drop = {:drop, index(:posts, [:id], name: "posts$main")}
-    assert execute_ddl(drop) == [~s|DROP INDEX posts$main|]
-  end
-
-  test "drop index with prefix" do
-    drop = {:drop, index(:posts, [:id], name: "posts$main", prefix: :foo)}
-    assert execute_ddl(drop) == [~s|DROP INDEX foo.posts$main|]
-  end
-
-  test "drop index if exists" do
-    drop = {:drop_if_exists, index(:posts, [:id], name: "posts$main")}
-    assert execute_ddl(drop) == [~s|DROP INDEX IF EXISTS posts$main|]
-  end
-
-  test "drop index concurrently" do
-    # NOTE: SQLite doesn't support CONCURRENTLY, so this isn't included in generated SQL.
-    drop = {:drop, index(:posts, [:id], name: "posts$main", concurrently: true)}
-    assert execute_ddl(drop) == [~s|DROP INDEX posts$main|]
-  end
-
-  test "create check constraint" do
-    assert_raise(
-      ArgumentError,
-      "ALTER TABLE with constraints not supported by SQLite3",
-      fn ->
-        {:create, constraint(:products, "price_must_be_positive", check: "price > 0")}
-        |> execute_ddl()
-      end
-    )
-
-    assert_raise(
-      ArgumentError,
-      "ALTER TABLE with constraints not supported by SQLite3",
-      fn ->
-        {:create,
-         constraint(:products, "price_must_be_positive",
-           check: "price > 0",
-           prefix: "foo"
-         )}
-        |> execute_ddl()
-      end
-    )
-  end
-
-  test "create exclusion constraint" do
-    assert_raise(
-      ArgumentError,
-      "ALTER TABLE with constraints not supported by SQLite3",
-      fn ->
-        {:create,
-         constraint(:products, "price_must_be_positive",
-           exclude: ~s|gist (int4range("from", "to", '[]') WITH &&)|
-         )}
-        |> execute_ddl()
-      end
-    )
-  end
-
-  test "create constraint with comment" do
-    assert_raise(
-      ArgumentError,
-      "ALTER TABLE with constraints not supported by SQLite3",
-      fn ->
-        {:create,
-         constraint(:products, "price_must_be_positive",
-           check: "price > 0",
-           prefix: "foo",
-           comment: "comment"
-         )}
-        |> execute_ddl()
-      end
-    )
-  end
-
-  test "rename table" do
-    rename = {:rename, table(:posts), table(:new_posts)}
-
-    assert execute_ddl(rename) == [
-             ~s|ALTER TABLE posts RENAME TO new_posts|
-           ]
-  end
-
-  test "rename table with prefix" do
-    rename = {:rename, table(:posts, prefix: :foo), table(:new_posts, prefix: :foo)}
-
-    assert execute_ddl(rename) == [
-             ~s|ALTER TABLE foo.posts RENAME TO new_posts|
-           ]
-  end
-
-  test "rename column" do
-    rename = {:rename, table(:posts), :given_name, :first_name}
-
-    assert execute_ddl(rename) == [
-             ~s|ALTER TABLE posts RENAME COLUMN given_name TO first_name|
-           ]
-  end
-
-  test "rename column in prefixed table" do
-    rename = {:rename, table(:posts, prefix: :foo), :given_name, :first_name}
-
-    assert execute_ddl(rename) == [
-             ~s|ALTER TABLE foo.posts RENAME COLUMN given_name TO first_name|
-           ]
-  end
-
-  test "drop column" do
-    drop_column = {:alter, table(:posts), [{:remove, :summary}]}
-
-    assert execute_ddl(drop_column) == [
-             """
-             ALTER TABLE posts \
-             DROP COLUMN summary\
-             """
-           ]
-  end
-
-  test "arrays" do
-    assert_raise(
-      Ecto.QueryError,
-      ~r"Array type is not supported by SQLite3",
-      fn ->
-        Schema
-        |> select([], fragment("?", [1, 2, 3]))
-        |> plan()
-        |> all()
-      end
-    )
-  end
-
-  test "preloading" do
-    query =
-      from(p in Post, preload: [:comments], select: p)
-      |> plan()
-      |> all()
-
-    assert query == "SELECT p0.id, p0.title, p0.content FROM posts AS p0"
-  end
-end
diff --git a/test/ecto/adapters/exqlite/data_type_test.exs b/test/ecto/adapters/exqlite/data_type_test.exs
deleted file mode 100644
index 3e57f51f..00000000
--- a/test/ecto/adapters/exqlite/data_type_test.exs
+++ /dev/null
@@ -1,83 +0,0 @@
-defmodule Ecto.Adapters.Exqlite.DataTypeTest do
-  use ExUnit.Case, async: true
-
-  alias Ecto.Adapters.Exqlite.DataType
-
-  describe ".column_type/2" do
-    test ":id is INTEGER" do
-      assert DataType.column_type(:id, nil) == "INTEGER"
-    end
-
-    test ":serial is INTEGER" do
-      assert DataType.column_type(:serial, nil) == "INTEGER"
-    end
-
-    test ":bigserial is INTEGER" do
-      assert DataType.column_type(:bigserial, nil) == "INTEGER"
-    end
-
-    test ":binary_id is TEXT" do
-      assert DataType.column_type(:binary_id, nil) == "TEXT"
-    end
-
-    test ":string is TEXT" do
-      assert DataType.column_type(:string, nil) == "TEXT"
-    end
-
-    test ":uuid is TEXT" do
-      assert DataType.column_type(:uuid, nil) == "TEXT"
-    end
-
-    test ":map is JSON" do
-      assert DataType.column_type(:map, nil) == "JSON"
-    end
-
-    test "{:map, _} is JSON" do
-      assert DataType.column_type({:map, %{}}, nil) == "JSON"
-    end
-
-    test ":array is JSON" do
-      assert DataType.column_type(:array, nil) == "JSON"
-    end
-
-    test "{:array, _} is JSON" do
-      assert DataType.column_type({:array, []}, nil) == "JSON"
-    end
-
-    test ":float is NUMERIC" do
-      assert DataType.column_type(:float, nil) == "NUMERIC"
-    end
-
-    test ":decimal with no options is DECIMAL" do
-      assert DataType.column_type(:decimal, nil) == "DECIMAL"
-    end
-
-    test ":decimal with empty options is DECIMAL" do
-      assert DataType.column_type(:decimal, []) == "DECIMAL"
-    end
-
-    test ":decimal with precision and scale is DECIMAL" do
-      assert DataType.column_type(:decimal, precision: 5, scale: 2) == "DECIMAL(5,2)"
-    end
-
-    test ":binary is BLOB" do
-      assert DataType.column_type(:binary, nil) == "BLOB"
-    end
-
-    test ":utc_datetime is TEXT_DATETIME" do
-      assert DataType.column_type(:utc_datetime, nil) == "TEXT_DATETIME"
-    end
-
-    test ":utc_datetime_usec is TEXT_DATETIME" do
-      assert DataType.column_type(:utc_datetime_usec, nil) == "TEXT_DATETIME"
-    end
-
-    test ":naive_datetime is TEXT_DATETIME" do
-      assert DataType.column_type(:naive_datetime, nil) == "TEXT_DATETIME"
-    end
-
-    test ":naive_datetime_usec is TEXT_DATETIME" do
-      assert DataType.column_type(:naive_datetime_usec, nil) == "TEXT_DATETIME"
-    end
-  end
-end
diff --git a/test/ecto/adapters/exqlite_test.exs b/test/ecto/adapters/exqlite_test.exs
deleted file mode 100644
index a5370ee6..00000000
--- a/test/ecto/adapters/exqlite_test.exs
+++ /dev/null
@@ -1,54 +0,0 @@
-defmodule Ecto.Adapters.ExqliteTest do
-  use ExUnit.Case
-
-  alias Ecto.Adapters.Exqlite
-
-  describe ".storage_up/1" do
-    test "create database" do
-      opts = [database: Temp.path!()]
-
-      assert Exqlite.storage_up(opts) == :ok
-      assert File.exists?(opts[:database])
-
-      File.rm(opts[:database])
-    end
-
-    test "does not fail on second call" do
-      opts = [database: Temp.path!()]
-
-      assert Exqlite.storage_up(opts) == :ok
-      assert File.exists?(opts[:database])
-      assert Exqlite.storage_up(opts) == {:error, :already_up}
-
-      File.rm(opts[:database])
-    end
-
-    test "fails with helpful error message if no database specified" do
-      assert_raise(
-        ArgumentError,
-        """
-        No SQLite database path specified. Please check the configuration for your Repo.
-        Your config/*.exs file should have something like this in it:
-
-          config :my_app, MyApp.Repo,
-            adapter: Ecto.Adapters.Exqlite,
-            database: "/path/to/sqlite/database"
-        """,
-        fn -> Exqlite.storage_up(mumble: "no database here") == :ok end
-      )
-    end
-  end
-
-  describe ".storage_down/2" do
-    test "storage down (twice)" do
-      opts = [database: Temp.path!()]
-
-      assert Exqlite.storage_up(opts) == :ok
-      assert Exqlite.storage_down(opts) == :ok
-      refute File.exists?(opts[:database])
-      assert Exqlite.storage_down(opts) == {:error, :already_down}
-
-      File.rm(opts[:database])
-    end
-  end
-end
diff --git a/test/ecto/integration/crud_test.exs b/test/ecto/integration/crud_test.exs
deleted file mode 100644
index 86c01bf1..00000000
--- a/test/ecto/integration/crud_test.exs
+++ /dev/null
@@ -1,195 +0,0 @@
-defmodule Ecto.Integration.CrudTest do
-  use Ecto.Integration.Case
-
-  alias Ecto.Integration.TestRepo
-  alias Exqlite.Integration.Account
-  alias Exqlite.Integration.User
-  alias Exqlite.Integration.AccountUser
-  alias Exqlite.Integration.Product
-
-  import Ecto.Query
-
-  describe "insert" do
-    test "insert user" do
-      {:ok, user1} = TestRepo.insert(%User{name: "John"}, [])
-      assert user1
-
-      {:ok, user2} = TestRepo.insert(%User{name: "James"}, [])
-      assert user2
-
-      assert user1.id != user2.id
-
-      user =
-        User
-        |> select([u], u)
-        |> where([u], u.id == ^user1.id)
-        |> TestRepo.one()
-
-      assert user.name == "John"
-    end
-
-    test "handles nulls when querying correctly" do
-      {:ok, account} =
-        %Account{name: "Something"}
-        |> TestRepo.insert()
-
-      {:ok, product} =
-        %Product{
-          name: "Thing",
-          account_id: account.id,
-          approved_at: nil
-        }
-        |> TestRepo.insert()
-
-      found = TestRepo.get(Product, product.id)
-      assert found.id == product.id
-      assert found.approved_at == nil
-      assert found.description == nil
-      assert found.name == "Thing"
-      assert found.tags == []
-    end
-
-    test "insert_all" do
-      timestamp = NaiveDateTime.utc_now() |> NaiveDateTime.truncate(:second)
-
-      account = %{
-        name: "John",
-        inserted_at: timestamp,
-        updated_at: timestamp
-      }
-
-      {1, nil} = TestRepo.insert_all(Account, [account], [])
-    end
-  end
-
-  describe "delete" do
-    test "deletes user" do
-      {:ok, user} = TestRepo.insert(%User{name: "John"}, [])
-
-      {:ok, _} = TestRepo.delete(user)
-    end
-
-    @tag :busy_repo
-    test "delete_all deletes one product" do
-      TestRepo.insert!(%Product{name: "hello"})
-
-      # we have to do this because the tests are not isolated from one another.
-      # @kevinlang is working on rectifying that problem
-      assert {total, _} = TestRepo.delete_all(Product)
-      assert total >= 1
-    end
-
-    # This test keeps on hitting busy issues, not sure why; it passes fine in
-    # isolation.
-    @tag :busy_repo
-    test "delete_all deletes all products" do
-      TestRepo.insert!(%Product{name: "hello"})
-      TestRepo.insert!(%Product{name: "hello again"})
-
-      # we have to do this because the tests are not isolated from one another.
-      # @kevinlang is working on rectifying that problem
-      assert {total, _} = TestRepo.delete_all(Product)
-      assert total >= 2
-    end
-  end
-
-  describe "update" do
-    test "updates user" do
-      {:ok, user} = TestRepo.insert(%User{name: "John"}, [])
-      changeset = User.changeset(user, %{name: "Bob"})
-
-      {:ok, changed} = TestRepo.update(changeset)
-
-      assert changed.name == "Bob"
-    end
-
-    test "update_all handles null<->nil conversion correctly" do
-      account = TestRepo.insert!(%Account{name: "hello"})
-      assert {1, nil} = TestRepo.update_all(Account, set: [name: nil])
-      assert %Account{name: nil} = TestRepo.reload(account)
-    end
-  end
-
-  describe "transaction" do
-    test "successful user and account creation" do
-      {:ok, _} =
-        Ecto.Multi.new()
-        |> Ecto.Multi.insert(:account, fn _ ->
-          Account.changeset(%Account{}, %{name: "Foo"})
-        end)
-        |> Ecto.Multi.insert(:user, fn _ ->
-          User.changeset(%User{}, %{name: "Bob"})
-        end)
-        |> Ecto.Multi.insert(:account_user, fn %{account: account, user: user} ->
-          AccountUser.changeset(%AccountUser{}, %{
-            account_id: account.id,
-            user_id: user.id
-          })
-        end)
-        |> TestRepo.transaction()
-    end
-
-    test "unsuccessful account creation" do
-      {:error, _, _, _} =
-        Ecto.Multi.new()
-        |> Ecto.Multi.insert(:account, fn _ ->
-          Account.changeset(%Account{}, %{name: nil})
-        end)
-        |> Ecto.Multi.insert(:user, fn _ ->
-          User.changeset(%User{}, %{name: "Bob"})
-        end)
-        |> Ecto.Multi.insert(:account_user, fn %{account: account, user: user} ->
-          AccountUser.changeset(%AccountUser{}, %{
-            account_id: account.id,
-            user_id: user.id
-          })
-        end)
-        |> TestRepo.transaction()
-    end
-
-    test "unsuccessful user creation" do
-      {:error, _, _, _} =
-        Ecto.Multi.new()
-        |> Ecto.Multi.insert(:account, fn _ ->
-          Account.changeset(%Account{}, %{name: "Foo"})
-        end)
-        |> Ecto.Multi.insert(:user, fn _ ->
-          User.changeset(%User{}, %{name: nil})
-        end)
-        |> Ecto.Multi.insert(:account_user, fn %{account: account, user: user} ->
-          AccountUser.changeset(%AccountUser{}, %{
-            account_id: account.id,
-            user_id: user.id
-          })
-        end)
-        |> TestRepo.transaction()
-    end
-  end
-
-  describe "preloading" do
-    test "preloads many to many relation" do
-      account1 = TestRepo.insert!(%Account{name: "Main"})
-      account2 = TestRepo.insert!(%Account{name: "Secondary"})
-      user1 = TestRepo.insert!(%User{name: "John"}, [])
-      user2 = TestRepo.insert!(%User{name: "Shelly"}, [])
-      TestRepo.insert!(%AccountUser{user_id: user1.id, account_id: account1.id})
-      TestRepo.insert!(%AccountUser{user_id: user1.id, account_id: account2.id})
-      TestRepo.insert!(%AccountUser{user_id: user2.id, account_id: account2.id})
-
-      accounts = from(a in Account, preload: [:users]) |> TestRepo.all()
-
-      assert Enum.count(accounts) == 2
-
-      Enum.each(accounts, fn account ->
-        assert Ecto.assoc_loaded?(account.users)
-      end)
-    end
-  end
-
-  describe "select" do
-    test "can handle in" do
-      TestRepo.insert!(%Account{name: "hi"})
-
-      assert [] = TestRepo.all(from a in Account, where: a.name in ["404"])
-      assert [_] = TestRepo.all(from a in Account, where: a.name in ["hi"])
-    end
-  end
-end
diff --git a/test/ecto/integration/math_test.exs b/test/ecto/integration/math_test.exs
deleted file mode 100644
index 7049f98e..00000000
--- a/test/ecto/integration/math_test.exs
+++ /dev/null
@@ -1,138 +0,0 @@
-defmodule Ecto.Integration.MathTest do
-  use Ecto.Integration.Case
-
-  alias Ecto.Integration.TestRepo
-  alias Exqlite.Integration.Account
-  alias Exqlite.Integration.Product
-  alias Exqlite.Integration.Vec3f
-
-  import Ecto.Query
-
-  defp random_string(len) do
-    :crypto.strong_rand_bytes(len)
-    |> Base.url_encode64()
-    |> binary_part(0, len)
-  end
-
-  defp create_account(name) do
-    TestRepo.insert!(%Account{name: name})
-  end
-
-  defp create_product(account, price) do
-    TestRepo.insert!(%Product{
-      name: random_string(8),
-      price: price,
-      account_id: account.id
-    })
-  end
-
-  describe "max" do
-    test "decimal" do
-      account = create_account("Company")
-      create_product(account, Decimal.new("1.23"))
-      create_product(account, Decimal.new("2.00"))
-      create_product(account, Decimal.new("2.67"))
-
-      query = from(p in Product, select: max(p.price))
-
-      [highest_price] = TestRepo.all(query)
-      assert Decimal.equal?(highest_price, Decimal.new("2.67"))
-    end
-  end
-
-  describe "min" do
-    test "decimal" do
-      account = create_account("Company")
-      create_product(account, Decimal.new("1.23"))
-      create_product(account, Decimal.new("2.00"))
-      create_product(account, Decimal.new("2.67"))
-
-      query = from(p in Product, select: min(p.price))
-
-      [lowest_price] = TestRepo.all(query)
-      assert Decimal.equal?(lowest_price, Decimal.new("1.23"))
-    end
-  end
-
-  describe "sum" do
-    test "decimal" do
-      account = create_account("Company")
-      create_product(account, Decimal.new("1.23"))
-      create_product(account, Decimal.new("2.00"))
-      create_product(account, Decimal.new("2.67"))
-
-      query = from(p in Product, select: sum(p.price))
-
-      [total] = TestRepo.all(query)
-      assert Decimal.equal?(total, Decimal.new("5.90"))
-    end
-  end
-
-  describe "avg" do
-    test "decimal" do
-      account = create_account("Company")
-      create_product(account, Decimal.new("1.23"))
-      create_product(account, Decimal.new("2.00"))
-      create_product(account, Decimal.new("2.67"))
-
-      query = from(p in Product, select: avg(p.price))
-
-      [average] = TestRepo.all(query)
-      assert Decimal.equal?(average, Decimal.new("1.9666666666666668"))
-    end
-  end
-
-  describe "acos" do
-    test "decimal above 1.0" do
-      account = create_account("Company")
-      create_product(account, Decimal.new("1.23"))
-
-      query = from(p in Product, select: fragment("acos(?)", p.price))
-
-      [nil] = TestRepo.all(query)
-    end
-
-    test "decimal below -1.0" do
-      account = create_account("Company")
-      create_product(account, Decimal.new("-1.23"))
-
-      query = from(p in Product, select: fragment("acos(?)", p.price))
-
-      [nil] = TestRepo.all(query)
-    end
-
-    test "decimal at 0.3" do
-      account = create_account("Company")
-      create_product(account, Decimal.new("0.30"))
-
-      query = from(p in Product, select: fragment("acos(?)", p.price))
-
-      # Right now, sqlite will return the acos function as an IEEE float
-      [1.2661036727794992] = TestRepo.all(query)
-    end
-
-    test "float above 1.0" do
-      TestRepo.insert!(%Vec3f{x: 1.1, y: 1.2, z: 1.3})
-
-      query = from(v in Vec3f, select: fragment("acos(?)", v.x))
-
-      [nil] = TestRepo.all(query)
-    end
-
-    test "float below -1.0" do
-      TestRepo.insert!(%Vec3f{x: -1.1, y: 1.2, z: 1.3})
-
-      query = from(v in Vec3f, select: fragment("acos(?)", v.x))
-
-      [nil] = TestRepo.all(query)
-    end
-
-    test "float at 0.3" do
-      TestRepo.insert!(%Vec3f{x: 0.3, y: 1.2, z: 1.3})
-
-      query = from(v in Vec3f, select: fragment("acos(?)", v.x))
-
-      [1.2661036727794992] = TestRepo.all(query)
-    end
-  end
-end
diff --git a/test/ecto/integration/streaming_test.exs b/test/ecto/integration/streaming_test.exs
deleted file mode 100644
index 5abdc64f..00000000
--- a/test/ecto/integration/streaming_test.exs
+++ /dev/null
@@ -1,30 +0,0 @@
-defmodule Ecto.Integration.StreamingTest do
-  use Ecto.Integration.Case
-
-  alias Ecto.Integration.TestRepo
-  alias Exqlite.Integration.User
-
-  import Ecto.Query
-
-  test "handles streams properly" do
-    # TODO: We really need to get proper sandboxing in place
-    before_count = User |> select([u], u) |> TestRepo.all() |> Enum.count()
-
-    {:ok, _} = TestRepo.insert(User.changeset(%User{}, %{name: "Bill"}))
-    {:ok, _} = TestRepo.insert(User.changeset(%User{}, %{name: "Shannon"}))
-    {:ok, _} = TestRepo.insert(User.changeset(%User{}, %{name: "Tom"}))
-    {:ok, _} = TestRepo.insert(User.changeset(%User{}, %{name: "Tiffany"}))
-    {:ok, _} = TestRepo.insert(User.changeset(%User{}, %{name: "Dave"}))
-
-    {:ok, count} =
-      TestRepo.transaction(fn ->
-        User
-        |> select([u], u)
-        |> TestRepo.stream()
-        |> Enum.map(fn user -> user end)
-        |> Enum.count()
-      end)
-
-    assert 5 == count - before_count
-  end
-end
diff --git a/test/ecto/integration/timestamps_test.exs b/test/ecto/integration/timestamps_test.exs
deleted file mode 100644
index 58549943..00000000
--- a/test/ecto/integration/timestamps_test.exs
+++ /dev/null
@@ -1,76 +0,0 @@
-defmodule Ecto.Integration.TimestampsTest do
-  use Ecto.Integration.Case
-
-  alias Ecto.Integration.TestRepo
-
-  import Ecto.Query
-
-  defmodule UserNaiveDatetime do
-    use Ecto.Schema
-    import Ecto.Changeset
-
-    schema "users" do
-      field(:name, :string)
-      timestamps()
-    end
-
-    def changeset(struct, attrs) do
-      struct
-      |> cast(attrs, [:name])
-      |> validate_required([:name])
-    end
-  end
-
-  defmodule UserUtcDatetime do
-    use Ecto.Schema
-    import Ecto.Changeset
-
-    schema "users" do
-      field(:name, :string)
-      timestamps(type: :utc_datetime)
-    end
-
-    def changeset(struct, attrs) do
-      struct
-      |> cast(attrs, [:name])
-      |> validate_required([:name])
-    end
-  end
-
-  test "insert and fetch naive datetime" do
-    {:ok, user} =
-      %UserNaiveDatetime{}
-      |> UserNaiveDatetime.changeset(%{name: "Bob"})
-      |> TestRepo.insert()
-
-    user =
-      UserNaiveDatetime
-      |> select([u], u)
-      |> where([u], u.id == ^user.id)
-      |> TestRepo.one()
-
-    assert user
-  end
-
-  test "max of naive datetime" do
-    datetime = ~N[2014-01-16 20:26:51]
-    TestRepo.insert!(%UserNaiveDatetime{inserted_at: datetime})
-    query = from(p in UserNaiveDatetime, select: max(p.inserted_at))
-    assert [^datetime] = TestRepo.all(query)
-  end
-
-  test "insert and fetch utc datetime" do
-    {:ok, user} =
-      %UserUtcDatetime{}
-      |> UserUtcDatetime.changeset(%{name: "Bob"})
-      |> TestRepo.insert()
-
-    user =
-      UserUtcDatetime
-      |> select([u], u)
-      |> where([u], u.id == ^user.id)
-      |> TestRepo.one()
-
-    assert user
-  end
-end
diff --git a/test/ecto/integration/uuid_test.exs b/test/ecto/integration/uuid_test.exs
deleted file mode 100644
index 0fbae729..00000000
--- a/test/ecto/integration/uuid_test.exs
+++ /dev/null
@@ -1,18 +0,0 @@
-defmodule Ecto.Integration.UUIDTest do
-  use Ecto.Integration.Case
-
-  alias Ecto.Integration.TestRepo
-  alias Exqlite.Integration.Product
-
-  test "handles uuid serialization and deserialization" do
-    external_id = Ecto.UUID.generate()
-    product = TestRepo.insert!(%Product{name: "Pupper Beer", external_id: external_id})
-
-    assert product.id
-    assert product.external_id == external_id
-
-    found = TestRepo.get(Product, product.id)
-    assert found
-    assert found.external_id == external_id
-  end
-end
diff --git a/test/support/migration.ex b/test/support/migration.ex
deleted file mode 100644
index 25d61bbe..00000000
--- a/test/support/migration.ex
+++ /dev/null
@@ -1,40 +0,0 @@
-defmodule Exqlite.Integration.Migration do
-  use Ecto.Migration
-
-  def change do
-    create table(:accounts) do
-      add(:name, :string)
-      timestamps()
-    end
-
-    create table(:users) do
-      add(:name, :string)
-      add(:custom_id, :uuid)
-      timestamps()
-    end
-
-    create table(:account_users) do
-      add(:account_id, references(:accounts))
-      add(:user_id, references(:users))
-      add(:role, :string)
-      timestamps()
-    end
-
-    create table(:products) do
-      add(:account_id, references(:accounts))
-      add(:name, :string)
-      add(:description, :text)
-      add(:external_id, :uuid)
-      add(:tags, {:array, :string})
-      add(:approved_at, :naive_datetime)
-      add(:price, :decimal)
-      timestamps()
-    end
-
-    create table(:vec3f) do
-      add(:x, :float)
-      add(:y, :float)
-      add(:z, :float)
-    end
-  end
-end
diff --git a/test/support/repo.ex b/test/support/repo.ex
deleted file mode 100644
index 10ae1278..00000000
--- a/test/support/repo.ex
+++ /dev/null
@@ -1,15 +0,0 @@
-defmodule Ecto.Integration.TestRepo do
-  use Ecto.Repo, otp_app: :exqlite, adapter: Ecto.Adapters.Exqlite
-
-  def create_prefix(_) do
-    raise "SQLite3 does not support CREATE DATABASE"
-  end
-
-  def drop_prefix(_) do
-    raise "SQLite3 does not support DROP DATABASE"
-  end
-
-  def uuid do
-    Ecto.UUID
-  end
-end
diff --git a/test/support/schemas.ex b/test/support/schemas.ex
deleted file mode 100644
index 08d5caf6..00000000
--- a/test/support/schemas.ex
+++ /dev/null
@@ -1,113 +0,0 @@
-defmodule Exqlite.Integration.Account do
-  use Ecto.Schema
-
-  import Ecto.Changeset
-
-  alias Exqlite.Integration.User
-  alias Exqlite.Integration.Product
-
-  schema "accounts" do
-    field(:name, :string)
-
-    timestamps()
-
-    many_to_many(:users, User, join_through: "account_users")
-    has_many(:products, Product)
-  end
-
-  def changeset(struct, attrs) do
-    struct
-    |> cast(attrs, [:name])
-    |> validate_required([:name])
-  end
-end
-
-defmodule Exqlite.Integration.User do
-  use Ecto.Schema
-
-  import Ecto.Changeset
-
-  alias Exqlite.Integration.Account
-
-  schema "users" do
-    field(:name, :string)
-
-    timestamps()
-
-    many_to_many(:accounts, Account, join_through: "account_users")
-  end
-
-  def changeset(struct, attrs) do
-    struct
-    |> cast(attrs, [:name])
-    |> validate_required([:name])
-  end
-end
-
-defmodule Exqlite.Integration.AccountUser do
-  use Ecto.Schema
-
-  import Ecto.Changeset
-
-  alias Exqlite.Integration.Account
-  alias Exqlite.Integration.User
-
-  schema "account_users" do
-    timestamps()
-
-    belongs_to(:account, Account)
-    belongs_to(:user, User)
-  end
-
-  def changeset(struct, attrs) do
-    struct
-    |> cast(attrs, [:account_id, :user_id])
-    |> validate_required([:account_id, :user_id])
-  end
-end
-
-defmodule Exqlite.Integration.Product do
-  use Ecto.Schema
-
-  import Ecto.Changeset
-
-  alias Exqlite.Integration.Account
-
-  schema "products" do
-    field(:name, :string)
-    field(:description, :string)
-    field(:external_id, Ecto.UUID)
-    field(:tags, {:array, :string}, default: [])
-    field(:approved_at, :naive_datetime)
-    field(:price, :decimal)
-
-    belongs_to(:account, Account)
-
-    timestamps()
-  end
-
-  def changeset(struct, attrs) do
-    struct
-    |> cast(attrs, [:name, :description, :tags])
-    |> validate_required([:name])
-    |> maybe_generate_external_id()
-  end
-
-  defp maybe_generate_external_id(changeset) do
-    if get_field(changeset, :external_id) do
-      changeset
-    else
-      put_change(changeset, :external_id, Ecto.UUID.bingenerate())
-    end
-  end
-end
-
-defmodule Exqlite.Integration.Vec3f do
-  use Ecto.Schema
-
-  schema "vec3f" do
-    field(:x, :float)
-    field(:y, :float)
-    field(:z, :float)
-  end
-end
diff --git a/test/test_helper.exs b/test/test_helper.exs
index d87c48fd..869559e7 100644
--- a/test/test_helper.exs
+++ b/test/test_helper.exs
@@ -1,43 +1 @@
-Logger.configure(level: :info)
-
-Application.put_env(:ecto, :primary_key_type, :id)
-Application.put_env(:ecto, :async_integration_tests, false)
-
-ecto = Mix.Project.deps_paths()[:ecto]
-Code.require_file("#{ecto}/integration_test/support/schemas.exs", __DIR__)
-
-alias Ecto.Integration.TestRepo
-
-Application.put_env(:exqlite, TestRepo,
-  adapter: Ecto.Adapters.Exqlite,
-  database: "/tmp/exqlite_sandbox_test.db",
-  journal_mode: :wal,
-  cache_size: -64000,
-  temp_store: :memory,
-  pool: Ecto.Adapters.SQL.Sandbox,
-  pool_size: 5,
-  show_sensitive_data_on_connection_error: true
-)
-
-defmodule Ecto.Integration.Case do
-  use ExUnit.CaseTemplate
-
-  setup do
-    :ok = Ecto.Adapters.SQL.Sandbox.checkout(TestRepo)
-    #on_exit(fn -> Ecto.Adapters.SQL.Sandbox.checkin(TestRepo) end)
-  end
-end
-
-{:ok, _} = Ecto.Adapters.Exqlite.ensure_all_started(TestRepo.config(), :temporary)
-
-# Load up the repository, start it, and run migrations
-_ = Ecto.Adapters.Exqlite.storage_down(TestRepo.config())
-:ok = Ecto.Adapters.Exqlite.storage_up(TestRepo.config())
-
-{:ok, _} = TestRepo.start_link()
-
-:ok = Ecto.Migrator.up(TestRepo, 0, Exqlite.Integration.Migration, log: false)
-Ecto.Adapters.SQL.Sandbox.mode(TestRepo, :manual)
-Process.flag(:trap_exit, true)
-
 ExUnit.start()

From fce47438f80bab848f2fe1bb28a23c0bd11cca2d Mon Sep 17 00:00:00 2001
From: Matthew Johnston
Date: Wed, 17 Mar 2021 21:18:23 -0500
Subject: [PATCH 3/3] Update to Simultaneous

---
 README.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/README.md b/README.md
index cd520377..1fd82c61 100644
--- a/README.md
+++ b/README.md
@@ -12,7 +12,7 @@ If you are looking for the Ecto adapter, take a look at the
 * Prepared statements are not immutable. You must be careful when manipulating
   statements and binding values to statements. Do not try to manipulate the
   statements concurrently. Keep it isolated to one process.
-* Asynchronous writing is not supported by SQLite3 and will not be supported
+* Simultaneous writing is not supported by SQLite3 and will not be supported
   here.
 * All native calls are run through the Dirty NIF scheduler.
 * Datetimes are stored without offsets. This is due to how SQLite3 handles date