From 34845206664a32eb2d5d6ac7383ba8cdc654a747 Mon Sep 17 00:00:00 2001 From: Benjamin Schultzer Date: Wed, 2 Nov 2022 18:04:10 -0400 Subject: [PATCH] Initial release --- .formatter.exs | 4 + .github/workflows/ci.yml | 50 + .gitignore | 29 + CHANGELOG.md | 7 + LICENSE.md | 176 ++ README.md | 66 + bench/bench.exs | 57 + config/bench.exs | 14 + config/config.exs | 27 + config/dev.exs | 3 + config/test.exs | 13 + ..._simple_cache_with_ecto_in_66_lines.livemd | 112 ++ ...ple_job_queue_with_ecto_in_46_lines.livemd | 71 + examples/my_event_module.ex | 79 + lib/ecto/adapters/dets.ex | 21 + lib/ecto/adapters/ets.ex | 15 + lib/ecto/adapters/mnesia.ex | 71 + lib/ecto/adapters/qlc.ex | 1413 +++++++++++++++++ lib/ecto/adapters/qlc/application.ex | 8 + mix.exs | 91 ++ mix.lock | 21 + .../00000000000001_create_users.exs | 14 + .../00000000000002_create_users_sessions.exs | 12 + ...0000000003_add_token_to_users_sessions.exs | 9 + test/ecto/dets_test.exs | 507 ++++++ test/ecto/ets_test.exs | 507 ++++++ test/ecto/mnesia_test.exs | 510 ++++++ test/ecto/postgres_test.exs | 490 ++++++ test/support/accounts/user.ex | 18 + test/support/accounts/user_session.ex | 23 + test/support/data_case.ex | 48 + test/test_helper.exs | 2 + 32 files changed, 4488 insertions(+) create mode 100644 .formatter.exs create mode 100644 .github/workflows/ci.yml create mode 100644 .gitignore create mode 100644 CHANGELOG.md create mode 100644 LICENSE.md create mode 100644 README.md create mode 100644 bench/bench.exs create mode 100644 config/bench.exs create mode 100644 config/config.exs create mode 100644 config/dev.exs create mode 100644 config/test.exs create mode 100644 examples/building_a_simple_cache_with_ecto_in_66_lines.livemd create mode 100644 examples/building_a_simple_job_queue_with_ecto_in_46_lines.livemd create mode 100644 examples/my_event_module.ex create mode 100644 lib/ecto/adapters/dets.ex create mode 100644 lib/ecto/adapters/ets.ex create mode 100644 lib/ecto/adapters/mnesia.ex create mode 100644 lib/ecto/adapters/qlc.ex create mode 100644 lib/ecto/adapters/qlc/application.ex create mode 100644 mix.exs create mode 100644 mix.lock create mode 100644 priv/repo/migrations/00000000000001_create_users.exs create mode 100644 priv/repo/migrations/00000000000002_create_users_sessions.exs create mode 100644 priv/repo/migrations/00000000000003_add_token_to_users_sessions.exs create mode 100644 test/ecto/dets_test.exs create mode 100644 test/ecto/ets_test.exs create mode 100644 test/ecto/mnesia_test.exs create mode 100644 test/ecto/postgres_test.exs create mode 100644 test/support/accounts/user.ex create mode 100644 test/support/accounts/user_session.ex create mode 100644 test/support/data_case.ex create mode 100644 test/test_helper.exs diff --git a/.formatter.exs b/.formatter.exs new file mode 100644 index 0000000..9776f50 --- /dev/null +++ b/.formatter.exs @@ -0,0 +1,4 @@ +[ + import_deps: [:ecto, :ecto_sql], + inputs: [] +] diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml new file mode 100644 index 0000000..5c330bc --- /dev/null +++ b/.github/workflows/ci.yml @@ -0,0 +1,50 @@ +name: Elixir CI + +on: + push: + branches: [ main ] + pull_request: + branches: [ main ] + +jobs: + build: + + name: Build and test + runs-on: ubuntu-latest + services: + postgres: + image: postgres:14 + env: + POSTGRES_USER: postgres + POSTGRES_PASSWORD: postgres + POSTGRES_DB: ecto_qlc_test + ports: + - 5432:5432 + strategy: + matrix: + elixir: [1.14.0] + otp: [25.0.4] + steps: + - uses: actions/checkout@v2 + - name: Set up Elixir + 
uses: erlef/setup-beam@v1 + with: + elixir-version: ${{ matrix.elixir }} # Define the elixir version [required] + otp-version: ${{ matrix.otp }} # Define the OTP version [required] + - name: Restore dependencies cache + uses: actions/cache@v2 + id: cache-mix + with: + path: | + deps + _build + key: ${{ runner.os }}-${{ matrix.otp }}-${{ matrix.elixir }}-mix-${{ hashFiles('**/mix.lock') }} + - name: Install and compile deps + if: steps.cache-mix.outputs.cache-hit != 'true' + run: | + mix local.rebar --force + mix local.hex --force + mix deps.get + mix deps.compile + - name: Run test + run: mix test || mix test --failed diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..ecea7a6 --- /dev/null +++ b/.gitignore @@ -0,0 +1,29 @@ +# The directory Mix will write compiled artifacts to. +/_build/ + +# If you run "mix test --cover", coverage assets end up here. +/cover/ + +# The directory Mix downloads your dependencies sources to. +/deps/ + +# Where third-party dependencies like ExDoc output generated docs. +/doc/ + +# Ignore .fetch files in case you like to edit your project deps locally. +/.fetch + +# If the VM crashes, it generates a dump, let's ignore it too. +erl_crash.dump + +# Also ignore archive artifacts (built via "mix archive.build"). +*.ez + +# Ignore package tarball (built via "mix hex.build"). +ecto_qlc-*.tar + +# Temporary files, for example, from tests. +/tmp/ + +# Benchmark results. +/bench/results diff --git a/CHANGELOG.md b/CHANGELOG.md new file mode 100644 index 0000000..bd3af21 --- /dev/null +++ b/CHANGELOG.md @@ -0,0 +1,7 @@ +# Changelog + +## Ecto QLC v0.1.0 December 16th, 2022 + +### Enhancements + +* Initial release \ No newline at end of file diff --git a/LICENSE.md b/LICENSE.md new file mode 100644 index 0000000..2bb9ad2 --- /dev/null +++ b/LICENSE.md @@ -0,0 +1,176 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
\ No newline at end of file
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..a0b953f
--- /dev/null
+++ b/README.md
@@ -0,0 +1,66 @@
+# Ecto QLC
+
+Have you ever wondered if you could just write one Ecto query and use it both for an SQL database and Erlang's ETS, DETS or Mnesia?
+
+Whether you are doing early prototyping or looking for an easy way to do pass-through caching, let Ecto do the heavy lifting for you, with one schema and multiple backends!
+
+## Installation
+
+If [available in Hex](https://hex.pm/docs/publish), the package can be installed
+by adding `ecto_qlc` to your list of dependencies in `mix.exs`:
+
+```elixir
+def deps do
+  [
+    {:ecto_qlc, "~> 0.1.0"}
+  ]
+end
+```
+
+Documentation can be generated with [ExDoc](https://github.com/elixir-lang/ex_doc)
+and published on [HexDocs](https://hexdocs.pm). Once published, the docs can
+be found at <https://hexdocs.pm/ecto_qlc>.
+
+## Limitations
+
+The following are the current limitations. Most of these could be implemented; others, like placeholders, may not even make sense to implement.
+
+* Subqueries are only supported without `parent_as`, since supporting it would require implementing a query planner.
+* Combinations are not supported, but could be supported in a similar way to subqueries.
+* Windows are not supported.
+* Joins are limited to merge join or nested join; the qual (`:left`, `:right`, `:inner`, etc.) does not affect the result.
+* CTEs are not supported.
+* Query locks are only supported in the Mnesia adapter, and only `:read`, `:write` or `:sticky_write`; global locks are not supported.
+* Placeholders are not supported.
+* `on_conflict` is not supported.
+* Fragments are not supported in select clauses and have limited support in where clauses, where the string may only contain valid Erlang code.
+* Queries are not cached.
+* No automatic clustering.
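+
+To make the supported surface concrete, here is a minimal sketch of a query that stays within these limits. The `MyApp.User` schema and `MyRepo` module are hypothetical, and `MyRepo` is assumed to be backed by one of the EctoQLC adapters:
+
+```elixir
+import Ecto.Query
+
+# Plain filters, ordering, limits and selects are supported across
+# the ETS, DETS and Mnesia adapters alike.
+query =
+  from u in MyApp.User,
+    where: not is_nil(u.email),
+    order_by: [asc: u.email],
+    limit: 10,
+    select: u.email
+
+MyRepo.all(query)
+```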
+
+## Examples
+
+Can be found under the examples directory.
+
+- [Building a simple cache with Ecto in 66 lines](/examples/building_a_simple_cache_with_ecto_in_66_lines.livemd) or [![Run in Livebook](https://livebook.dev/badge/v1/black.svg)](https://livebook.dev/run?url=https%3A%2F%2Fraw.githubusercontent.com%2FSchultzer%2Fecto_qlc%2Fmain%2Fexamples%2Fbuilding_a_simple_cache_with_ecto_in_66_lines.livemd%3Ftoken%3DGHSAT0AAAAAABYTWG4GREE7SJQWSL5INJZSY44YZWQ)
+- [Building a simple job queue with Ecto in 46 lines](/examples/building_a_simple_job_queue_with_ecto_in_46_lines.livemd) or [![Run in Livebook](https://livebook.dev/badge/v1/black.svg)](https://livebook.dev/run?url=https%3A%2F%2Fraw.githubusercontent.com%2FSchultzer%2Fecto_qlc%2Fmain%2Fexamples%2Fbuilding_a_simple_job_queue_with_ecto_in_46_lines.livemd%3Ftoken%3DGHSAT0AAAAAABYTWG4GZLPAJDZ4WIIF6LAWY44Y2OQ)
+
+## Prior Art
+
+Most of the inspiration for this adapter comes from [Ecto SQL](https://github.com/elixir-ecto/ecto_sql) and [Etso](https://github.com/evadne/etso).
+
+Below is a non-exhaustive list of similar projects:
+
+- https://github.com/elixir-ecto/ecto_sql
+- https://github.com/meh/amnesia
+- https://github.com/sheharyarn/memento
+- https://github.com/Nebo15/ecto_mnesia
+- https://github.com/evadne/etso
+- https://github.com/wojtekmach/ets_ecto
+- https://gitlab.com/patatoid/ecto3_mnesia
+- https://github.com/Logflare/ecto3_mnesia
+
+## License
+
+Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at https://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
\ No newline at end of file diff --git a/bench/bench.exs b/bench/bench.exs new file mode 100644 index 0000000..d73458e --- /dev/null +++ b/bench/bench.exs @@ -0,0 +1,57 @@ +alias EctoQLC.Accounts.User + +:mnesia.stop +mnesia_dir = '#{System.tmp_dir!()}ecto-qlc-#{Ecto.UUID.generate}/' +File.mkdir(mnesia_dir) +Application.put_env(:mnesia, :dir, mnesia_dir) +:mnesia.start + +defmodule Postgres do + use Ecto.Repo, otp_app: :ecto_qlc, adapter: Ecto.Adapters.Postgres, log: false +end + +defmodule Mnesia do + use Ecto.Repo, otp_app: :ecto_qlc, adapter: EctoQLC.Adapters.Mnesia, log: false +end + +defmodule ETS do + use Ecto.Repo, otp_app: :ecto_qlc, adapter: EctoQLC.Adapters.ETS, log: false +end + +migration_path = "#{File.cwd!}/priv/repo/migrations" +repos = [Mnesia, Postgres, ETS] +Enum.map(repos, &(&1.__adapter__.ensure_all_started(&1.config, :temporary))) +Enum.map(repos, &(&1.__adapter__.storage_down(&1.config))) +Enum.map(repos, &(&1.__adapter__.storage_up(&1.config))) +Enum.map(repos, &(&1.start_link(&1.config ++ [dir: mnesia_dir]))) + +for file <- Enum.sort(File.ls!(migration_path)) do + {version, _rest} = Integer.parse(file) + [{module, _}] = Code.compile_file(file, migration_path) + for repo <- repos, do: Ecto.Migrator.up(repo, version, module, log: false) +end + +Benchee.run(Map.new(repos, fn repo -> {"#{repo}", &repo.insert!(&1)} end), + inputs: %{"struct" => struct(User, %{email: "user@example.com"}), "changeset" => User.changeset(%User{}, %{email: "user@example.com"})}, + formatters: [ + {Benchee.Formatters.HTML, file: "bench/results/insert.html"}, + Benchee.Formatters.Console + ] +) + +Enum.map(repos, &(&1.delete_all(User))) + +users = Enum.map(1..5_000, &%{email: "user#{&1}@example.com", inserted_at: DateTime.utc_now, updated_at: DateTime.utc_now}) +Enum.map(repos, &(&1.insert_all(User, users))) + +Benchee.run( + Map.new(repos, fn repo -> {"#{repo}", fn -> repo.all(User, limit: 5_000) end} end), + time: 10, + after_each: &(5_000 = length(&1)), + formatters: [ + {Benchee.Formatters.HTML, file: "bench/results/all.html"}, + Benchee.Formatters.Console + ] +) + +Enum.map(repos, &(&1.__adapter__.storage_down(&1.config))) diff --git a/config/bench.exs b/config/bench.exs new file mode 100644 index 0000000..e34458b --- /dev/null +++ b/config/bench.exs @@ -0,0 +1,14 @@ +import Config + +config :mnesia, debug: :none, dump_log_load_regulation: true + +config :logger, level: :none + +config :ecto_qlc, Postgres, + username: "postgres", + password: "postgres", + database: "ecto_qlc_test#{System.get_env("MIX_TEST_PARTITION")}", + hostname: "localhost", + show_sensitive_data_on_connection_error: true + +config :ecto_qlc, Mnesia, log: false diff --git a/config/config.exs b/config/config.exs new file mode 100644 index 0000000..6a8978a --- /dev/null +++ b/config/config.exs @@ -0,0 +1,27 @@ +import Config + +## mnesia is configurable https://www.erlang.org/doc/man/mnesia.html#configuration-parameters +## https://streamhacker.com/2008/12/10/how-to-eliminate-mnesia-overload-events/ +config :mnesia, + access_module: :mnesia, + auto_repair: true, + backup_module: :mnesia_backup, + debug: :none, + # core_dir: false, + dc_dump_limit: 40, + # dir: '/', + dump_disc_copies_at_startup: true, + dump_log_load_regulation: true, + dump_log_update_in_place: true, + dump_log_write_threshold: 50000, + dump_log_time_threshold: :timer.minutes(3), + event_module: :mnesia_event, #EctoQLC.Adapters.Mnesia.Event, + extra_db_nodes: [], + # fallback_error_function: {EctoQLC.Adapters.Mnesia.Fallback, :fallback}, + 
max_wait_for_decision: :infinity, + no_table_loaders: 2, + send_compressed: 0, + max_transfer_size: 64000 + # schema_location: :opt_disc + +import_config "#{Mix.env()}.exs" diff --git a/config/dev.exs b/config/dev.exs new file mode 100644 index 0000000..965e2aa --- /dev/null +++ b/config/dev.exs @@ -0,0 +1,3 @@ +import Config + +config :mnesia, debug: :verbose diff --git a/config/test.exs b/config/test.exs new file mode 100644 index 0000000..36b5ea9 --- /dev/null +++ b/config/test.exs @@ -0,0 +1,13 @@ +import Config + +config :mnesia, debug: :none + +config :logger, level: :none + +config :ecto_qlc, EctoQLC.Adapters.PostgresTest.Repo, + username: "postgres", + password: "postgres", + database: "ecto_qlc_test#{System.get_env("MIX_TEST_PARTITION")}", + hostname: "localhost", + pool: Ecto.Adapters.SQL.Sandbox, + pool_size: 10 diff --git a/examples/building_a_simple_cache_with_ecto_in_66_lines.livemd b/examples/building_a_simple_cache_with_ecto_in_66_lines.livemd new file mode 100644 index 0000000..4b9839a --- /dev/null +++ b/examples/building_a_simple_cache_with_ecto_in_66_lines.livemd @@ -0,0 +1,112 @@ +# Building a simple cache with Ecto in 66 lines + +```elixir +Mix.install([ + {:ecto_sql, "~> 3.9.0"}, + {:ecto_qlc, "~> 0.1.0"}, + {:postgrex, ">= 0.0.0"} +]) +``` + +## Setting up our Ecto schema, migration and repos + +```elixir +defmodule Cache do + use Ecto.Schema + + schema "cache" do + field(:data, {:array, :any}) + timestamps(updated_at: false) + end +end + +defmodule CreateCache do + use Ecto.Migration + + def change do + create table(:cache) do + add(:data, {:array, :any}) + timestamps(updated_at: false) + end + end +end + +defmodule Accounts.User do + use Ecto.Schema + + schema "users" do + field(:email, :string) + timestamps() + end +end + +defmodule CreateUser do + use Ecto.Migration + + def change do + create table(:users) do + add(:email, :string) + timestamps() + end + end +end + +defmodule Repo do + use Ecto.Repo, otp_app: :my_app, adapter: Ecto.Adapters.Postgres + + def all_cached(query, opts \\ []) do + RepoCache.get_or_insert(query, opts) + end +end + +defmodule RepoCache do + use Ecto.Repo, otp_app: :my_app, adapter: EctoQLC.Adapters.ETS + + def get_or_insert(query, opts) do + key = :erlang.phash2(query) + + if cache = get(Cache, key) do + cache.data + else + data = Repo.all(query, opts) + insert!(%Cache{id: key, data: data}) + data + end + end + + def clear_cache do + delete_all(Cache) + end +end +``` + +## Let's try it out! 
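+
+This cell assumes a local PostgreSQL server is reachable with the `postgres`/`postgres` credentials used below.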
+
+```elixir
+Repo.start_link(
+  username: "postgres",
+  password: "postgres",
+  database: "ecto_qlc_test",
+  hostname: "localhost"
+)
+
+RepoCache.start_link([])
+Ecto.Migrator.up(Repo, 1, CreateUser)
+Ecto.Migrator.up(RepoCache, 1, CreateUser)
+Ecto.Migrator.up(RepoCache, 2, CreateCache)
+Repo.delete_all(Accounts.User)
+RepoCache.clear_cache()
+
+Repo.insert!(%Accounts.User{email: "user@example.com"})
+
+[%{email: "user@example.com"}] = Repo.all(Accounts.User)
+[%{email: "user@example.com"}] = Repo.all_cached(Accounts.User)
+
+Repo.insert!(%Accounts.User{email: "user2@example.com"})
+
+[%{email: "user@example.com"}, %{email: "user2@example.com"}] = Repo.all(Accounts.User)
+[%{email: "user@example.com"}] = Repo.all_cached(Accounts.User)
+
+RepoCache.clear_cache()
+[%{email: "user@example.com"}, %{email: "user2@example.com"}] = Repo.all_cached(Accounts.User)
+```
diff --git a/examples/building_a_simple_job_queue_with_ecto_in_46_lines.livemd b/examples/building_a_simple_job_queue_with_ecto_in_46_lines.livemd
new file mode 100644
index 0000000..f03275f
--- /dev/null
+++ b/examples/building_a_simple_job_queue_with_ecto_in_46_lines.livemd
@@ -0,0 +1,71 @@
+# Building a simple job queue with Ecto in 46 lines
+
+```elixir
+Mix.install([{:ecto_qlc, "~> 0.1.0"}])
+```
+
+## Setting up our Ecto schema, migration and repo
+
+```elixir
+defmodule Job do
+  use Ecto.Schema
+
+  schema "jobs" do
+    field(:completed_at, :utc_datetime_usec)
+    field(:started_at, :utc_datetime_usec)
+    field(:mfa, {:array, :any})
+    timestamps()
+  end
+end
+
+defmodule CreateJobs do
+  use Ecto.Migration
+
+  def change do
+    create table(:jobs) do
+      add(:completed_at, :utc_datetime_usec)
+      add(:started_at, :utc_datetime_usec)
+      add(:mfa, {:array, :any})
+      timestamps()
+    end
+  end
+end
+
+defmodule Jobs do
+  import Ecto.Query
+
+  def execute_job() do
+    %Job{mfa: [{m, f, a}], id: id} =
+      Job
+      |> order_by(asc: :inserted_at)
+      |> first()
+      |> where([job], is_nil(job.completed_at) and is_nil(job.started_at))
+      |> Repo.one()
+
+    now = DateTime.utc_now()
+    Repo.update_all(where(Job, id: ^id), set: [started_at: now, updated_at: now])
+    result = apply(m, f, a)
+    now = DateTime.utc_now()
+    Repo.update_all(where(Job, id: ^id), set: [completed_at: now, updated_at: now])
+    result
+  end
+end
+
+defmodule Repo do
+  use Ecto.Repo, otp_app: :my_app, adapter: EctoQLC.Adapters.ETS
+end
+```
+
+## Let's try it out!
+
+```elixir
+Repo.start_link([])
+Ecto.Migrator.up(Repo, 1, CreateJobs)
+Repo.delete_all(Job)
+
+Repo.insert!(%Job{mfa: [{IO, :inspect, ["Hello World!"]}]})
+Repo.insert!(%Job{mfa: [{IO, :inspect, ["Bonjour Monde!"]}]})
+
+"Hello World!" = Jobs.execute_job()
+"Bonjour Monde!" = Jobs.execute_job()
+```
diff --git a/examples/my_event_module.ex b/examples/my_event_module.ex
new file mode 100644
index 0000000..4abd6a3
--- /dev/null
+++ b/examples/my_event_module.ex
@@ -0,0 +1,79 @@
+defmodule MyEventModule do
+  @moduledoc """
+  This module implements the `:gen_event` behaviour that the `:mnesia` `event_module` configuration parameter expects; see https://www.erlang.org/doc/apps/mnesia/mnesia_chap5#mnesia-event-handling.
+ """ + @behaviour :gen_event + + @impl :gen_event + def init(state), do: {:ok, state} + + @impl :gen_event + def handle_event(event, state), do: __handle_event__(event, state) + + @impl :gen_event + def handle_info(msg, state) do + {:ok, _} = __handle_event__(msg, state) + {:ok, state} + end + + @impl :gen_event + def handle_call(msg, state) do + {:ok, state} = __handle_event__(msg, state) + {:ok, :ok, state} + end + + @impl :gen_event + def format_status(_opt, [_pdict, _s]), do: :ok + + @impl :gen_event + def terminate(_reason, _state), do: :ok + + @impl :gen_event + def code_change(_old_vsn, state, _extra), do: {:ok, state} + + defp __handle_event__({:mnesia_system_event, {:mnesia_up, _node}}, state) do + {:ok, state} + end + + defp __handle_event__({:mnesia_system_event, {:mnesia_down, _node}}, state) do + {:ok, state} + end + + defp __handle_event__({:mnesia_system_event, {:mnesia_checkpoint_activated, _chechpoint}}, state) do + {:ok, state} + end + + defp __handle_event__({:mnesia_system_event, {:mnesia_checkpoint_deactivated, _chechpoint}}, state) do + {:ok, state} + end + + defp __handle_event__({:mnesia_system_event, {:mnesia_overload, _details}}, state) do + ## mnesia_overload is a common event and can be avoided by adjusting dc_dump_limit and dump_log_write_threshold to fit your hardware and mnesia usage. + ## https://groups.google.com/g/rabbitmq-users/c/N9DYkaand9k + ## https://issues.couchbase.com/browse/MB-3982?focusedCommentId=21168&page=com.atlassian.jira.plugin.system.issuetabpanels%3Acomment-tabpanel + {:ok, state} + end + + defp __handle_event__({:mnesia_system_event, {:inconsistent_database, type, _node}}, state) when type in ~w[running_partitioned_network starting_partitioned_network bad_decision]a do + ## inconsistent_database useally happens when running mnesia in a cluster, the replicas can becomen inconsistent when enduring a netsplit, therefore a strategy is to be implemented by the consumer. + ## one way to deal with this is https://github.com/uwiger/unsplit and https://github.com/danschultzer/pow/blob/master/lib/pow/store/backend/mnesia_cache/unsplit.ex + {:ok, state} + end + + defp __handle_event__({:mnesia_fatal, {:mnesia_fatal, _format, _args, _binary_core}}, state) do + {:ok, state} + end + + defp __handle_event__({:mnesia_system_event, {:mnesia_error, _format, _args}}, state) do + {:ok, state} + end + + defp __handle_event__({:mnesia_system_event, {:mnesia_user, _event}}, state) do + ## some deal with splitbrain from netsplit by leveraging majority checking and do their unsplit when the minority_write_attempt event happens. https://github.com/sheharyarn/memento/pull/21 + {:ok, state} + end + + defp __handle_event__({:mnesia_activity_event, _event}, state) do + {:ok, state} + end +end diff --git a/lib/ecto/adapters/dets.ex b/lib/ecto/adapters/dets.ex new file mode 100644 index 0000000..eb63600 --- /dev/null +++ b/lib/ecto/adapters/dets.ex @@ -0,0 +1,21 @@ +defmodule EctoQLC.Adapters.DETS do + @moduledoc """ + Adapter module for [DETS](https://www.erlang.org/doc/man/dets.html). 
+ """ + use EctoQLC.Adapters.QLC, driver: :dets + + @impl Ecto.Adapter.Storage + def storage_down(_opts) do + if Enum.all?(Enum.map(:dets.all(), &:dets.close/1)), do: :ok, else: {:error, :already_down} + end + + @impl Ecto.Adapter.Storage + def storage_status(_opts) do + :up + end + + @impl Ecto.Adapter.Storage + def storage_up(_opts) do + :ok + end +end diff --git a/lib/ecto/adapters/ets.ex b/lib/ecto/adapters/ets.ex new file mode 100644 index 0000000..074f9f8 --- /dev/null +++ b/lib/ecto/adapters/ets.ex @@ -0,0 +1,15 @@ +defmodule EctoQLC.Adapters.ETS do + @moduledoc """ + Adapter module for [ETS](https://www.erlang.org/doc/man/ets.html). + """ + use EctoQLC.Adapters.QLC, driver: :ets + + @impl Ecto.Adapter.Storage + def storage_down(_opts), do: :ok + + @impl Ecto.Adapter.Storage + def storage_status(_opts), do: :up + + @impl Ecto.Adapter.Storage + def storage_up(_opts), do: :ok +end diff --git a/lib/ecto/adapters/mnesia.ex b/lib/ecto/adapters/mnesia.ex new file mode 100644 index 0000000..e878983 --- /dev/null +++ b/lib/ecto/adapters/mnesia.ex @@ -0,0 +1,71 @@ +defmodule EctoQLC.Adapters.Mnesia do + @moduledoc """ + Adapter module for [Mnesia](https://www.erlang.org/doc/man/mnesia.html). + + ## Clustering + + There is currently no support for automatic clustering or unsplit. If you decide to deploy EctoQLC.Adapters.Mnesia in a cluster you might find the following resources useful: + + * https://www.erlang.org/doc/apps/mnesia/mnesia_chap5#distribution-and-fault-tolerance + * https://github.com/danschultzer/pow/blob/master/lib/pow/store/backend/mnesia_cache.ex + * https://github.com/danschultzer/pow/blob/master/lib/pow/store/backend/mnesia_cache/unsplit.ex + * https://github.com/uwiger/unsplit + """ + use EctoQLC.Adapters.QLC, driver: :mnesia + + @impl Ecto.Adapter.Storage + def storage_down(_opts) do + with :stopped <- :mnesia.stop(), + :ok <- :mnesia.delete_schema([node()]) do + :ok + else + {:error, reason} -> raise RuntimeError, :mnesia.error_description(reason) + end + end + + @impl Ecto.Adapter.Storage + def storage_status(_opts) do + case :mnesia.system_info(:is_running) do + :no -> :down + :yes -> :up + value -> {:error, value} + end + end + + @impl Ecto.Adapter.Storage + def storage_up(_opts) do + with :stopped <- :mnesia.stop, + :ok <- :mnesia.create_schema([node()]) do + :ok + else + {:aborted, {:already_exists, _table, _node, _storage_type}} -> {:error, :already_up} + + {:error, {_, {:already_exists, _}}} -> {:error, :already_up} + + {:error, reason} -> raise RuntimeError, :mnesia.error_description(reason) + end + end + + @impl Ecto.Adapter.Transaction + def transaction(_adapter_meta, _opts, fun) do + # mnesia transaction support funtions with args + # with would allow it to forward options if needed. 
+ case :mnesia.transaction(fun) do + {:aborted, reason} -> {:error, reason} + {:atomic, result} -> {:ok, result} + result -> {:ok, result} + end + end + + @impl Ecto.Adapter.Transaction + def in_transaction?(_adapter_meta), do: :mnesia.is_transaction() + + @impl Ecto.Adapter.Transaction + def rollback(adapter_meta, %_schema{} = value) do + if in_transaction?(adapter_meta) do + throw(:mnesia.abort(value)) + else + raise "cannot call rollback outside of transaction" + end + end +end diff --git a/lib/ecto/adapters/qlc.ex b/lib/ecto/adapters/qlc.ex new file mode 100644 index 0000000..4a91196 --- /dev/null +++ b/lib/ecto/adapters/qlc.ex @@ -0,0 +1,1413 @@ +defmodule EctoQLC.Adapters.QLC do + @moduledoc ~S""" + This application provides functionality for working with Erlang databases in `Ecto`. + + ## Built-in adapters + + * `EctoQLC.Adapters.DETS` for [`dets`](https://www.erlang.org/doc/man/dets.html) + * `EctoQLC.Adapters.ETS` for [`ets`](https://www.erlang.org/doc/man/ets.html) + * `EctoQLC.Adapters.Mnesia` for [`mnesia`](https://www.erlang.org/doc/man/mnesia.html) + + ## Migrations + + `ecto_qlc` supports `ecto_sql` database migrations, currently none of the adapters support constraints, unique index or multi column index. + """ + require Kernel + require Logger + alias Ecto.Migration.Table + alias Ecto.Migration.Index + alias Ecto.Migration.Constraint + + @doc false + defmacro __using__(opts) do + quote do + @behaviour Ecto.Adapter + @behaviour Ecto.Adapter.Migration + @behaviour Ecto.Adapter.Structure + @behaviour Ecto.Adapter.Queryable + @behaviour Ecto.Adapter.Schema + @behaviour Ecto.Adapter.Storage + @behaviour Ecto.Adapter.Transaction + + alias __MODULE__ + + opts = unquote(opts) + @driver Keyword.fetch!(opts, :driver) + + @impl Ecto.Adapter + def ensure_all_started(_config, _type) do + case Application.ensure_all_started(@driver) do + {:ok, []} -> {:ok, [@driver]} + {:ok, [@driver]} -> {:ok, [@driver]} + {:error, _reason} -> {:ok, [@driver]} + end + end + + @impl Ecto.Adapter + def init(config) do + EctoQLC.Adapters.QLC.init(config, @driver) + end + + @impl Ecto.Adapter + def checkout(%{pid: pid}, _opts, fun) do + Process.put({__MODULE__, pid}, true) + result = fun.() + Process.delete({__MODULE__, pid}) + result + end + + @impl Ecto.Adapter + def checked_out?(%{pid: pid} = _adapter_meta) do + Process.get({__MODULE__, pid}) != nil + end + + @impl Ecto.Adapter + defmacro __before_compile__(_env), do: :ok + + @impl Ecto.Adapter + def loaders({:map, _}, type), do: [&Ecto.Type.embedded_load(type, &1, :json)] + def loaders(match, type) when match in ~w[binary_id embed_id]a, do: [Ecto.UUID, type] + def loaders(_, type), do: [type] + + @impl Ecto.Adapter + def dumpers({:map, _}, type), do: [&Ecto.Type.embedded_load(type, &1, :json)] + def dumpers({:array, {:array, value}}, _type), do: [{:in, value}] + def dumpers(match, type) when match in ~w[binary_id embed_id]a, do: [type, Ecto.UUID] + def dumpers(_, type), do: [type] + + @impl Ecto.Adapter.Queryable + def prepare(operation, %Ecto.Query{} = query) do + {:nocache, {operation, query}} + end + + @impl Ecto.Adapter.Queryable + def execute(adapter_meta, query_meta, {:nocache, query}, params, options) do + EctoQLC.Adapters.QLC.execute(adapter_meta, query_meta, query, params, options) + end + + @impl Ecto.Adapter.Queryable + def stream(adapter_meta, query_meta, {:nocache, query}, params, options) do + EctoQLC.Adapters.QLC.stream(adapter_meta, query_meta, query, params, options) + end + + @impl Ecto.Adapter.Schema + def delete(adapter_meta, 
schema_meta, filters, options) do
+        EctoQLC.Adapters.QLC.delete(@driver, adapter_meta, schema_meta, filters, options)
+      end
+
+      @impl Ecto.Adapter.Schema
+      def insert(adapter_meta, schema_meta, fields, on_conflict, returning, options) do
+        EctoQLC.Adapters.QLC.insert(@driver, adapter_meta, schema_meta, fields, on_conflict, returning, options)
+      end
+
+      @impl Ecto.Adapter.Schema
+      def insert_all(adapter_meta, schema_meta, header, list, on_conflict, returning, placeholders, options) do
+        EctoQLC.Adapters.QLC.insert_all(@driver, adapter_meta, schema_meta, header, list, on_conflict, returning, placeholders, options)
+      end
+
+      @impl Ecto.Adapter.Schema
+      def update(adapter_meta, schema_meta, fields, filters, returning, options) do
+        EctoQLC.Adapters.QLC.update(@driver, adapter_meta, schema_meta, fields, filters, returning, options)
+      end
+
+      @impl Ecto.Adapter.Schema
+      def autogenerate(:id), do: System.unique_integer([:positive, :monotonic])
+      def autogenerate(:embed_id), do: Ecto.UUID.generate()
+      def autogenerate(:binary_id), do: Ecto.UUID.bingenerate()
+
+      @impl Ecto.Adapter.Migration
+      def execute_ddl(adapter_meta, command, options) do
+        EctoQLC.Adapters.QLC.execute_ddl(adapter_meta, command, options)
+      end
+
+      @impl Ecto.Adapter.Migration
+      def lock_for_migrations(adapter_meta, options, fun) do
+        EctoQLC.Adapters.QLC.lock_for_migrations(adapter_meta, options, fun)
+      end
+
+      @impl Ecto.Adapter.Migration
+      def supports_ddl_transaction?(), do: false
+
+      @impl Ecto.Adapter.Transaction
+      def transaction(adapter_meta, opts, fun) do
+        Process.put({adapter_meta.pid, :transaction}, true)
+        v = fun.()
+        Process.put({adapter_meta.pid, :transaction}, false)
+        {:ok, v}
+      end
+
+      @impl Ecto.Adapter.Transaction
+      def in_transaction?(adapter_meta), do: Process.get({adapter_meta.pid, :transaction}, false)
+
+      @impl Ecto.Adapter.Transaction
+      def rollback(adapter_meta, %_schema{} = value) do
+        if in_transaction?(adapter_meta) do
+          throw(value)
+        else
+          raise "cannot call rollback outside of transaction"
+        end
+      end
+
+      @impl Ecto.Adapter.Structure
+      def dump_cmd(args, opts, config) do
+        EctoQLC.Adapters.QLC.dump_cmd(args, opts, config, @driver)
+      end
+
+      @impl Ecto.Adapter.Structure
+      def structure_dump(default, config) do
+        EctoQLC.Adapters.QLC.structure_dump(default, config, @driver)
+      end
+
+      @impl Ecto.Adapter.Structure
+      def structure_load(default, config) do
+        EctoQLC.Adapters.QLC.structure_load(default, config, @driver)
+      end
+
+      defoverridable [prepare: 2, execute: 5, stream: 5, execute_ddl: 3, loaders: 2, dumpers: 2, checked_out?: 1, checkout: 3, autogenerate: 1, ensure_all_started: 2, __before_compile__: 1, lock_for_migrations: 3, supports_ddl_transaction?: 0, transaction: 3, in_transaction?: 1, rollback: 2]
+    end
+  end
+
+  @doc false
+  def dump_cmd(_args, _opts, _config, _driver) do
+    {"not_implemented", 1}
+  end
+
+  @doc false
+  def structure_dump(_default, _config, _driver) do
+    {:error, "not_implemented"}
+  end
+
+  @doc false
+  def structure_load(_default, _config, _driver) do
+    {:error, "not_implemented"}
+  end
+
+  @doc false
+  def init(config, :mnesia = driver) do
+    dir = '#{Keyword.fetch!(config, :dir)}'
+    File.mkdir_p!(dir)
+    Application.put_env(:mnesia, :dir, dir)
+    log = Keyword.get(config, :log, :debug)
+    stacktrace = Keyword.get(config, :stacktrace, nil)
+    telemetry_prefix = Keyword.fetch!(config, :telemetry_prefix)
+    telemetry = {config[:repo], log, telemetry_prefix ++ [:query]}
+    {:ok, DynamicSupervisor.child_spec(strategy: :one_for_one, name: Module.concat([config[:repo], driver])),
%{telemetry: telemetry, stacktrace: stacktrace, opts: config}} + end + + def init(config, driver) do + log = Keyword.get(config, :log, :debug) + stacktrace = Keyword.get(config, :stacktrace, nil) + telemetry_prefix = Keyword.fetch!(config, :telemetry_prefix) + telemetry = {config[:repo], log, telemetry_prefix ++ [:query]} + {:ok, DynamicSupervisor.child_spec(strategy: :one_for_one, name: Module.concat([config[:repo], driver])), %{telemetry: telemetry, stacktrace: stacktrace, opts: config}} + end + + @aggregates ~w[avg count max min sum]a + @operators ~w[or and > < >= <= == === != + - * /]a + + @doc false + def execute(%{adapter: EctoQLC.Adapters.Mnesia} = adapter_meta, query_meta, {operator, query}, params, options) do + prepareed = prepare(adapter_meta, query_meta, query, params, options) + qlc = to_qlc(operator, prepareed) + :mnesia.transaction(fn -> + if lock = elem(prepareed, 2).lock do + :mnesia.lock(elem(lock, 0), elem(lock, 1)) + end + query_handle = to_query_handle(operator, prepareed, qlc) + {query_time, values} = :timer.tc(:qlc, :eval, [query_handle, []]) + {decode_time, value} = :timer.tc(__MODULE__, :select, [values, operator, prepareed]) + log(value, get_source(query.sources), qlc, query_time, decode_time, 0, 0, operator, adapter_meta.telemetry, params, options ++ adapter_meta.opts) + end) + |> elem(1) + end + def execute(adapter_meta, query_meta, {operator, query}, params, options) do + prepareed = prepare(adapter_meta, query_meta, query, params, options) + qlc = to_qlc(operator, prepareed) + query_handle = to_query_handle(operator, prepareed, qlc) + {query_time, values} = :timer.tc(:qlc, :eval, [query_handle, []]) + {decode_time, value} = :timer.tc(__MODULE__, :select, [values, operator, prepareed]) + log(value, get_source(query.sources), qlc, query_time, decode_time, 0, 0, operator, adapter_meta.telemetry, params, options ++ adapter_meta.opts) + end + + @doc false + def stream(adapter_meta, query_meta, {operator, query}, params, options) do + key = :erlang.timestamp + prepareed = prepare(adapter_meta, query_meta, query, params, options) + qlc = to_qlc(operator, prepareed) + query_handle = to_query_handle(operator, prepareed, qlc) + Stream.resource( + fn -> + Process.put(key, :erlang.timestamp) + :qlc.cursor(query_handle, elem(prepareed, 4)) + end, + fn cursor -> + case :qlc.next_answers(cursor, options[:max_rows] || 500) do + [] -> {:halt, cursor} + rows -> {[select(rows, :all, prepareed)], cursor} + end + end, + fn cursor -> + result = :qlc.delete_cursor(cursor) + query_time = :timer.now_diff(:erlang.timestamp, Process.get(key)) + log(result, get_source(query.sources), qlc, query_time, 0, 0, 0, operator, adapter_meta.telemetry, params, options ++ adapter_meta.opts) + end + ) + end + + @doc false + def get_source({%Ecto.SubQuery{} = subquery}), do: get_source(subquery.query.sources) + def get_source({source, _module_, _prefix}) when is_binary(source), do: source + def get_source(source) when is_tuple(source), do: get_source(elem(source, 0)) + + @creates [:create, :create_if_not_exists] + @drops [:drop, :drop_if_exists] + + @doc false + def execute_ddl(adapter_meta, {_command, %Constraint{}}, _options) do + {:ok, [{:warn, "#{adapter_meta.adapter} adapter does not support CONSTRAINT commands", []}]} + end + def execute_ddl(%{adapter: EctoQLC.Adapters.DETS} = adapter_meta, {command, %Table{} = table, _columns}, options) when command in @creates do + options = Keyword.merge(adapter_meta.opts, List.wrap(table.options) ++ options) + table = to_table(adapter_meta, table.name, 
table.prefix, options) + file = if dir = Application.get_env(:dets, :dir), do: Path.join(dir, "#{table}"), else: table + options = Keyword.take(Keyword.merge([file: '#{file}'], options), ~w[access auto_save estimated_no_objects file max_no_slots min_no_slots keypos ram_file repair type]a) + case :dets.open_file(table, options) do + {:ok, ^table} -> + Enum.map(:dets.all, &:dets.sync/1) + {:ok, []} + {:error, reason} -> {:ok, [{:warn, "#{inspect(:mnesia.error_description(reason))}", []}]} + end + end + def execute_ddl(%{adapter: EctoQLC.Adapters.ETS} = adapter_meta, {command, %Table{} = table, _columns}, options) when command in @creates do + options = Keyword.merge([write_concurrency: true, read_concurrency: true], Keyword.merge(adapter_meta.opts, List.wrap(table.options) ++ options)) + options = [:set, :public, :named_table, {:heir, adapter_meta.pid, %{}}] ++ Keyword.take(options, ~w[write_concurrency read_concurrency keypos heir heir decentralized_counters compressed]a) + table = to_table(adapter_meta, table.name, table.prefix, options) + with :undefined <- :ets.info(table), + ^table <- :ets.new(table, options) do + {:ok, []} + else + _ -> + {:ok, []} + end + end + def execute_ddl(%{adapter: EctoQLC.Adapters.Mnesia} = adapter_meta, {command, %Table{} = table, columns}, options) when command in @creates do + options = if "schema_migrations" == table.name do + [disc_only_copies: [node() | Node.list]] + else + Keyword.take(Keyword.merge(adapter_meta.opts, List.wrap(table.options) ++ options), ~w[disc_copies access_mode disc_only_copies index load_order majority ram_copies record_name snmp storage_properties type local_content]a) + end + table = to_table(adapter_meta, table.name, table.prefix, options) + primary_keys = Enum.count(columns, fn {_commnad, _column, _typpe, options} -> options[:primary_key] == true end) + attributes = Enum.reject(columns, fn {_commnad, _column, _typpe, options} -> options[:primary_key] == true end) |> Enum.map(&elem(&1, 1)) + attributes = if primary_keys > 1, do: [:primary_keys | attributes], else: Enum.map(columns, &elem(&1, 1)) + + with :ok <- :mnesia.start(), + {:atomic, :ok} <- :mnesia.create_table(table, [{:attributes, attributes} | options]) do + {:ok, []} + else + {:aborted, {:already_exists, ^table}} -> {:ok, []} + + {status, reason} when status in ~w[error aborted]a -> {:ok, [{:warn, "#{inspect(:mnesia.error_description(reason))}", []}]} + end + end + def execute_ddl(%{adapter: EctoQLC.Adapters.DETS} = adapter_meta, {command, %Table{} = table, _columns}, options) when command in @drops do + table = to_table(adapter_meta, table.name, table.prefix, options) + case :dets.close(table) do + :ok -> {:ok, []} + {:error, resoan} -> {:ok, [{:warn, "#{inspect(resoan)}", []}]} + end + end + def execute_ddl(%{adapter: EctoQLC.Adapters.ETS} = adapter_meta, {command, %Table{} = table, _columns}, options) when command in @drops do + _table = to_table(adapter_meta, table.name, table.prefix, options) + {:ok, []} + end + def execute_ddl(%{adapter: EctoQLC.Adapters.Mnesia} = adapter_meta, {command, %Table{} = table, _columns}, options) when command in @drops do + case :mnesia.delete_table(to_table(adapter_meta, table.name, table.prefix, options)) do + {:atomic, :ok} -> {:ok, []} + {:aborted, resoan} -> {:ok, [{:warn, "#{inspect(resoan)}", []}]} + end + end + def execute_ddl(%{adapter: EctoQLC.Adapters.Mnesia} = adapter_meta, {:alter, %Table{} = table, changes}, options) do + table = to_table(adapter_meta, table.name, table.prefix, options) + attributes = 
:mnesia.table_info(table, :attributes)
+    new_attributes = Enum.reduce(changes, attributes, fn change, attributes -> update_attributes(change, attributes) end)
+    with true <- attributes != new_attributes,
+         {:atomic, :ok} <- :mnesia.transform_table(table, &Enum.reduce(changes, &1, fn change, row -> update_row(row, change, attributes) end), new_attributes) do
+      {:ok, []}
+    else
+      false -> {:ok, []}
+      {status, reason} when status in ~w[error aborted]a -> {:ok, [{:warn, "#{inspect(:mnesia.error_description(reason))}", []}]}
+    end
+  end
+  def execute_ddl(adapter_meta, {:alter, %Table{} = _table, _changes}, _options) do
+    {:ok, [{:warn, "#{adapter_meta.adapter} adapter does not support alter", []}]}
+  end
+  def execute_ddl(%{adapter: EctoQLC.Adapters.Mnesia} = adapter_meta, {command, %Index{columns: [column]} = index}, options) when command in @creates do
+    case :mnesia.add_table_index(to_table(adapter_meta, index.name, index.prefix, options), column) do
+      {:atomic, :ok} -> {:ok, []}
+      {:aborted, {:already_exists, _table, _}} when command == :create_if_not_exists -> {:ok, []}
+      {:aborted, {:already_exists, _table, _}} -> raise "index already exists"
+      {:aborted, reason} -> {:ok, [{:warn, "#{inspect(reason)}", []}]}
+    end
+  end
+  def execute_ddl(%{adapter: EctoQLC.Adapters.Mnesia} = adapter_meta, {command, %Index{columns: [column]} = index, _}, options) when command in @drops do
+    case :mnesia.del_table_index(to_table(adapter_meta, index.name, index.prefix, options), column) do
+      {:atomic, :ok} -> {:ok, []}
+      {:aborted, {:no_exists, _table, _}} when command == :drop_if_exists -> {:ok, []}
+      {:aborted, {:no_exists, _table, _}} -> raise "index does not exist"
+      {:aborted, reason} -> {:ok, [{:warn, "#{inspect(reason)}", []}]}
+    end
+  end
+  def execute_ddl(%{adapter: EctoQLC.Adapters.Mnesia}, {_command, %Index{}}, _options) do
+    {:ok, [{:warn, "Mnesia adapter does not support an index with multiple columns", []}]}
+  end
+  def execute_ddl(adapter_meta, {_command, %Index{}}, _options) do
+    {:ok, [{:warn, "#{adapter_meta.adapter} adapter does not support index", []}]}
+  end
+  def execute_ddl(adapter_meta, {:rename, %Table{} = _current_table, %Table{} = _new_table}, _options) do
+    # Since the table name always stays the same, we would need to copy the table into a newly created one.
+ # There might be some limitations with also being aware of indexes + # So for now we just gonna warn + # current_table = to_table(adapter_meta, current_table.name, current_table.prefix, options) + # new_table = to_table(adapter_meta, new_table.name, new_table.prefix, options) + # case :mnesia.transform_table(current_table, &:erlang.setelement(1, &1, new_table), :mnesia.table_info(current_table, :attributes), new_table) do + # {:atomic, :ok} -> {:ok, []} + # {status, reason} when status in ~w[error aborted]a -> {:ok, [{:warn, "#{inspect(:mnesia.error_description(reason))}", []}]} + # end + {:ok, [{:warn, "#{adapter_meta.adapter} adapter does not support RENAME table commands", []}]} + end + def execute_ddl(%{adapter: EctoQLC.Adapters.Mnesia} = adapter_meta, {:rename, %Table{} = table, current_column, new_column}, options) do + table = to_table(adapter_meta, table.name, table.prefix, options) + attributes = Enum.map(:mnesia.table_info(table, :attributes), fn + ^current_column -> new_column + column -> column + end) + case :mnesia.transform_table(table, &(&1), attributes) do + {:atomic, :ok} -> {:ok, []} + {status, reason} when status in ~w[error aborted]a -> {:ok, [{:warn, "#{inspect(:mnesia.error_description(reason))}", []}]} + end + end + def execute_ddl(adapter_meta, {:rename, %Table{} = _table, _current_column, _new_column}, _options) do + {:ok, [{:warn, "#{adapter_meta.adapter} adapter does not support RENAME column commands", []}]} + end + def execute_ddl(adapter_meta, command, _options) when is_binary(command) do + raise "#{adapter_meta.adapter} adapter does not support binary in execute" + end + def execute_ddl(adapter_meta, command, _options) when is_list(command) do + raise "#{adapter_meta.adapter} adapter does not support keyword lists in execute" + end + + @doc false + def lock_for_migrations(%{adapter: EctoQLC.Adapters.Mnesia}, _options, fun) do + with :ok <- :global.sync(), + :ok <- :mnesia.start(), + value when value != :aborted <- :global.trans({:lock_for_migrations, __MODULE__}, fun), + v when v in [:ok, {:error, :no_such_log}] <- :mnesia.sync_log(), + true <- :global.del_lock({:lock_for_migrations, __MODULE__}) do + value + else + reason -> {:error, reason} + end + end + def lock_for_migrations(%{adapter: EctoQLC.Adapters.DETS}, _options, fun) do + with :ok <- :global.sync(), + value when value != :aborted <- :global.trans({:lock_for_migrations, __MODULE__}, fun), + _ <- Enum.map(:dets.all, &:dets.sync/1), + true <- :global.del_lock({:lock_for_migrations, __MODULE__}) do + value + else + reason -> {:error, reason} + end + end + def lock_for_migrations(_adapter_meta, _options, fun) do + with :ok <- :global.sync(), + value when value != :aborted <- :global.trans({:lock_for_migrations, __MODULE__}, fun), + true <- :global.del_lock({:lock_for_migrations, __MODULE__}) do + value + else + reason -> {:error, reason} + end + end + + defp update_attributes({command, column, _type, _options}, attributes) when command in [:remove_if_exists, :remove] do + attributes -- [column] + end + defp update_attributes({command, column, _type, _options}, attributes) when command in [:add_if_not_exists, :add] do + if column in attributes do + attributes + else + attributes ++ [column] + end + end + defp update_attributes(_change, attributes), do: attributes + + @doc false + def update_row(row, {:add, _column, _type, options}, _attributes) do + Tuple.append(row, options[:default]) + end + def update_row(row, {:add_if_not_exists, column, _type, options}, attributes) do + if column not in 
+
+  @doc false
+  def update_row(row, {:add, _column, _type, options}, _attributes) do
+    Tuple.append(row, options[:default])
+  end
+  def update_row(row, {:add_if_not_exists, column, _type, options}, attributes) do
+    if column not in attributes, do: Tuple.append(row, options[:default]), else: row
+  end
+  def update_row(row, {:remove, column, _type, _options}, attributes) do
+    Tuple.delete_at(row, Enum.find_index(attributes, &(&1 == column)))
+  end
+  def update_row(row, {:remove_if_exists, column, _type, _options}, attributes) do
+    if column in attributes, do: Tuple.delete_at(row, Enum.find_index(attributes, &(&1 == column))), else: row
+  end
+  def update_row(row, {:modify, column, type, options}, attributes) do
+    idx = Enum.find_index(attributes, &(&1 == column))
+    # `Enum.find_index/2` is zero-based while `:erlang.setelement/3` is
+    # one-based, so the write position has to be offset by one to target
+    # the same element that `elem/2` reads.
+    :erlang.setelement(idx + 1, row, cast(elem(row, idx), options[:from], type))
+  end
+  def update_row(schema, fields, row) do
+    schema.__schema__(:fields) -- schema.__schema__(:primary_key)
+    |> Enum.reduce({1, row}, fn column, {idx, row} ->
+      if value = fields[column] do
+        {idx + 1, :erlang.setelement(idx, row, value)}
+      else
+        {idx + 1, row}
+      end
+    end)
+    |> elem(1)
+  end
+
+  defp get_key([], [primary_key | _], fields), do: fields[primary_key]
+  defp get_key([primary_key], _columns, fields), do: fields[primary_key]
+  defp get_key(primary_keys, _columns, fields), do: Enum.reduce(primary_keys, {}, &Tuple.insert_at(&2, tuple_size(&2), fields[&1]))
+
+  defp cast(value, _from, :list), do: '#{value}'
+  defp cast(value, _from, :string), do: "#{value}"
+  defp cast(value, _from, :integer) when is_binary(value) or is_list(value) do
+    case Integer.parse("#{value}") do
+      {integer, ""} -> integer
+      result -> raise "Could not parse #{value} to integer, got: #{inspect(result)}"
+    end
+  end
+  defp cast(value, _from, :float) when is_binary(value) or is_list(value) do
+    case Float.parse("#{value}") do
+      {float, ""} -> float
+      result -> raise "Could not parse #{value} to float, got: #{inspect(result)}"
+    end
+  end
+
+  defp bindings(params, bindings \\ :erl_eval.new_bindings()) do
+    params
+    |> Enum.reduce({length(bindings), bindings}, fn v, {count, bindings} ->
+      count = count + 1
+      {count, :erl_eval.add_binding(:"PARAM#{count}", v, bindings)}
+    end)
+    |> elem(1)
+  end
+
+  @doc false
+  def coalesce(nil, nil), do: nil
+  def coalesce(nil, right), do: right
+  def coalesce(left, nil), do: left
+  def coalesce(left, _), do: left
+
+  @doc false
+  def like(left, right) do
+    String.match?(left, Regex.compile!(right))
+  end
+
+  @doc false
+  def ilike(left, right) do
+    String.match?(left, Regex.compile!(right, [:caseless]))
+  end
+
+  @doc false
+  def to_match_spec(adapter_meta, schema, filters) do
+    primary_key = schema.__schema__(:primary_key)
+    columns = schema.__schema__(:fields) -- primary_key
+    key = if length(primary_key) > 1, do: Enum.reduce(primary_key, {}, &Tuple.insert_at(&2, tuple_size(&2), filters[&1])), else: filters[hd(primary_key)]
+    row = if adapter_meta.adapter == EctoQLC.Adapters.Mnesia, do: {to_table(adapter_meta, schema.__schema__(:source), schema.__schema__(:prefix), []), key}, else: {key}
+    head = Enum.reduce(columns, row, fn column, head -> Tuple.insert_at(head, tuple_size(head), filters[column] || :"$#{tuple_size(head)}") end)
+    body = if adapter_meta.adapter == EctoQLC.Adapters.Mnesia, do: [{head}], else: [true]
+    conditions = []
+    match_spec = [{head, conditions, body}]
+    case :ets.test_ms(head, match_spec) do
+      {:error, reason} -> raise RuntimeError, "invalid MatchSpec: #{inspect reason}"
+      _ -> match_spec
+    end
+  end
+
+  @doc false
+  def prepare(%{adapter: adapter}, _query_meta, %Ecto.Query{lock: lock} = query, _params, _options) when not is_nil(lock) and adapter != EctoQLC.Adapters.Mnesia do
+    raise Ecto.QueryError, query: query, message: "#{List.last(Module.split(adapter))} adapter does not support locks"
+  end
+  def prepare(_adapter_meta, _query_meta, %Ecto.Query{with_ctes: with_ctes} = query, _params, _options) when not is_nil(with_ctes) do
+    raise Ecto.QueryError, query: query, message: "QLC adapter does not support CTE"
+  end
+  def prepare(_adapter_meta, _query_meta, %Ecto.Query{windows: windows} = query, _params, _options) when windows != [] do
+    raise Ecto.QueryError, query: query, message: "QLC adapter does not support windows"
+  end
+  def prepare(_adapter_meta, _query_meta, %Ecto.Query{combinations: combinations} = query, _params, _options) when combinations != [] do
+    raise Ecto.QueryError, query: query, message: "QLC adapter does not support combinations like: #{Enum.map_join(combinations, ", ", fn {k, _} -> k end)}"
+  end
+  def prepare(adapter_meta, query_meta, %Ecto.Query{} = query, params, options) do
+    if query.select && Enum.any?(query.select.fields, &has_fragment/1), do: raise Ecto.QueryError, query: query, message: "QLC adapter does not support fragment in select clauses"
+    if query.wheres |> Enum.flat_map(&(&1.subqueries)) |> Enum.any?(&has_parent_as/1), do: raise Ecto.QueryError, query: query, message: "QLC adapter does not support parent_as in a subquery's where clauses"
+    options = options(query, options)
+    prefix = options[:prefix] || query.from.prefix || query.prefix
+    order_bys = if query.distinct && Keyword.keyword?(query.distinct.expr), do: [query.distinct | query.order_bys], else: query.order_bys
+    {adapter_meta, query_meta, %{query |
+      order_bys: order_bys,
+      group_bys: group_bys(query),
+      updates: updates(adapter_meta, query, params),
+      offset: offset(query),
+      lock: lock(adapter_meta, query, prefix),
+      limit: limit(adapter_meta, query, prefix),
+    }, params, options}
+  end
+
+  defp lock(_adapter_meta, %Ecto.Query{lock: nil}, _prefix), do: nil
+  defp lock(adapter_meta, %Ecto.Query{lock: "write", from: %{source: {source, _module}}}, prefix), do: {{:table, to_table(adapter_meta, source, prefix, [])}, :write}
+  defp lock(adapter_meta, %Ecto.Query{lock: "read", from: %{source: {source, _module}}}, prefix), do: {{:table, to_table(adapter_meta, source, prefix, [])}, :read}
+  defp lock(adapter_meta, %Ecto.Query{lock: "sticky_write", from: %{source: {source, _module}}}, prefix), do: {{:table, to_table(adapter_meta, source, prefix, [])}, :sticky_write}
+  defp lock(_adapter_meta, %Ecto.Query{lock: lock} = query, _prefix), do: raise Ecto.QueryError, query: query, message: "Unsupported lock: #{inspect lock}, supported locks: write, read, sticky_write"
+
+  defp offset(%Ecto.Query{offset: %{expr: expr}}), do: expr
+  defp offset(%Ecto.Query{}), do: 0
+
+  defp limit(adapter_meta, %Ecto.Query{limit: nil, from: %{source: {source, _module}}}, prefix) do
+    mod = :"#{String.downcase(List.last(Module.split(adapter_meta.adapter)))}"
+    fun = if mod == :mnesia, do: :table_info, else: :info
+    case apply(mod, fun, [to_table(adapter_meta, source, prefix, adapter_meta.opts), :size]) do
+      :undefined -> 1
+      0 -> 500
+      limit -> limit
+    end
+  end
+  defp limit(_adapter_meta, %Ecto.Query{limit: %{expr: expr}}, _prefix), do: expr
+  defp limit(_adapter_meta, %Ecto.Query{limit: limit}, _prefix), do: limit || 500
+
+  defp group_bys(%Ecto.Query{group_bys: group_bys, sources: sources}) do
+    Enum.reduce(group_bys, [], fn
+      %Ecto.Query.QueryExpr{expr: [{:selected_as, [], [:date]}]}, acc -> acc
+
+      %Ecto.Query.QueryExpr{expr: expr}, acc ->
+        acc ++ for {{:., _, [{:&, _, [idx]}, column]}, _, _} <- expr do
+          module = elem(elem(sources, idx), 1)
+          {column, get_index(%{adapter: nil}, column, module.__schema__(:fields), module.__schema__(:primary_key))}
+        end
+    end)
+  end
+
+  defp updates(_adapter_meta, %Ecto.Query{updates: [] = updates}, _params), do: updates
+  defp updates(adapter_meta, %Ecto.Query{from: %{source: {_source, module}}, updates: updates}, params) do
+    addition = if adapter_meta.adapter == EctoQLC.Adapters.Mnesia, do: 2, else: 1
+    columns = module.__schema__(:fields)
+    Enum.flat_map(updates, fn %Ecto.Query.QueryExpr{expr: [set: set]} ->
+      Enum.map(set, fn
+        {column, {:^, _, [idx]}} -> {column, {Enum.find_index(columns, &(&1 == column)) + addition, Enum.at(params, idx)}}
+        {column, value} -> {column, {Enum.find_index(columns, &(&1 == column)) + addition, value}}
+      end)
+    end)
+  end
+
+  # Subqueries are currently not allowed to use parent_as, because we would
+  # have to plan which query to execute first -- for example when the subquery
+  # has to match on a FK from the main query or evaluate columns from the main
+  # query. In that case we would have to execute the main query before we
+  # could evaluate the subquery, and then filter the main query based on the
+  # subquery's result.
+  defp has_parent_as(fields, acc \\ false)
+  defp has_parent_as(%Ecto.SubQuery{query: query}, acc) do
+    has_parent_as(query.select.fields, acc) || if Enum.find(query.wheres, &has_parent_as(&1.expr, acc)), do: true, else: false
+  end
+  defp has_parent_as({_op, _meta, children}, acc), do: has_parent_as(children, acc)
+  defp has_parent_as(nil, acc), do: acc
+  defp has_parent_as([], acc), do: acc
+  defp has_parent_as([{{:., _, [{:parent_as, _, _}, _]}, _, _} | _fields], _acc), do: true
+  defp has_parent_as([{_op, _meta, children} | fields], acc), do: has_parent_as(children, acc) || has_parent_as(fields, acc)
+  defp has_parent_as([_ | fields], acc), do: has_parent_as(fields, acc)
+
+  defp has_fragment(fields, acc \\ false)
+  defp has_fragment(%Ecto.SubQuery{query: query}, acc), do: has_fragment(query.select.fields, acc)
+  defp has_fragment(%Ecto.Query.Tagged{value: value}, acc), do: has_fragment(value, acc)
+  defp has_fragment(nil, acc), do: acc
+  defp has_fragment([], acc), do: acc
+  defp has_fragment({:fragment, _meta, _children}, _acc), do: true
+  defp has_fragment([{{:., _, [{:fragment, _, _}, _]}, _, _} | _fields], _acc), do: true
+  defp has_fragment({_op, _meta, children}, acc), do: has_fragment(children, acc)
+  defp has_fragment({_op, children}, acc), do: has_fragment(children, acc)
+  defp has_fragment([{:fragment, _meta, _children} | _fields], _acc), do: true
+  defp has_fragment([{_op, _meta, children} | fields], acc), do: has_fragment(children, acc) || has_fragment(fields, acc)
+  defp has_fragment([_ | fields], acc), do: has_fragment(fields, acc)
+
+  defp has_aggregates(fields, acc \\ false)
+  defp has_aggregates(nil, acc), do: acc
+  defp has_aggregates([], acc), do: acc
+  defp has_aggregates([{_, {op, _meta, _children}} | _fields], false) when op in @aggregates, do: true
+  defp has_aggregates([{op, _meta, _children} | _fields], false) when op in @aggregates, do: true
+  defp has_aggregates([{_op, _meta, children} | fields], _acc) do
+    if has_aggregates(children), do: true, else: has_aggregates(fields)
+  end
+  defp has_aggregates([_ | fields], acc), do: has_aggregates(fields, acc)
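+  # These walkers scan the planned query AST for constructs QLC cannot
+  # translate. For orientation, a `parent_as(:user).id` access arrives as a
+  # node shaped roughly like:
+  #
+  #     {{:., [], [{:parent_as, [], [:user]}, :id]}, [], []}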
+
+  defp options(%Ecto.Query{} = query, options) do
+    unique = unique?(query)
+    if options[:unique] && unique, do: raise Ecto.QueryError, query: query, message: "QLC does not support mixing distinct in queries and unique options"
+    options
+    # |> Keyword.put_new(:unique, unique)
+    |> Enum.take_while(fn
+      {k, _v} -> k in ~w[max_lookup cache join lookup unique]a
+      k -> k in ~w[cache unique]a
+    end)
+    |> Enum.map(fn
+      {:join, join} when join not in ~w[any merge lookup nested_loop]a -> raise Ecto.QueryError, query: query, message: "QLC only supports: :any, :merge, :lookup or :nested_loop joins, got: `#{inspect(join)}`"
+      x -> x
+    end)
+  end
+
+  defp unique?(%Ecto.Query{distinct: %Ecto.Query.QueryExpr{}}), do: false
+  defp unique?(%Ecto.Query{}), do: false
+
+  defp to_qlc(:subquery = operator, query), do: '[#{to_expression(operator, query)} || #{to_qualifiers(query)}]'
+  defp to_qlc(operator, query), do: '[#{to_expression(operator, query)} || #{to_qualifiers(query)}].'
+
+  defp to_expression(operator, {adapter_meta, _query_meta, query, _params, options} = q) do
+    mod = :"#{String.downcase(List.last(Module.split(adapter_meta.adapter)))}"
+    count = tuple_size(query.sources) - 1
+    if operator in ~w[delete_all update_all]a do
+      if query.select do
+        '{#{Enum.map_join(query.select.fields, ", ", &expr(&1, q))}}'
+      else
+        '#{Enum.map_join(0..count, ", ", fn idx -> "#{String.upcase(String.first(elem(elem(query.sources, idx), 0)))}#{idx}" end)}'
+      end
+    else
+      '{#{Enum.map_join(0..count, ", ", fn idx ->
+        case elem(query.sources, idx) do
+          {<<s::binary-size(1), _::binary>> = source, nil, prefix} ->
+            table = to_table(adapter_meta, source, prefix, options)
+            [primary_keys | fields] = if mod == :mnesia and table in :mnesia.system_info(:tables), do: :mnesia.table_info(table, :attributes), else: [:version, :inserted_at]
+            primary_keys = if source == "schema_migrations", do: [:version], else: [primary_keys]
+            Enum.map_join(fields, ", ", &to_element(adapter_meta, &1, fields, primary_keys, "#{String.upcase(s)}#{idx}"))
+
+          {<<s::binary-size(1), _::binary>>, module, _} ->
+            Enum.map_join(module.__schema__(:fields), ", ", &to_element(adapter_meta, &1, module.__schema__(:fields), module.__schema__(:primary_key), "#{String.upcase(s)}#{idx}"))
+
+          %{query: %{sources: {{<<s::binary-size(1), _::binary>>, module, _}}}} ->
+            Enum.map_join(module.__schema__(:fields), ", ", &to_element(adapter_meta, &1, module.__schema__(:fields), module.__schema__(:primary_key), "#{String.upcase(s)}#{idx}"))
+
+          %{query: query} ->
+            {<<s::binary-size(1), _::binary>>, module, _} = elem(query.sources, idx)
+            Enum.map_join(module.__schema__(:fields), ", ", &to_element(adapter_meta, &1, module.__schema__(:fields), module.__schema__(:primary_key), "#{String.upcase(s)}#{idx}"))
+
+        end
+      end)}}'
+    end
+  end
+
+  defp to_qualifiers({adapter_meta, _query_meta, query, _params, options} = q) do
+    prefix = options[:prefix] || query.from.prefix || query.prefix
+    options = options(query, options)
+    count = tuple_size(query.sources) - 1
+    mod = :"#{String.downcase(List.last(Module.split(adapter_meta.adapter)))}"
+    options = Keyword.merge(options, [n_objects: query.limit])
+    take = if mod == :mnesia, do: ~w[lock traverse n_objects]a, else: ~w[traverse n_objects]a
+    table_opts = Keyword.take(options, take)
+    generators = Enum.map_join(0..count, ", ", fn idx ->
+      case elem(query.sources, idx) do
+        {<<s::binary-size(1), _::binary>> = source, _module, _} ->
+          "#{String.upcase(s)}#{idx} <- #{mod}:table('#{to_table(adapter_meta, source, prefix, options)}', [#{Enum.map_join(table_opts, ", ", fn {k, v} -> "{#{k}, #{v}}" end)}])"
+        %{query: %{sources: {{<<s::binary-size(1), _::binary>> = source, _module, _}}}} ->
+          "#{String.upcase(s)}#{idx} <- #{mod}:table('#{to_table(adapter_meta, source, prefix, options)}', [#{Enum.map_join(table_opts, ", ", fn {k, v} -> "{#{k}, #{v}}" end)}])"
+
+        %{query: %{sources: sources}} ->
+          {<<s::binary-size(1), _::binary>> = source, _module, _} = elem(sources, idx)
+          "#{String.upcase(s)}#{idx} <- #{mod}:table('#{to_table(adapter_meta, source, prefix, options)}', [#{Enum.map_join(table_opts, ", ", fn {k, v} -> "{#{k}, #{v}}" end)}])"
+      end
+    end)
+    filters = Enum.map_join(query.joins, " andalso ", fn %Ecto.Query.JoinExpr{prefix: _prefix, on: %Ecto.Query.QueryExpr{expr: expr}} -> expr(expr, q) end)
+    guards = wheres(query.wheres, q, [])
+    cond do
+      filters == "" and guards == "" -> generators
+      guards == "" -> "#{generators}, #{filters}"
+      filters == "" -> "#{generators}, #{guards}"
+      true -> "#{generators}, #{filters}, #{guards}"
+    end
+  end
+
+  defp wheres([], _query, [" andalso "]), do: ""
+  defp wheres([], _query, acc), do: to_string(acc)
+  defp wheres([%Ecto.Query.BooleanExpr{expr: expr} | rest], query, [] = acc), do: wheres(rest, query, [expr(expr, query) | acc])
+  defp wheres([%Ecto.Query.BooleanExpr{op: op, expr: expr} | rest], query, acc), do: wheres(rest, query, [acc] ++ " #{to_erlang_term(op)} #{expr(expr, query)}")
+
+  defp to_query_handle(_operator, {_adapter_meta, _query_meta, query, params, options}, qlc) do
+    Enum.reduce(query.order_bys, :qlc.string_to_handle(qlc, options, bindings(params)), fn %{expr: expr}, qh ->
+      Enum.reduce(expr, qh, fn
+        {k, {{:., _, [{:&, _, [idx]}, column]}, _, _}}, qh when k in ~w[asc desc]a ->
+          module = elem(elem(query.sources, idx), 1)
+          columns = module.__schema__(:fields)
+          primary_key = module.__schema__(:primary_key)
+
+          key = case primary_key do
+            [_primary_key] ->
+              Enum.find_index(columns, &(&1 == column)) + 1
+            primary_keys ->
+              if idx = Enum.find_index(columns -- primary_keys, &(&1 == column)), do: idx + length(primary_keys), else: 1
+          end
+          :qlc.keysort(key, qh, order: to_order(query, k))
+
+        {k, _v}, qh ->
+          :qlc.sort(qh, order: to_order(query, k))
+      end)
+    end)
+  end
+
+  defp to_order(_query, :asc), do: :ascending
+  defp to_order(_query, :desc), do: :descending
+  defp to_order(query, order), do: raise Ecto.QueryError, query: query, message: "QLC does not support ordering by: #{inspect order}"
+
+  defp expr({_, _, [{{_, _, [{:parent_as, _, _}, _]}, _, _}, _]}, _query), do: ""
+  defp expr({_, _, [_, {{_, _, [{:parent_as, _, _}, _]}, _, _}]}, _query), do: ""
+  defp expr({:exists, _, [%Ecto.SubQuery{} = subquery]}, {adapter_meta, _query_meta, query, params, options}) do
+    execute(adapter_meta, query, {:all, subquery.query}, params, options)
+    |> elem(1)
+    |> List.flatten()
+    |> Enum.empty?()
+    |> Kernel.not()
+    |> to_string()
+  end
+  defp expr({op, _, [left, {o, _, [%Ecto.SubQuery{} = subquery]}]}, {adapter_meta, _query_meta, query, params, options} = q) when o in ~w[all any]a do
+    values =
+      execute(adapter_meta, query, {:all, subquery.query}, params, options)
+      |> elem(1)
+      |> List.flatten()
+
+    "apply('Elixir.Enum', '#{o}?', [#{expr(values, q)}, fun(Val) -> #{expr(left, q)} #{to_erlang_term(op)} Val end])"
+  end
+  defp expr({:sum, _, [expr]}, query) do
+    "{sum, #{expr(expr, query)}}"
+  end
+  defp expr({:fragment, _, fragment}, query) do
+    fragment
+    |> Enum.reduce([], fn
+      {:raw, raw}, acc -> acc ++ [raw]
+      {:expr, {expr, _, _}}, acc -> acc ++ [expr(expr, query)]
+    end)
+    |> to_string()
+  end
+  defp expr({:not = operator, mdl, [{:in, mdr, [left, %Ecto.SubQuery{} = subquery]}]}, {adapter_meta, _query_meta, query, params, options} = q), do: expr({operator, mdl, [{:in, mdr, [left, elem(execute(adapter_meta, query, {:all, subquery.query}, params, options), 1)]}]}, q)
+  defp expr({:not = operator, _, [{:in, _, _} = expr]}, query), do: unroll(expr, query, operator)
+  defp expr({:not = operator, [], [{:is_nil, _, [{{:., _, [{:&, _, [index]}, column]}, _, _}]}]}, {adapter_meta, _query_meta, query, _params, _options}) do
+    {<<s::binary-size(1), _::binary>>, module, _prefix} = elem(query.sources, index)
+    "#{to_element(adapter_meta, column, module.__schema__(:fields), module.__schema__(:primary_key), "#{String.upcase(s)}#{index}")} #{to_erlang_term(operator)} nil"
+  end
+  defp expr({:not, _, [expr]}, query) do
+    "(#{expr(expr, query)}) == false"
+  end
+  defp expr({operator, _, [{l, _, _} = left, {r, _, _} = right]}, query) when operator in ~w[> < >= <= == === !=]a and (l == :datetime_add or r == :datetime_add) do
+    # `compare/2` returns :lt | :eq | :gt, so >= is "not lt" and <= is "not gt".
+    case operator do
+      :> -> "apply('Elixir.DateTime', compare, [#{expr(left, query)}, #{expr(right, query)}]) == gt"
+      :< -> "apply('Elixir.DateTime', compare, [#{expr(left, query)}, #{expr(right, query)}]) == lt"
+      :>= -> "apply('Elixir.DateTime', compare, [#{expr(left, query)}, #{expr(right, query)}]) /= lt"
+      :<= -> "apply('Elixir.DateTime', compare, [#{expr(left, query)}, #{expr(right, query)}]) /= gt"
+      :== -> "apply('Elixir.DateTime', compare, [#{expr(left, query)}, #{expr(right, query)}]) == eq"
+      :=== -> "apply('Elixir.DateTime', compare, [#{expr(left, query)}, #{expr(right, query)}]) == eq"
+      :!= -> "apply('Elixir.DateTime', compare, [#{expr(left, query)}, #{expr(right, query)}]) /= eq"
+    end
+  end
+  defp expr({operator, _, [left, right]}, query) when operator in ~w[not or and > < >= <= == === != + - * /]a do
+    "#{expr(left, query)} #{to_erlang_term(operator)} #{expr(right, query)}"
+  end
+  defp expr(%Ecto.Query.Tagged{value: expr}, query), do: expr(expr, query)
+  defp expr({:json_extract_path, _, [left, right]}, query) do
+    "apply('Elixir.EctoQLC.Adapters.QLC', get_in, [#{expr(left, query)}, #{expr(right, query)}])"
+  end
+  defp expr({:like, _, [left, match]}, query) do
+    "apply('Elixir.EctoQLC.Adapters.QLC', 'like', [#{expr(left, query)}, #{expr(match, query)}]) == true"
+  end
+  defp expr({:ilike, _, [left, match]}, query) do
+    "apply('Elixir.EctoQLC.Adapters.QLC', 'ilike', [#{expr(left, query)}, #{expr(match, query)}]) == true"
+  end
+  defp expr({:datetime_add, _, [left, right, interval]}, query) do
+    "apply('Elixir.DateTime', add, [#{expr(left, query)}, #{interval_to_seconds(interval) * expr(right, query)}])"
+  end
+  defp expr({:date_add, _, [left, right, interval]}, query) do
+    "apply('Elixir.Date', add, [#{expr(left, query)}, #{round(interval_to_days(interval) * expr(right, query))}])"
+  end
+  defp expr({:count, _, [{expr, [], []}, :distinct]}, query), do: expr(expr, query)
+  defp expr({:count, _, [{expr, [], []}]}, query), do: expr(expr, query)
+  defp expr({:coalesce, _, [left, right]}, query) do
+    "apply('Elixir.EctoQLC.Adapters.QLC', coalesce, [#{expr(left, query)}, #{expr(right, query)}])"
+  end
+  defp expr({:parent_as, _, [key]}, {_adapter_meta, query_meta, _query, _params, _options}), do: query_meta.aliases[key]
+  defp expr({:., _, [{:&, _, [idx]}, column]}, {adapter_meta, _query_meta, query, _params, options}) do
+    case elem(query.sources, idx) do
+      {<<s::binary-size(1), _::binary>> = source, nil, prefix} when adapter_meta.adapter == EctoQLC.Adapters.Mnesia ->
+        attributes = :mnesia.table_info(to_table(adapter_meta, source, prefix, options), :attributes)
+        to_element(adapter_meta, column, tl(attributes), [hd(attributes)], "#{String.upcase(s)}#{idx}")
+
+      {<<s::binary-size(1), _::binary>>, module, _prefix} ->
+        to_element(adapter_meta, column, module.__schema__(:fields), module.__schema__(:primary_key), "#{String.upcase(s)}#{idx}")
+    end
+  end
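+  # Comparisons involving datetime_add cannot rely on Erlang term order, so
+  # they are rewritten to calls into Elixir.DateTime.compare/2. For example,
+  # `u.inserted_at > ago(3, "month")` compiles to something along the lines
+  # of (element position illustrative):
+  #
+  #     apply('Elixir.DateTime', compare, [element(4, U0), PARAM1]) == gt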
+  defp expr({:., _, [{:parent_as, _, [key]}, column]}, {adapter_meta, query_meta, _query, _params, _options}) do
+    idx = query_meta.aliases[key]
+    {<<s::binary-size(1), _::binary>>, module, _prefix} = elem(query_meta.sources, query_meta.aliases[key])
+    to_element(adapter_meta, column, module.__schema__(:fields), module.__schema__(:primary_key), "#{String.upcase(s)}#{idx}")
+  end
+  defp expr({:in, metadata, [left, %Ecto.SubQuery{} = subquery]}, {adapter_meta, _query_meta, query, params, options} = q), do: expr({:in, metadata, [left, elem(execute(adapter_meta, query, {:all, subquery.query}, params, options), 1)]}, q)
+  defp expr({:in, _, _} = expr, query), do: unroll(expr, query, :==)
+  defp expr({:is_nil, _, [expr]}, query), do: "#{expr(expr, query)} == nil"
+  defp expr({:^, _, [ix]}, _query), do: "PARAM#{ix + 1}"
+  defp expr({expr, [], []}, query), do: expr(expr, query)
+  defp expr(expr, _query), do: to_erlang_term(expr)
+
+  defp expr({:filter, _, [expr, filter]}, group, query) do
+    expr(expr, Enum.filter(group, &expr(filter, &1, query)), query)
+  end
+  defp expr({:count, _, []}, group, _query), do: length(group)
+  defp expr({:count, _, [expr, :distinct]}, group, query) do
+    group
+    |> Enum.uniq_by(&expr(expr, &1, query))
+    |> Enum.count()
+  end
+  defp expr({:avg, _, [expr]}, group, query) do
+    case Enum.map(group, &expr(expr, &1, query)) do
+      [] -> 0
+      values -> Enum.sum(values) / length(values)
+    end
+  end
+  defp expr({op, _, [expr]}, group, query) when op in @aggregates do
+    case Enum.map(group, &expr(expr, &1, query)) do
+      [] -> 0
+      values -> apply(Enum, op, [values])
+    end
+  end
+  defp expr({:in, _, [left, right]}, row, query), do: expr(left, row, query) in expr(right, row, query)
+  defp expr({:and, _, [left, right]}, row, query), do: expr(left, row, query) and expr(right, row, query)
+  defp expr({:or, _, [left, right]}, row, query), do: expr(left, row, query) or expr(right, row, query)
+  defp expr({op, _, [left, right]}, row, query) when op in @operators, do: apply(Kernel, op, [expr(left, row, query), expr(right, row, query)])
+  defp expr({op, _, _} = expr, [row | _] = group, query) when op not in @aggregates and is_tuple(row), do: expr(expr, List.first(group), query)
+  defp expr({:parent_as, _, [key]}, _row, {_adapter_meta, query_meta, _query, _params, _options}), do: query_meta.aliases[key]
+  defp expr({:&, _, [idx]}, _row, _query), do: idx
+  defp expr({{:., _, [_expr, _column]}, _, _} = expr, row, {adapter_meta, query_meta, %{query: query}, params, options}), do: expr(expr, row, {adapter_meta, query_meta, query, params, options})
+  defp expr({{:., _, [expr, column]}, _, _}, row, {_adapter_meta, _query_meta, query, _params, _options} = q) do
+    idx = expr(expr, row, q)
+    case elem(query.sources, idx) do
+      %{query: query} ->
+        {_source, module, _prefix} = elem(query.sources, idx)
+
+        base = if idx > 0, do: Enum.reduce(0..idx - 1, 0, fn idx, acc ->
+          {_source, module, _prefix} = elem(query.sources, idx)
+          length(module.__schema__(:fields)) + acc
+        end), else: 0
+        elem(row, base + Enum.find_index(module.__schema__(:fields), &(&1 == column)))
+
+      {"schema_migrations", nil, _prefix} -> elem(row, 0)
+
+      {_source, module, _prefix} ->
+        base = if idx > 0, do: Enum.reduce(0..idx - 1, 0, fn idx, acc ->
+          {_source, module, _prefix} = elem(query.sources, idx)
+          length(module.__schema__(:fields)) + acc
+        end), else: 0
+        elem(row, base + Enum.find_index(module.__schema__(:fields), &(&1 == column)))
+    end
+  end
+  defp expr({:^, _, [idx]}, _row, {_adapter_meta, _query_meta, _query, params, _options}), do: Enum.at(params, idx, idx)
+  defp expr({:like, _, [left, right]}, row, query), do: String.match?(expr(left, row, query), 
Regex.compile!(expr(right, row, query))) + defp expr({:ilike, _, [left, right]}, row, query), do: String.match?(expr(left, row, query), Regex.compile!(expr(right, row, query), [:caseless])) + defp expr({:json_extract_path, _, [left, right]}, row, query), do: Enum.reduce(expr(right, row, query), expr(left, row, query), fn + k, %_{} = struct -> Map.get(struct, String.to_existing_atom(k)) + k, data -> data[k] || data[String.to_existing_atom(k)] + end) + defp expr({operator, md, [%Ecto.Query.Tagged{value: expr}, right, interval]}, row, query) when operator in ~w[datetime_add date_add]a do + expr({operator, md, [expr, right, interval]}, row, query) + end + defp expr({operator, md, [left, %Ecto.Query.Tagged{value: expr}, interval]}, row, query) when operator in ~w[datetime_add date_add]a do + expr({operator, md, [left, expr, interval]}, row, query) + end + defp expr({:datetime_add, _, [left, right, interval]}, row, query) do + DateTime.add(expr(left, row, query), interval_to_seconds(interval) * expr(right, row, query), :second) + end + defp expr({:date_add, _, [left, right, interval]}, row, query) do + Date.add(expr(left, row, query), round(interval_to_days(interval) * expr(right, row, query))) + end + defp expr({op, _, [left, right]}, row, query), do: apply(__MODULE__, op, [expr(left, row, query), expr(right, row, query)]) + defp expr(%Ecto.Query.Tagged{value: expr}, row, query), do: expr(expr, row, query) + defp expr({_selected_as, expr}, row, query), do: expr(expr, row, query) + defp expr(%Ecto.SubQuery{} = subquery, _row, {adapter_meta, _query_meta, query, params, options}), do: execute(adapter_meta, query, {:all, subquery.query}, params, options) |> elem(1) |> List.flatten() + defp expr(expr, _row, _query), do: expr + + @doc false + def select(rows, :all, {adapter_meta, _query_meta, query, _params, _options} = q) do + if query.select && has_aggregates(query.select.fields) do + rows = rows + |> Enum.group_by(&Enum.map_join(query.group_bys, ":", fn {_column, idx} -> :erlang.element(idx, &1) end)) + |> Map.values() + |> Enum.map(fn group -> Enum.map(query.select.fields, &expr(&1, group, q)) end) + |> distinct(query, adapter_meta) + |> offset(query, 0) + |> Enum.take(query.limit) + + {length(rows), rows} + else + rows = rows + |> Enum.map(fn row -> + if query.select do + Enum.map(query.select.fields, &expr(&1, row, q)) + else + row + end + end) + |> distinct(query, adapter_meta) + |> offset(query, 0) + |> Enum.take(query.limit) + + {length(rows), rows} + end + end + def select(rows, :delete_all, {adapter_meta, _query_meta, %Ecto.Query{from: %{source: {source, _module}}} = query, _params, options}) do + mod = :"#{String.downcase(List.last(Module.split(adapter_meta.adapter)))}" + prefix = options[:prefix] || query.from.prefix || query.prefix + table = to_table(adapter_meta, source, prefix, options) + rows + |> distinct(query, adapter_meta) + |> offset(query, 0) + |> Enum.take(query.limit) + |> Enum.reduce({0, nil}, fn row, {count, acc} -> + args = if mod == :mnesia, do: [:erlang.setelement(1, row, table)], else: [table, row] + if apply(mod, :delete_object, args) do + {count + 1, acc} + else + {count, acc} + end + end) + end + def select(rows, :update_all, {%{adapter: EctoQLC.Adapters.Mnesia} = adapter_meta, _query_meta, %Ecto.Query{from: %{source: {source, _module}}} = query, _params, options}) do + {:atomic, value} = :mnesia.transaction(fn -> + table = to_table(adapter_meta, source, query.from.prefix || query.prefix, options) + rows + |> distinct(query, adapter_meta) + |> offset(query, 0) + |> 
Enum.take(query.limit) + |> Enum.reduce({0, nil}, fn row, {count, acc} -> + if _row = update(:mnesia, table, row, query) do + {count + 1, acc} + else + {count, acc} + end + end) + end) + value + end + def select(rows, :update_all, {adapter_meta, _query_meta, %Ecto.Query{from: %{source: {source, _module}}} = query, _params, options}) do + mod = :"#{String.downcase(List.last(Module.split(adapter_meta.adapter)))}" + table = to_table(adapter_meta, source, query.from.prefix || query.prefix, options) + rows + |> distinct(query, adapter_meta) + |> offset(query, 0) + |> Enum.take(query.limit) + |> Enum.reduce({0, nil}, fn row, {count, acc} -> + if _row = update(mod, table, row, query) do + {count + 1, acc} + else + {count, acc} + end + end) + end + + @doc false + def get_in(data, keys) do + Enum.reduce(keys, data, fn + k, %_{} = struct -> Map.get(struct, String.to_existing_atom(k)) + k, data -> data[k] || data[String.to_existing_atom(k)] + end) + end + + defp update(:ets = mod, table, row, query), do: mod.update_element(table, elem(row, 0), Keyword.values(query.updates)) + defp update(:mnesia = mod, _table, row, query), do: mod.write(Enum.reduce(Keyword.values(query.updates), row, fn {idx, value}, row -> :erlang.setelement(idx, row, value) end)) + defp update(:dets = mod, table, row, query), do: mod.insert(table, Enum.reduce(Keyword.values(query.updates), row, fn {idx, value}, row -> :erlang.setelement(idx, row, value) end)) + + @doc false + def delete(mod, adapter_meta, %{source: source, prefix: prefix, schema: schema}, filters, options) when mod in ~w[dets ets]a do + table = to_table(adapter_meta, source, prefix, options) + ms = to_match_spec(adapter_meta, schema, filters) + {query_time, count} = :timer.tc(mod, :select_delete, [table, ms]) + if 0 < count do + {:ok, []} + else + {:error, :stale} + end + |> log(source, "DELETE #{inspect source} #{inspect filters} MATCHSPEC #{inspect ms}", query_time, 0, 0, 0, :delete_all, adapter_meta.telemetry, filters, options ++ adapter_meta.opts) + end + def delete(:mnesia = mod, adapter_meta, %{source: source, prefix: prefix, schema: schema}, filters, options) do + ms = to_match_spec(adapter_meta, schema, filters) + fun = fn -> + with table <- to_table(adapter_meta, source, prefix, options), + [row] <- mod.select(table, ms), + :ok <- mod.delete_object(row) do + {:ok, []} + else + _ -> {:error, :stale} + end + end + {query_time, {:atomic, result}} = :timer.tc(mod, :transaction, [fun]) + log(result, source, "DELETE #{inspect source} #{inspect filters} MATCHSPEC #{inspect ms}", query_time, 0, 0, 0, :delete_all, adapter_meta.telemetry, filters, options ++ adapter_meta.opts) + end + + @doc false + def insert(:dets = mod, adapter_meta, %{schema: schema, source: source, prefix: prefix}, fields, _on_conflict, returning, options) do + table = to_table(adapter_meta, source, prefix, options) + primary_key = schema.__schema__(:primary_key) + columns = schema.__schema__(:fields) + key = get_key(primary_key, columns, fields) + record = to_record({key}, columns, primary_key, fields) + file = if dir = Application.get_env(mod, :dir), do: Path.join(dir, "#{table}"), else: table + options = Keyword.take(Keyword.merge([file: '#{file}'], options), ~w[access auto_save estimated_no_objects file max_no_slots min_no_slots keypos ram_file repair type]a) + {query_time, result} = with {:ok, ^table} <- mod.open_file(table, options), + {query_time, true} <- :timer.tc(mod, :insert_new, [table, record]), + :ok <- mod.sync(table) do + {query_time, {:ok, Enum.map(returning, &fields[&1])}} + 
else
+      {query_time, false} when is_integer(query_time) -> {query_time, {:invalid, [unique: "primary_key"]}}
+    end
+    log(result, source, "INSERT INTO #{inspect source} RETURNING #{inspect returning} #{inspect fields}", query_time, 0, 0, 0, :insert, adapter_meta.telemetry, fields, options ++ adapter_meta.opts)
+  end
+  def insert(:ets = mod, adapter_meta, %{schema: schema, source: source, prefix: prefix}, fields, _on_conflict, returning, options) do
+    table = to_table(adapter_meta, source, prefix, options)
+    primary_key = schema.__schema__(:primary_key)
+    columns = schema.__schema__(:fields)
+    key = get_key(primary_key, columns, fields)
+    record = to_record({key}, columns, primary_key, fields)
+
+    {query_time, result} = case :timer.tc(mod, :insert_new, [table, record]) do
+      {query_time, true} -> {query_time, {:ok, Enum.map(returning, &fields[&1])}}
+      {query_time, false} -> {query_time, {:invalid, [unique: "primary_key"]}}
+    end
+    log(result, source, "INSERT INTO #{inspect source} RETURNING #{inspect returning} #{inspect fields}", query_time, 0, 0, 0, :insert, adapter_meta.telemetry, fields, options ++ adapter_meta.opts)
+  end
+  def insert(:mnesia = mod, adapter_meta, %{schema: schema, source: source, prefix: prefix}, fields, _on_conflict, returning, options) do
+    table = to_table(adapter_meta, source, prefix, options)
+    primary_key = schema.__schema__(:primary_key)
+    columns = schema.__schema__(:fields)
+    key = get_key(primary_key, columns, fields)
+    record = to_record({table, key}, columns, primary_key, fields)
+    fun = fn ->
+      with [] <- mod.wread({table, key}),
+           :ok <- mod.write(record) do
+        {:ok, []}
+      else
+        [_record] -> {:invalid, [unique: "primary_key"]}
+
+        {:aborted, {:no_exists, ^table}} -> {:invalid, [no_exists: table]}
+
+        {:aborted, {:bad_type, _}} -> {:invalid, [bad_type: record]}
+      end
+    end
+
+    {query_time, result} = case :timer.tc(mod, :transaction, [fun]) do
+      {query_time, {:aborted, {:bad_type, ^record}}} -> {query_time, {:invalid, [bad_type: inspect(record)]}}
+      # Mirror the DETS/ETS clauses: return the values of the requested
+      # `returning` fields, not a list per field.
+      {query_time, {:atomic, :ok}} -> {query_time, {:ok, Enum.map(returning, &fields[&1])}}
+      {query_time, {:atomic, result}} -> {query_time, result}
+    end
+
+    log(result, source, "INSERT INTO #{inspect source} RETURNING #{inspect returning} #{inspect fields}", query_time, 0, 0, 0, :insert, adapter_meta.telemetry, fields, options ++ adapter_meta.opts)
+  end
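+  # All three backends write with insert-new semantics in `insert/7`, so
+  # inserting the same primary key twice surfaces as a constraint-style
+  # error. Illustrative:
+  #
+  #     Repo.insert(%User{id: 1})  #=> {:ok, %User{id: 1}}
+  #     Repo.insert(%User{id: 1})  #=> error on unique "primary_key"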
+
+  @doc false
+  def insert_all(:dets = mod, adapter_meta, %{schema: schema, source: source, prefix: prefix}, _header, rows, _on_conflict, returning, _placeholders, options) do
+    table = to_table(adapter_meta, source, prefix, options)
+    {query_time, rows} = if is_list(rows), do: {0, rows}, else: :timer.tc(adapter_meta.repo, :all, [rows])
+    primary_key = schema.__schema__(:primary_key)
+    columns = schema.__schema__(:fields)
+    records = for row <- rows do
+      key = get_key(primary_key, columns, row)
+      to_record({key}, columns, primary_key, row)
+    end
+    file = if dir = Application.get_env(mod, :dir), do: Path.join(dir, "#{table}"), else: table
+    options = Keyword.take(Keyword.merge([file: '#{file}'], options), ~w[access auto_save estimated_no_objects file max_no_slots min_no_slots keypos ram_file repair type]a)
+
+    {query_time, result} = with {open_time, {:ok, ^table}} <- :timer.tc(mod, :open_file, [table, options]),
+                                {insert_time, true} <- :timer.tc(mod, :insert_new, [table, records]),
+                                {sync_time, :ok} <- :timer.tc(mod, :sync, [table]) do
+      result = unless returning == [], do: Enum.map(rows, &Enum.map(returning, fn k -> &1[k] end))
+      {query_time + open_time + insert_time + sync_time, {length(records), result}}
+    else
+      {insert_time, false} when is_integer(insert_time) ->
+        {insert_time, {0, nil}}
+    end
+
+    log(result, source, "INSERT INTO #{inspect source} RETURNING #{inspect returning} #{inspect rows}", query_time, 0, 0, 0, :insert, adapter_meta.telemetry, rows, options ++ adapter_meta.opts)
+  end
+  def insert_all(:ets = mod, adapter_meta, %{schema: schema, source: source, prefix: prefix}, _header, rows, _on_conflict, returning, _placeholders, options) do
+    table = to_table(adapter_meta, source, prefix, options)
+    {query_time, rows} = if is_list(rows), do: {0, rows}, else: :timer.tc(adapter_meta.repo, :all, [rows])
+    primary_key = schema.__schema__(:primary_key)
+    columns = schema.__schema__(:fields)
+    records = for row <- rows, do: to_record({get_key(primary_key, columns, row)}, columns, primary_key, row)
+    result = unless returning == [], do: Enum.map(rows, &Enum.map(returning, fn k -> &1[k] end))
+    {insert_time, value} = :timer.tc(mod, :insert_new, [table, records])
+    query_time = query_time + insert_time
+    if value do
+      {length(records), result}
+    else
+      {0, result}
+    end
+    |> log(source, "INSERT INTO #{inspect source} RETURNING #{inspect returning} #{inspect rows}", query_time, 0, 0, 0, :insert, adapter_meta.telemetry, rows, options ++ adapter_meta.opts)
+  end
+  def insert_all(:mnesia = mod, adapter_meta, %{schema: schema, source: source, prefix: prefix}, _header, rows, _on_conflict, returning, _placeholders, options) do
+    table = to_table(adapter_meta, source, prefix, options)
+    {query_time, rows} = if is_list(rows), do: {0, rows}, else: :timer.tc(adapter_meta.repo, :all, [rows])
+    primary_key = schema.__schema__(:primary_key)
+    columns = schema.__schema__(:fields)
+    {:atomic, result} = mod.transaction(fn ->
+      Enum.reduce(rows, {0, []}, fn row, {count, acc} ->
+        key = get_key(primary_key, columns, row)
+        record = to_record({table, key}, columns, primary_key, row)
+        {insert_time, value} = :timer.tc(mod, :write, [record])
+        query_time = query_time + insert_time
+        if value == :ok do
+          # Keep the accumulator when nothing is returned; an `if` without
+          # an `else` would otherwise collapse it to nil.
+          acc = if returning != [], do: acc ++ [Enum.map(returning, fn k -> row[k] end)], else: acc
+          log({count + 1, acc}, source, "INSERT INTO #{inspect source} RETURNING #{inspect returning} #{inspect row}", query_time, 0, 0, 0, :insert, adapter_meta.telemetry, rows, options ++ adapter_meta.opts)
+        else
+          {count, acc}
+        end
+      end)
+    end)
+    result
+  end
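+  # `insert_all` keeps Ecto's {count, returning} contract across backends.
+  # A sketch with hypothetical ids:
+  #
+  #     Repo.insert_all(User, [%{id: 1}, %{id: 2}], returning: [:id])
+  #     #=> {2, [[1], [2]]}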
+
+  @doc false
+  def update(:dets = mod, adapter_meta, %{schema: schema, source: source, prefix: prefix}, fields, params, returning, options) do
+    table = to_table(adapter_meta, source, prefix, options)
+    key = to_key(params)
+    file = if dir = Application.get_env(mod, :dir), do: Path.join(dir, "#{table}"), else: table
+    options = Keyword.take(Keyword.merge([file: '#{file}'], options), ~w[access auto_save estimated_no_objects file max_no_slots min_no_slots keypos ram_file repair type]a)
+    {query_time, result} = with {:ok, ^table} <- mod.open_file(table, options),
+                                [row] <- mod.lookup(table, key),
+                                {query_time, :ok} <- :timer.tc(mod, :insert, [table, update_row(schema, fields, row)]),
+                                :ok <- mod.sync(table),
+                                {decode_time, row} <- :timer.tc(Enum, :map, [returning, &fields[&1]]) do
+      {query_time + decode_time, {:ok, row}}
+    else
+      {query_time, _error} when is_integer(query_time) -> {query_time, {:invalid, [unique: "primary_key"]}}
+    end
+    log(result, source, "UPDATE INTO #{inspect source} RETURNING #{inspect returning} #{inspect fields}", query_time, 0, 0, 0, :update_all, adapter_meta.telemetry, params, options ++ adapter_meta.opts)
+  end
+  def update(:ets = mod, adapter_meta, %{schema: schema, source: source, prefix: prefix}, fields, params, returning, options) do
+    key = to_key(params)
+    {records, _count} = Enum.reduce(schema.__schema__(:fields) -- schema.__schema__(:primary_key), {[], 1}, fn k, {acc, count} -> if Keyword.has_key?(fields, k), do: {[{count, fields[k]} | acc], count + 1}, else: {acc, count + 1} end)
+    table = to_table(adapter_meta, source, prefix, options)
+    {query_time, result} = with {query_time, true} <- :timer.tc(mod, :update_element, [table, key, records]),
+                                {decode_time, row} <- :timer.tc(Enum, :map, [returning, &fields[&1]]) do
+      {query_time + decode_time, {:ok, row}}
+    else
+      {query_time, _} -> {query_time, {:invalid, [unique: "primary_key"]}}
+    end
+    log(result, source, "UPDATE INTO #{inspect source} RETURNING #{inspect returning} #{inspect fields}", query_time, 0, 0, 0, :update_all, adapter_meta.telemetry, params, options ++ adapter_meta.opts)
+  end
+  def update(:mnesia = mod, adapter_meta, %{schema: schema, source: source, prefix: prefix}, fields, params, returning, options) do
+    table = to_table(adapter_meta, source, prefix, options)
+    key = to_key(params)
+    fun = fn ->
+      with [row] <- mod.wread({table, key}),
+           :ok <- mod.write(update_row(schema, fields, row)) do
+        {:ok, []}
+      else
+        {:error, _reason} ->
+          {:invalid, [unique: "primary_key"]}
+      end
+    end
+
+    {query_time, result} = case :timer.tc(mod, :transaction, [fun]) do
+      # Mirror the DETS/ETS clauses and return the requested fields.
+      {query_time, {:atomic, :ok}} -> {query_time, {:ok, Enum.map(returning, &fields[&1])}}
+      {query_time, {:atomic, result}} -> {query_time, result}
+    end
+    log(result, source, "UPDATE INTO #{inspect source} RETURNING #{inspect returning} #{inspect fields}", query_time, 0, 0, 0, :update_all, adapter_meta.telemetry, params, options ++ adapter_meta.opts)
+  end
+
+  defp distinct(rows, %Ecto.Query{distinct: nil}, _adapter_meta), do: rows
+  defp distinct(rows, %Ecto.Query{distinct: true}, _adapter_meta), do: Enum.uniq(rows)
+  defp distinct(rows, %Ecto.Query{distinct: %Ecto.Query.QueryExpr{expr: true}}, _adapter_meta), do: Enum.uniq(rows)
+  defp distinct(rows, %Ecto.Query{distinct: %Ecto.Query.QueryExpr{expr: expr}} = query, adapter_meta) do
+    Enum.reduce(expr, rows, fn
+      {sorter, {{:., _, [{:&, _, [index]}, column]}, _, _}}, rows ->
+        {_source, module, _prefix} = elem(query.sources, index)
+        idx = get_index(adapter_meta, column, module.__schema__(:fields), module.__schema__(:primary_key))
+        rows
+        |> Enum.sort_by(fn
+          [v] -> v
+          row -> Enum.at(row, idx)
+        end, sorter)
+        |> Enum.uniq_by(fn
+          [v] -> v
+          row -> Enum.at(row, idx)
+        end)
+
+      {sorter, {:json_extract_path, _, [{{:., _, [{:&, _, [index]}, column]}, _, _}, keys]}}, rows ->
+        {_source, module, _prefix} = elem(query.sources, index)
+        idx = get_index(adapter_meta, column, module.__schema__(:fields), module.__schema__(:primary_key))
+
+        rows
+        |> Enum.sort_by(fn
+          [v] -> v
+          row ->
+            Enum.reduce(keys, Enum.at(row, idx), fn
+              k, %_{} = struct -> Map.get(struct, String.to_existing_atom(k))
+              k, data -> data[k] || data[String.to_existing_atom(k)]
+            end)
+        end, sorter)
+        |> Enum.uniq_by(fn
+          [v] -> v
+          row ->
+            Enum.reduce(keys, Enum.at(row, idx), fn
+              k, %_{} = struct -> Map.get(struct, String.to_existing_atom(k))
+              k, data -> data[k] || data[String.to_existing_atom(k)]
+            end)
+        end)
+    end)
+  end
+
+  defp offset(rows, %{offset: offset}, offset), do: rows
+  defp offset([_ | rows], query, offset), do: offset(rows, query, offset + 1)
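+  # `distinct` and `offset` are evaluated in memory after QLC has produced
+  # the rows: rows are sorted by the distinct expression and de-duplicated on
+  # it, then `offset/3` drops rows one by one until the query's offset is
+  # reached. E.g. (illustrative) `distinct: [desc: u.meta["remote_ip"]]`
+  # sorts descending on that JSON path before keeping the first row per value.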
+
+  ## TBD: There is a caveat here and we should use ex_cldr to do a proper
+  ## calculation. E.g. a month in seconds is approximated as 30.44 days in
+  ## seconds, but individual months do not all have the same number of days.
+  defp interval_to_seconds("year"), do: 31557600
+  defp interval_to_seconds("month"), do: 2629800
+  defp interval_to_seconds("week"), do: 604800
+  defp interval_to_seconds("day"), do: 86400
+  defp interval_to_seconds("hour"), do: 3600
+  defp interval_to_seconds("minute"), do: 60
+  defp interval_to_seconds("second"), do: 1
+  defp interval_to_seconds("millisecond"), do: 0.001
+  defp interval_to_seconds("microsecond"), do: 0.000001
+
+  defp interval_to_days("year"), do: 365.25
+  defp interval_to_days("month"), do: 30.4375
+  defp interval_to_days("week"), do: 7
+  defp interval_to_days("day"), do: 1
+  defp interval_to_days("hour"), do: 0.04166667
+  defp interval_to_days("minute"), do: 6.944444e-4
+  defp interval_to_days("second"), do: 1.157407e-5
+  defp interval_to_days("millisecond"), do: 1.157407e-8
+  defp interval_to_days("microsecond"), do: 1.157407e-11
+
+  defp unroll({:in, _, [{{:., [], [{:&, [], [index]}, column]}, _, _}, values]}, {adapter_meta, _query_meta, query, _params, _options} = q, operator) do
+    [v | values] = unbind(values, q)
+    {<<s::binary-size(1), _::binary>>, module, _prefix} = elem(query.sources, index)
+    el = to_element(adapter_meta, column, module.__schema__(:fields), module.__schema__(:primary_key), "#{String.upcase(s)}#{index}")
+    values
+    |> Enum.reduce(["#{el} #{to_erlang_term(operator)} #{v}"], fn v, acc -> acc ++ [" orelse #{el} #{to_erlang_term(operator)} #{v}"] end)
+    |> to_string()
+  end
+
+  defp unbind({:^, _, [index]}, {_adapter_meta, _query_meta, _query, params, _options}), do: to_erlang_term(Enum.at(params, index))
+  defp unbind({:^, _, values}, {_adapter_meta, _query_meta, _query, params, _options}), do: Enum.map(values, &to_erlang_term(Enum.at(params, &1, &1)))
+  defp unbind([_ | _] = values, query), do: Enum.map(values, &unbind(&1, query))
+  defp unbind(value, _meta), do: to_erlang_term(value)
+
+  defp to_erlang_term(<<value::binary>>), do: '<<"#{value}">>'
+  defp to_erlang_term(:or), do: :orelse
+  defp to_erlang_term(:and), do: :andalso
+  defp to_erlang_term(:<=), do: :"=<"
+  defp to_erlang_term(:===), do: :"=:="
+  defp to_erlang_term(op) when op in ~w[!= not]a, do: :"/="
+  defp to_erlang_term(op) when op in ~w[> < >= == + - * /]a, do: "#{op}"
+  defp to_erlang_term(value) when is_list(value), do: "[#{Enum.map_join(value, ", ", &to_erlang_term(&1))}]"
+  defp to_erlang_term(value) when is_tuple(value), do: "#{value |> Tuple.to_list() |> Enum.map(&to_erlang_term/1) |> List.to_tuple() |> inspect}"
+  defp to_erlang_term(value) when is_atom(value), do: "'#{value}'"
+  defp to_erlang_term(value) when is_number(value), do: value
+  defp to_erlang_term(value) when is_map(value), do: "#" <> "{" <> Enum.map_join(value, ", ", fn {k, v} -> "#{to_erlang_term(k)} => #{to_erlang_term(v)}" end) <> "}"
+  defp to_erlang_term(value), do: value
+
+  defp to_record(tuple, [_ | columns], [], fields), do: Enum.reduce(columns, tuple, &Tuple.insert_at(&2, tuple_size(&2), fields[&1]))
+  defp to_record(tuple, columns, primary_key, fields), do: Enum.reduce(columns -- primary_key, tuple, &Tuple.insert_at(&2, tuple_size(&2), fields[&1]))
+
+  defp to_element(adapter_meta, column, columns, [_primary_key] = primary_keys, var) do
+    "element(#{get_index(adapter_meta, column, columns, primary_keys)}, #{var})"
+  end
+  defp to_element(adapter_meta, column, columns, primary_keys, var) do
+    idx = get_index(adapter_meta, column, columns, primary_keys)
+    if column in primary_keys do
+ 
"element(#{idx}, element(#{if adapter_meta.adapter == EctoQLC.Adapters.Mnesia, do: 2, else: 1}, #{var}))" + else + "element(#{idx}, #{var})" + end + end + + defp get_index(adapter_meta, column, columns, [_primary_key]) do + Enum.find_index(columns, &(&1 == column)) + |> Kernel.||(0) + |> Kernel.+(if(adapter_meta.adapter == EctoQLC.Adapters.Mnesia, do: 2, else: 1)) + end + defp get_index(adapter_meta, column, columns, primary_keys) do + if column in primary_keys do + Enum.find_index(primary_keys, &(&1 == column)) + 1 + else + Enum.find_index(columns -- primary_keys, &(&1 == column)) + length(primary_keys) + if(adapter_meta.adapter == EctoQLC.Adapters.Mnesia, do: 1, else: 1) + end + end + + defp to_key(params) do + case Keyword.values(params) do + [k] -> k + values -> List.to_tuple(values) + end + end + + defp to_table(adapter_meta, source, prefix, options) do + Module.concat([adapter_meta.adapter, options[:prefix] || prefix, source]) + end + + defp log(result, source, query, query_time, decode_time, queue_time, idle_time, operator, {repo, log, event_name} = _telemetry, params, opts) do + query = String.Chars.to_string(query) + stacktrace = Keyword.get(opts, :stacktrace) + if event_name = Keyword.get(opts, :telemetry_event, event_name) do + :telemetry.execute(event_name, + %{query_time: query_time, decode_time: decode_time, queue_time: queue_time, idle_time: idle_time, total_time: query_time + decode_time + queue_time + idle_time}, + %{type: :ecto_qlc_query, repo: repo, result: result, params: params, query: query, source: source, stacktrace: stacktrace, options: Keyword.get(opts, :telemetry_options, [])}) + end + fun = fn -> log_iodata(query_time, decode_time, queue_time, idle_time, repo, source, query, opts[:cast_params] || params, result, stacktrace) end + case Keyword.get(opts, :log, log) do + true -> + Logger.log(:debug, fun, ansi_color: operator_to_color(operator)) + result + false -> + :ok + result + level -> + Logger.log(level, fun, ansi_color: operator_to_color(operator)) + result + end + end + + defp log_iodata(query_time, decode_time, queue_time, idle_time, repo, source, query, params, result, stacktrace) do + result = if is_tuple(result) and is_atom(elem(result, 0)), do: String.upcase("#{elem(result, 0)}"), else: "OK" + stacktrace = case stacktrace do + [_ | _] = stacktrace -> + {module, function, arity, info} = last_non_ecto(Enum.reverse(stacktrace), repo, nil) + [?\n, IO.ANSI.light_black(), "↳ ", Exception.format_mfa(module, function, arity), log_stacktrace_info(info), IO.ANSI.reset()] + _ -> [] + end + ['QUERY ', + result, + " source=#{inspect(source)}", + format_time("db", query_time), + format_time("decode", decode_time), + format_time("queue", queue_time), + format_time("idle_time", idle_time), + ?\n, + query, + ?\s, + inspect(params, charlists: false), + List.wrap(stacktrace)] + end + + defp format_time(label, time) when time > 999, do: [?\s, label, ?=, :io_lib_format.fwrite_g(time / 1000), ?m, ?s] + defp format_time(label, time) when time <= 999, do: [?\s, label, ?=, "#{time}", ?μ, ?s] + + defp log_stacktrace_info([file: file, line: line] ++ _rest), do: [", at: ", file, ?:, Integer.to_string(line)] + defp log_stacktrace_info(_), do: [] + + defp last_non_ecto([{mod, _, _, _} | _stacktrace], repo, last) when mod == repo or mod in [Ecto.Repo.Queryable, Ecto.Repo.Schema, Ecto.Repo.Transaction], do: last + defp last_non_ecto([last | stacktrace], repo, _last), do: last_non_ecto(stacktrace, repo, last) + defp last_non_ecto([], _repo, last), do: last + + defp 
operator_to_color(:all), do: :cyan + defp operator_to_color(:update_all), do: :yellow + defp operator_to_color(:delete_all), do: :red + defp operator_to_color(:insert), do: :green + defp operator_to_color(_op), do: nil +end diff --git a/lib/ecto/adapters/qlc/application.ex b/lib/ecto/adapters/qlc/application.ex new file mode 100644 index 0000000..372d3da --- /dev/null +++ b/lib/ecto/adapters/qlc/application.ex @@ -0,0 +1,8 @@ +defmodule EctoQLC.Adapters.QLC.Application do + @moduledoc false + use Application + + def start(_type, _args) do + Supervisor.start_link([], [strategy: :one_for_one, name: EctoQLC.Adapters.QLC.Supervisor]) + end +end diff --git a/mix.exs b/mix.exs new file mode 100644 index 0000000..ffc7213 --- /dev/null +++ b/mix.exs @@ -0,0 +1,91 @@ +defmodule EctoQlc.MixProject do + use Mix.Project + + @source_url "https://github.com/schultzer/ecto_qlc" + @version "0.1.0" + + def project do + [ + app: :ecto_qlc, + version: @version, + elixir: "~> 1.14", + elixirc_paths: elixirc_paths(Mix.env()), + start_permanent: Mix.env() == :prod, + deps: deps(), + description: "QLC-based adapters for Ecto", + package: package(), + name: "Ecto QLC", + docs: docs(), + ] + end + + # Run "mix help compile.app" to learn about applications. + def application do + [ + extra_applications: [:logger, :mnesia], + mod: {EctoQLC.Adapters.QLC.Application, []} + ] + end + + # Specifies which paths to compile per environment. + defp elixirc_paths(:test), do: ~w[lib test/support] + defp elixirc_paths(:bench), do: ~w[lib test/support] + defp elixirc_paths(_), do: ~w[lib] + + # Run "mix help deps" to learn about dependencies. + defp deps do + [ + ecto_dep(), + ecto_sql_dep(), + {:telemetry, "~> 0.4.0 or ~> 1.0"}, + {:ex_doc, "~> 0.29", only: :dev}, + {:benchee, "~> 1.1.0", only: :bench}, + {:benchee_html, "~> 1.0", only: :bench}, + {:postgrex, ">= 0.0.0", only: [:bench, :test]}, + {:jason, ">= 0.0.0", only: [:bench, :test]} + ] + end + + defp ecto_dep do + if path = System.get_env("ECTO_PATH") do + {:ecto, path: path} + else + {:ecto, "~> 3.9.0"} + end + end + + defp ecto_sql_dep do + if path = System.get_env("ECTO_SQL_PATH") do + {:ecto_sql, path: path} + else + {:ecto_sql, "~> 3.9.0"} + end + end + + defp package do + [ + maintainers: ["Benjamin Schultzer"], + licenses: ["Apache-2.0"], + links: %{"GitHub" => @source_url}, + files: ~w(.formatter.exs mix.exs README.md CHANGELOG.md lib) + ] + end + + defp docs do + [ + main: "EctoQLC.Adapters.QLC", + source_ref: "v#{@version}", + canonical: "http://hexdocs.pm/ecto_qlc", + source_url: @source_url, + extras: ["CHANGELOG.md"], + skip_undefined_reference_warnings_on: ["CHANGELOG.md"], + groups_for_modules: [ + "Built-in adapters": [ + EctoQLC.Adapters.DETS, + EctoQLC.Adapters.ETS, + EctoQLC.Adapters.Mnesia + ] + ] + ] + end +end diff --git a/mix.lock b/mix.lock new file mode 100644 index 0000000..b62fc7d --- /dev/null +++ b/mix.lock @@ -0,0 +1,21 @@ +%{ + "benchee": {:hex, :benchee, "1.1.0", "f3a43817209a92a1fade36ef36b86e1052627fd8934a8b937ac9ab3a76c43062", [:mix], [{:deep_merge, "~> 1.0", [hex: :deep_merge, repo: "hexpm", optional: false]}, {:statistex, "~> 1.0", [hex: :statistex, repo: "hexpm", optional: false]}], "hexpm", "7da57d545003165a012b587077f6ba90b89210fd88074ce3c60ce239eb5e6d93"}, + "benchee_html": {:hex, :benchee_html, "1.0.0", "5b4d24effebd060f466fb460ec06576e7b34a00fc26b234fe4f12c4f05c95947", [:mix], [{:benchee, ">= 0.99.0 and < 2.0.0", [hex: :benchee, repo: "hexpm", optional: false]}, {:benchee_json, "~> 1.0", [hex: :benchee_json, repo: "hexpm", 
optional: false]}], "hexpm", "5280af9aac432ff5ca4216d03e8a93f32209510e925b60e7f27c33796f69e699"}, + "benchee_json": {:hex, :benchee_json, "1.0.0", "cc661f4454d5995c08fe10dd1f2f72f229c8f0fb1c96f6b327a8c8fc96a91fe5", [:mix], [{:benchee, ">= 0.99.0 and < 2.0.0", [hex: :benchee, repo: "hexpm", optional: false]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: false]}], "hexpm", "da05d813f9123505f870344d68fb7c86a4f0f9074df7d7b7e2bb011a63ec231c"}, + "connection": {:hex, :connection, "1.1.0", "ff2a49c4b75b6fb3e674bfc5536451607270aac754ffd1bdfe175abe4a6d7a68", [:mix], [], "hexpm", "722c1eb0a418fbe91ba7bd59a47e28008a189d47e37e0e7bb85585a016b2869c"}, + "db_connection": {:hex, :db_connection, "2.4.3", "3b9aac9f27347ec65b271847e6baeb4443d8474289bd18c1d6f4de655b70c94d", [:mix], [{:connection, "~> 1.0", [hex: :connection, repo: "hexpm", optional: false]}, {:telemetry, "~> 0.4 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "c127c15b0fa6cfb32eed07465e05da6c815b032508d4ed7c116122871df73c12"}, + "decimal": {:hex, :decimal, "2.0.0", "a78296e617b0f5dd4c6caf57c714431347912ffb1d0842e998e9792b5642d697", [:mix], [], "hexpm", "34666e9c55dea81013e77d9d87370fe6cb6291d1ef32f46a1600230b1d44f577"}, + "deep_merge": {:hex, :deep_merge, "1.0.0", "b4aa1a0d1acac393bdf38b2291af38cb1d4a52806cf7a4906f718e1feb5ee961", [:mix], [], "hexpm", "ce708e5f094b9cd4e8f2be4f00d2f4250c4095be93f8cd6d018c753894885430"}, + "earmark_parser": {:hex, :earmark_parser, "1.4.29", "149d50dcb3a93d9f3d6f3ecf18c918fb5a2d3c001b5d3305c926cddfbd33355b", [:mix], [], "hexpm", "4902af1b3eb139016aed210888748db8070b8125c2342ce3dcae4f38dcc63503"}, + "ecto": {:hex, :ecto, "3.9.2", "017db3bc786ff64271108522c01a5d3f6ba0aea5c84912cfb0dd73bf13684108", [:mix], [{:decimal, "~> 1.6 or ~> 2.0", [hex: :decimal, repo: "hexpm", optional: false]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: true]}, {:telemetry, "~> 0.4 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "21466d5177e09e55289ac7eade579a642578242c7a3a9f91ad5c6583337a9d15"}, + "ecto_sql": {:hex, :ecto_sql, "3.9.1", "9bd5894eecc53d5b39d0c95180d4466aff00e10679e13a5cfa725f6f85c03c22", [:mix], [{:db_connection, "~> 2.4.1 or ~> 2.5", [hex: :db_connection, repo: "hexpm", optional: false]}, {:ecto, "~> 3.9.0", [hex: :ecto, repo: "hexpm", optional: false]}, {:myxql, "~> 0.6.0", [hex: :myxql, repo: "hexpm", optional: true]}, {:postgrex, "~> 0.16.0 or ~> 1.0", [hex: :postgrex, repo: "hexpm", optional: true]}, {:tds, "~> 2.1.1 or ~> 2.2", [hex: :tds, repo: "hexpm", optional: true]}, {:telemetry, "~> 0.4.0 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "5fd470a4fff2e829bbf9dcceb7f3f9f6d1e49b4241e802f614de6b8b67c51118"}, + "ex_doc": {:hex, :ex_doc, "0.29.1", "b1c652fa5f92ee9cf15c75271168027f92039b3877094290a75abcaac82a9f77", [:mix], [{:earmark_parser, "~> 1.4.19", [hex: :earmark_parser, repo: "hexpm", optional: false]}, {:makeup_elixir, "~> 0.14", [hex: :makeup_elixir, repo: "hexpm", optional: false]}, {:makeup_erlang, "~> 0.1", [hex: :makeup_erlang, repo: "hexpm", optional: false]}], "hexpm", "b7745fa6374a36daf484e2a2012274950e084815b936b1319aeebcf7809574f6"}, + "jason": {:hex, :jason, "1.4.0", "e855647bc964a44e2f67df589ccf49105ae039d4179db7f6271dfd3843dc27e6", [:mix], [{:decimal, "~> 1.0 or ~> 2.0", [hex: :decimal, repo: "hexpm", optional: true]}], "hexpm", "79a3791085b2a0f743ca04cec0f7be26443738779d09302e01318f97bdb82121"}, + "makeup": {:hex, :makeup, "1.1.0", 
"6b67c8bc2882a6b6a445859952a602afc1a41c2e08379ca057c0f525366fc3ca", [:mix], [{:nimble_parsec, "~> 1.2.2 or ~> 1.3", [hex: :nimble_parsec, repo: "hexpm", optional: false]}], "hexpm", "0a45ed501f4a8897f580eabf99a2e5234ea3e75a4373c8a52824f6e873be57a6"}, + "makeup_elixir": {:hex, :makeup_elixir, "0.16.0", "f8c570a0d33f8039513fbccaf7108c5d750f47d8defd44088371191b76492b0b", [:mix], [{:makeup, "~> 1.0", [hex: :makeup, repo: "hexpm", optional: false]}, {:nimble_parsec, "~> 1.2.3", [hex: :nimble_parsec, repo: "hexpm", optional: false]}], "hexpm", "28b2cbdc13960a46ae9a8858c4bebdec3c9a6d7b4b9e7f4ed1502f8159f338e7"}, + "makeup_erlang": {:hex, :makeup_erlang, "0.1.1", "3fcb7f09eb9d98dc4d208f49cc955a34218fc41ff6b84df7c75b3e6e533cc65f", [:mix], [{:makeup, "~> 1.0", [hex: :makeup, repo: "hexpm", optional: false]}], "hexpm", "174d0809e98a4ef0b3309256cbf97101c6ec01c4ab0b23e926a9e17df2077cbb"}, + "nimble_parsec": {:hex, :nimble_parsec, "1.2.3", "244836e6e3f1200c7f30cb56733fd808744eca61fd182f731eac4af635cc6d0b", [:mix], [], "hexpm", "c8d789e39b9131acf7b99291e93dae60ab48ef14a7ee9d58c6964f59efb570b0"}, + "postgrex": {:hex, :postgrex, "0.16.5", "fcc4035cc90e23933c5d69a9cd686e329469446ef7abba2cf70f08e2c4b69810", [:mix], [{:connection, "~> 1.1", [hex: :connection, repo: "hexpm", optional: false]}, {:db_connection, "~> 2.1", [hex: :db_connection, repo: "hexpm", optional: false]}, {:decimal, "~> 1.5 or ~> 2.0", [hex: :decimal, repo: "hexpm", optional: false]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: true]}, {:table, "~> 0.1.0", [hex: :table, repo: "hexpm", optional: true]}], "hexpm", "edead639dc6e882618c01d8fc891214c481ab9a3788dfe38dd5e37fd1d5fb2e8"}, + "statistex": {:hex, :statistex, "1.0.0", "f3dc93f3c0c6c92e5f291704cf62b99b553253d7969e9a5fa713e5481cd858a5", [:mix], [], "hexpm", "ff9d8bee7035028ab4742ff52fc80a2aa35cece833cf5319009b52f1b5a86c27"}, + "telemetry": {:hex, :telemetry, "1.1.0", "a589817034a27eab11144ad24d5c0f9fab1f58173274b1e9bae7074af9cbee51", [:rebar3], [], "hexpm", "b727b2a1f75614774cff2d7565b64d0dfa5bd52ba517f16543e6fc7efcc0df48"}, +} diff --git a/priv/repo/migrations/00000000000001_create_users.exs b/priv/repo/migrations/00000000000001_create_users.exs new file mode 100644 index 0000000..711a678 --- /dev/null +++ b/priv/repo/migrations/00000000000001_create_users.exs @@ -0,0 +1,14 @@ +defmodule EctoQLC.Repo.Migrations.CreateUser do + use Ecto.Migration + + def change do + create table(:users) do + add :email, :string + add :password_hash, :string, default: "" + + timestamps(type: :utc_datetime_usec) + end + + create index(:users, [:email]) + end +end diff --git a/priv/repo/migrations/00000000000002_create_users_sessions.exs b/priv/repo/migrations/00000000000002_create_users_sessions.exs new file mode 100644 index 0000000..b88e63e --- /dev/null +++ b/priv/repo/migrations/00000000000002_create_users_sessions.exs @@ -0,0 +1,12 @@ +defmodule EctoQLC.Repo.Migrations.CreateUserSession do + use Ecto.Migration + + def change do + create table(:users_sessions) do + add :user_id, references(:users) + add :meta, :map + + timestamps(updated_at: false, type: :utc_datetime_usec) + end + end +end diff --git a/priv/repo/migrations/00000000000003_add_token_to_users_sessions.exs b/priv/repo/migrations/00000000000003_add_token_to_users_sessions.exs new file mode 100644 index 0000000..48df86e --- /dev/null +++ b/priv/repo/migrations/00000000000003_add_token_to_users_sessions.exs @@ -0,0 +1,9 @@ +defmodule EctoQLC.Repo.Migrations.AddTokenToUserSession do + use Ecto.Migration + + def change do + alter 
table(:users_sessions) do + add :token, :string + end + end +end diff --git a/test/ecto/dets_test.exs b/test/ecto/dets_test.exs new file mode 100644 index 0000000..b0194b5 --- /dev/null +++ b/test/ecto/dets_test.exs @@ -0,0 +1,507 @@ +defmodule EctoQLC.Adapters.DETSTest do + use EctoQLC.DataCase, repo: :dets + + describe "Ecto.Query.API" do + + test "!=/2" do + assert %User{} = Repo.one!(where(User, [u], u.email != "body")) + end + + test "*/2" do + assert %User{} = Repo.one!(where(User, [u], u.id * 1 == u.id)) + end + + test "+/2", %{user: user} do + assert %{id: user.id} == Repo.one!(select(User, [u], %{id: u.id + 0})) + end + + test "-/2", %{user: user} do + assert user.id == Repo.one!(select(User, [u], u.id - 0)) + end + + test "//2", %{user: user} do + assert user.id == Repo.one!(select(User, [u], u.id / 1)) + end + + test "/2" do + assert Repo.one!(select(User, [u], u.id > 0)) + end + + test ">=/2" do + assert Repo.one!(select(User, [u], u.id >= u.id)) + end + + test "ago/2" do + assert %User{} = Repo.one(where(User, [u], u.inserted_at > ago(3, "month"))) + end + + test "all/1" do + assert Repo.one(from(us in UserSession, select: avg(us.id), group_by: [us.user_id])) + assert %User{} = Repo.one(from u in User, where: u.id <= all(from(us in UserSession, select: avg(us.user_id), group_by: [us.user_id])) and u.id >= all(from(us in UserSession, select: avg(us.user_id), group_by: [us.user_id]))) + assert %User{email: "user@example.com"} = Repo.one(from u in User, where: u.id == all(from(u in User, select: max(u.id)))) + end + + test "and/2" do + assert %User{} = Repo.one(where(User, [u], u.email == "user@example.com" and u.id != 69)) + end + + test "any/1" do + assert %User{email: "user@example.com"} = Repo.one(from u in User, where: u.id == any(from(u in User, select: [u.id], where: u.email == "user@example.com"))) + end + + test "as/2" do + assert %User{} = Repo.one(from(User, as: :user)) + end + + test "avg/1" do + assert %User{} = Repo.one(group_by(select(User, [u], %{u | id: avg(u.id)}), [:email, :password_hash, :inserted_at, :updated_at])) + end + + test "coalesce/2" do + assert "user@example.com" == Repo.one(select(User, [u], u.email |> coalesce("NULL") |> coalesce("1") |> coalesce("0"))) + end + + test "count/0" do + assert 1 = Repo.one(select(User, [u], count())) + end + + test "count/1" do + assert 1 = Repo.one(select(User, [u], count(u.id))) + end + + test "count/3" do + Repo.insert!(%User{email: "user@example.com"}) + assert 2 = Repo.one(select(User, [u], count(u.id, :distinct))) + assert 1 = Repo.one(select(User, [u], count(u.email, :distinct))) + end + + test "date_add/3", %{user: user} do + assert %{inserted_at: inserted_at} = Repo.one(select(User, [u], %{inserted_at: date_add(^Date.utc_today(), 1, "month")})) + assert 30 == Date.diff(inserted_at, user.inserted_at) + assert %{inserted_at: inserted_at} = Repo.one(select(User, [u], %{inserted_at: date_add(type(u.inserted_at, :date), 1, "month")})) + assert 30 == Date.diff(inserted_at, user.inserted_at) + end + + test "datetime_add/3", %{user: user} do + assert %{inserted_at: inserted_at} = Repo.one(select(User, [u], %{u | inserted_at: datetime_add(type(^DateTime.utc_now(), :utc_datetime_usec), 1, "month")})) + assert 30 == DateTime.diff(inserted_at, user.inserted_at, :day) + assert %{inserted_at: inserted_at} = Repo.one(select(User, [u], %{u | inserted_at: datetime_add(u.inserted_at, 1, "month")})) + assert 30 == DateTime.diff(inserted_at, user.inserted_at, :day) + end + + test "exists/1" do + assert_raise Ecto.QueryError, 
~r/QLC adapter does not support parent_as in a subquery's where clauses/, fn -> + assert %User{} = Repo.one(from(User, as: :user, where: exists(from(us in UserSession, where: parent_as(:user).id == us.user_id and parent_as(:user).email != "email", select: 1)))) + end + assert %User{} = Repo.one(from(User, as: :user, where: exists(from(us in UserSession)))) + end + + test "field/2" do + assert %User{} = Repo.one(where(User, [u], fragment("byte_size(?)", field(u, :email)) == 16)) + end + + test "filter/2" do + assert 0 == Repo.one(from u in User, select: avg(u.id) |> filter(u.id < -1)) + assert 0 < Repo.one(from u in User, select: avg(u.id) |> filter(u.id >= -1)) + end + + test "fragment/1" do + refute Repo.one(where(User, [u], fragment("byte_size(?)", u.email) == 50)) + assert %User{} = Repo.one(where(User, [u], fragment("byte_size(?)", u.email) == 16)) + assert_raise Ecto.QueryError, ~r/QLC adapter does not support fragemnt in select clauses in query/, fn -> + assert %User{email: "user@example.com"} = + User + |> select([u], %{u | email: fragment("? ?", u.email, u.email)}) + |> where([u], fragment("byte_size(?)", u.email) == 16) + |> Repo.one() + end + end + + test "from_now/2" do + assert %User{} = Repo.one(where(User, [u], u.inserted_at < from_now(3, "month"))) + end + + test "ilike/2" do + refute Repo.one(where(User, [u], ilike(u.email, "aaaaa"))) + assert %User{} = Repo.one(where(User, [u], ilike(u.email, "USER@example.com"))) + end + + test "in/2" do + assert %User{} = Repo.one(where(User, [u], u.email in ["user@example.com", "USER@example.com"])) + end + + test "is_nil/2" do + assert %User{} = Repo.one(where(User, [u], not is_nil(u.email))) + end + + test "json_extract_path/2" do + assert ~w[localhost 0.0.0.0.0 0.0.5.0.0] == Repo.all(from(u in UserSession, order_by: u.meta["remote_ip"], select: u.meta["remote_ip"])) + end + + test "like/2" do + refute Repo.one(where(User, [u], ilike(u.email, "aaaaa"))) + assert %User{} = Repo.one(where(User, [u], like(u.email, "user@example.com"))) + end + + test "map/2" do + assert %{email: "user@example.com"} == Repo.one(from u in User, select: map(u, [:email])) + end + + test "max/1" do + assert %User{email: "user@example.com"} = Repo.one(group_by(select(User, [u], %{u | id: max(u.email)}), [:email, :password_hash, :inserted_at, :updated_at])) + end + + test "merge/2" do + assert %{left: "left", email: "user@example.com"} == Repo.one(from u in User, select: merge(%{left: "left"}, %{email: u.email})) + end + + test "min/1" do + assert %User{email: "user@example.com"} = Repo.one(group_by(select(User, [u], %{u | email: min(u.email)}), [:id, :email, :password_hash, :inserted_at, :updated_at])) + end + + test "not/1" do + assert %User{email: "user@example.com"} = Repo.one(where(User, [u], not(u.id == 69))) + end + + test "or/2" do + assert %User{email: "user@example.com"} = Repo.one(where(User, [u], u.id == 69 or u.email == "user@example.com")) + end + + test "selected_as/2", %{user: %{inserted_at: posted}} do + query = from u in User, + select: %{ + posted: selected_as(u.inserted_at, :date), + sum_visits: u.id |> coalesce(0) |> sum() |> selected_as(:sum_visits) + }, + group_by: selected_as(:date), + order_by: selected_as(:sum_visits) + + assert %{posted: ^posted} = Repo.one(query) + end + + test "struct/2" do + assert %User{email: "user@example.com", inserted_at: nil} = Repo.one(select(User, [u], struct(u, [:email]))) + end + + test "sum/1" do + assert %User{email: "user@example.com"} = Repo.one(group_by(select(User, [u], %{u | id: sum(u.id)}), 
[:email, :password_hash, :inserted_at, :updated_at])) + end + + test "type/2" do + email = "string" + assert %User{email: "string"} = Repo.one(select(User, [u], %{u | email: type(^email, u.email)})) + end + end + + describe "Ecto.Query" do + + test "distinct/3" do + assert ~w[localhost 0.0.0.0.0 0.0.5.0.0] == Repo.all(from(us in UserSession, distinct: true, order_by: [us.meta["remote_ip"]], select: us.meta["remote_ip"])) + assert ~w[0.0.0.0.0 0.0.5.0.0 localhost] == Repo.all(from(us in UserSession, distinct: us.meta["remote_ip"], order_by: [us.inserted_at], select: us.meta["remote_ip"])) + assert ~w[localhost 0.0.5.0.0 0.0.0.0.0] == Repo.all(from(us in UserSession, distinct: [desc: us.meta["remote_ip"]], order_by: [us.inserted_at], select: us.meta["remote_ip"])) + end + + test "dynamic/2" do + assert %User{} = Repo.one(where(User, ^dynamic([u], u.id != 69))) + end + + test "except/2" do + assert_raise Ecto.QueryError, ~r/QLC adapter does not support combinations like: except/, fn -> + assert nil == Repo.one(from u in User, select: u.email, except: ^(from u in User, select: u.email)) + end + end + + test "except_all/2" do + assert_raise Ecto.QueryError, ~r/QLC adapter does not support combinations like: except_all/, fn -> + assert nil == Repo.one(from u in User, select: u.email, except_all: ^(from u in User, select: u.email)) + end + end + + test "first/2" do + assert %User{email: "user@example.com"} = Repo.one(first(User)) + end + + test "from/2" do + assert %User{email: "user@example.com"} = Repo.one(from(User)) + end + + test "group_by/2" do + assert {"user@example.com", 1} == Repo.one(from(u in User, group_by: u.email, select: {u.email, count(u.id)})) + end + + test "having/3" do + assert {"user@example.com", 1} == Repo.one(from(u in User, group_by: u.email, having: avg(u.id) > 10, select: {u.email, count(u.id)})) + end + + test "intersect/2" do + assert_raise Ecto.QueryError, ~r/QLC adapter does not support combinations like: intersect/, fn -> + assert ["user@example.com"] == Repo.all(from u in User, select: u.email, intersect: ^(from u in User, select: u.email)) + end + end + + test "intersect_all/1" do + assert_raise Ecto.QueryError, ~r/QLC adapter does not support combinations like: intersect_all/, fn -> + assert ["user@example.com"] == Repo.all(from u in User, select: u.email, intersect_all: ^(from u in User, select: u.email)) + end + end + + test "join/5" do + assert [%User{email: "user@example.com"}] = + from(User, as: :user) + |> join(:left, [user: user], session in assoc(user, :sessions), on: not is_nil(session.meta["remote_ip"]), as: :sessions) + |> preload([sessions: sessions], [sessions: sessions]) + |> where([user: user], user.email == "user@example.com") + |> Repo.all() + end + + test "last/1" do + assert %User{email: "user@example.com"} = Repo.one(last(User)) + end + + test "limit/2" do + assert [%UserSession{}, %UserSession{}] = Repo.all(order_by(limit(UserSession, 2), [:user_id])) + end + + test "lock/2" do + assert_raise Ecto.QueryError, ~r/DETS adapter does not support locks in query/, fn -> + assert [] == Repo.all(from(u in User, where: u.email == "user@example.com", lock: "write")) + end + end + + test "offset/2" do + assert %UserSession{token: "C"} = Repo.one(order_by(offset(UserSession, 2), [:token])) + end + + test "or_having/2" do + assert %User{email: "user@example.com"} = User |> having([u], not is_nil(u.email)) |> or_having([u], u.email == "C") |> or_having([u], u.email == "user@example.com") |> group_by([:id]) |> Repo.one() + end + + test "or_where/0" do + 
assert %User{email: "user@example.com"} = User |> or_where(email: "B") |> where(email: "A") |> or_where(email: "user@example.com") |> Repo.one()
+ end
+
+ test "order_by/3" do
+ assert [%{token: "C"}, %{token: "B"}, %{token: "A"}] = Repo.all(select(order_by(UserSession, [desc: :token, asc: :id]), [u], u))
+ assert [%{token: "A"}, %{token: "B"}, %{token: "C"}] = Repo.all(order_by(UserSession, [u], [asc: u.token, desc: u.id]))
+ end
+
+ test "preload/3", %{user: user} do
+ assert %User{email: "user@example.com", sessions: [%UserSession{}, %UserSession{}, %UserSession{}]} = Repo.preload(user, [:sessions], force: true)
+ assert %User{email: "user@example.com", sessions: [%UserSession{}, %UserSession{}, %UserSession{}]} = Repo.preload(user, [:sessions], in_parallel: false)
+ assert %User{email: nil, sessions: [%UserSession{}, %UserSession{}, %UserSession{}]} = Repo.preload(%User{id: user.id}, [:sessions])
+ end
+
+ test "reverse_order/3" do
+ assert Repo.all(reverse_order(order_by(User, asc: :id))) == Repo.all(order_by(User, desc: :id))
+ end
+
+ test "select/1" do
+ assert ["user@example.com"] == Repo.all(select(User, [u], u.email))
+ assert [2] == Repo.all(select(User, [u], 2))
+ end
+
+ test "select_merge/2" do
+ assert %{email: "body"} == Repo.one(select_merge(select(User, [u], %{email: u.email}), %{email: "body"}))
+ end
+
+ test "subquery/2" do
+ assert Repo.one(where(User, [u], u.email in subquery(select(User, [:email]))))
+ refute Repo.one(where(User, [u], u.email not in subquery(select(User, [:email]))))
+ assert Repo.one(select(User, [u], u.email in subquery(select(User, [:email]))))
+ assert 0 < Repo.one(from u in subquery(where(User, email: "user@example.com")), select: avg(u.id))
+ assert [%User{email: "user@example.com"}] = Repo.all(join(User, :inner, [u], t in subquery(where(User, email: "user@example.com")), as: :users))
+ end
+
+ test "union/1" do
+ assert_raise Ecto.QueryError, ~r/QLC adapter does not support combinations like: union/, fn ->
+ assert ["user@example.com"] == Repo.all(from u in User, select: u.email, union: ^(from u in User, select: u.email))
+ end
+ end
+
+ test "union_all/1" do
+ assert_raise Ecto.QueryError, ~r/QLC adapter does not support combinations like: union_all/, fn ->
+ assert ["user@example.com", "user@example.com"] == Repo.all(from u in User, select: u.email, union_all: ^(from u in User, select: u.email))
+ end
+ end
+
+ test "update/3" do
+ now = DateTime.utc_now()
+ assert {1, nil} = Repo.update_all(update(User, [set: [email: "user@example.com", updated_at: ^now]]), [])
+ assert [%{email: "user@example.com", updated_at: ^now}] = Repo.all(where(User, [email: "user@example.com"]))
+ end
+
+ test "where/2" do
+ assert [%User{email: "user@example.com"}] = Repo.all(where(User, [email: "user@example.com"]))
+ end
+
+ test "windows/2" do
+ assert_raise Ecto.QueryError, ~r/QLC adapter does not support windows/, fn ->
+ assert [{"user@example.com", _decimal}] = Repo.all(from u in User, select: {u.email, over(avg(u.id), :email)}, windows: [email: [partition_by: u.email]])
+ end
+ end
+
+ test "with_cte/2" do
+ assert_raise Ecto.QueryError, ~r/QLC adapter does not support CTE/, fn ->
+ assert [
+ %User{},
+ %User{},
+ %User{}
+ ] = User
+ |> recursive_ctes(true)
+ |> with_cte("sessions", as: ^from(UserSession))
+ |> join(:left, [u], us in "users_sessions", on: us.user_id == u.id)
+ |> Repo.all()
+ end
+ end
+ end
+
+ describe "Repo" do
+
+ test "aggregate/3" do
+ assert 1 == Repo.aggregate(User, :count)
+ assert 1 == Repo.aggregate(User, :count)
+ end
+
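+ # Repo.aggregate/3 with :count counts whole rows; the aggregate/4 variant
+ # below counts a named column instead, roughly equivalent to
+ # select(User, [u], count(u.id)).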
test "aggregate/4" do + Repo.aggregate(User, :count, :id) + assert 1 == Repo.aggregate(User, :count, :id) + end + + test "all/2" do + assert [%User{}] = Repo.all(User) + assert [%User{}] = Repo.all(User) + end + + test "delete_all/2" do + assert {3, nil} == Repo.delete_all(UserSession) + assert [%User{}] = Repo.all(User) + assert {1, nil} = Repo.delete_all(where(User, [email: "user@example.com"])) + assert [] == Repo.all(User) + end + + test "delete!/2", %{user: user} do + assert Repo.delete!(user) + end + + test "delete/2", %{user: user} do + assert {:ok, %User{}} = Repo.delete(user) + end + + test "insert!/2" do + assert Repo.insert!(%User{}) + assert Repo.insert!(%User{sessions: [%UserSession{}]}) + end + + test "insert/2" do + assert {:ok, %User{}} = Repo.insert(%User{}) + assert {:ok, %User{}} = Repo.insert(%User{}) + end + + test "insert_all/3" do + now = DateTime.utc_now() + assert {2, [%User{inserted_at: %DateTime{}, updated_at: %DateTime{}}, %User{inserted_at: %DateTime{}, updated_at: %DateTime{}}]} = Repo.insert_all(User, [%{inserted_at: now, updated_at: now}, %{inserted_at: now, updated_at: now}], returning: true) + assert {2, nil} = Repo.insert_all(User, [%{inserted_at: now, updated_at: now}, %{inserted_at: now, updated_at: now}]) + assert {2, nil} = Repo.insert_all(User, [%{inserted_at: {:placeholder, :now}, updated_at: {:placeholder, :now}}, %{inserted_at: {:placeholder, :now}, updated_at: {:placeholder, :now}}], placeholders: %{now: now}) + end + + test "insert_or_update!/2" do + changeset = User.changeset(%User{}, %{}) + assert user = Repo.insert_or_update!(changeset) + assert %User{password_hash: "password_hash"} = Repo.insert_or_update!(User.changeset(user, %{password_hash: "password_hash"})) + end + + test "insert_or_update/2" do + changeset = User.changeset(%User{}, %{}) + assert {:ok, user} = Repo.insert_or_update(changeset) + assert {:ok, %User{password_hash: "password_hash"}} = Repo.insert_or_update(User.changeset(user, %{password_hash: "password_hash"})) + end + + test "load/2" do + assert %User{email: "test"} = Repo.load(User, [email: "test"]) + end + + test "preload/3", %{user: user} do + assert ^user = Repo.preload(user, [:sessions]) + end + + test "reload!/2", %{user: %{sessions: [session | _]}} do + assert ^session = Repo.reload!(session) + end + + test "reload/2", %{user: %{sessions: [session | _]}} do + assert ^session = Repo.reload!(session) + end + + test "update!/2", %{user: user} do + changeset = User.changeset(user, %{password_hash: "password_hash"}) + assert %User{password_hash: "password_hash"} = Repo.update!(changeset) + end + + test "update/2", %{user: user} do + changeset = User.changeset(user, %{password_hash: "password_hash"}) + assert {:ok, %User{password_hash: "password_hash"}} = Repo.update(changeset) + end + + test "checked_out?/0" do + refute Repo.checked_out?() + end + + test "checkout/2" do + assert %User{} = Repo.checkout(fn -> + assert Repo.checked_out?() + Repo.one(User) end) + end + + test "exists?/2" do + assert Repo.exists?(User) + assert Repo.exists?(where(User, email: "user@example.com")) + end + + test "get!/3", %{user: user} do + assert Repo.get!(User, user.id) + end + + test "get/3", %{user: user} do + assert Repo.get(User, user.id) + end + + test "get_by!/3" do + assert Repo.get_by!(User, [email: "user@example.com"]) + end + + test "get_by/3" do + assert Repo.get_by(User, [email: "user@example.com"]) + end + + test "one!/2" do + assert %User{} = Repo.one!(User) + end + + test "one/2" do + assert %User{} = Repo.one(User) + end + 
+ test "stream/2" do + assert {:ok, [%UserSession{}, %UserSession{}, %UserSession{}]} = Repo.transaction(fn -> Enum.to_list(Repo.stream(UserSession, max_rows: 2)) end) + end + + test "update_all/2" do + now = DateTime.utc_now() + assert {1, nil} == Repo.update_all(User, [set: [password_hash: "password_hash", updated_at: now]]) + assert [%User{password_hash: "password_hash", updated_at: ^now}] = Repo.all(User) + end + end +end diff --git a/test/ecto/ets_test.exs b/test/ecto/ets_test.exs new file mode 100644 index 0000000..1c5072d --- /dev/null +++ b/test/ecto/ets_test.exs @@ -0,0 +1,507 @@ +defmodule EctoQLC.Adapters.ETSTest do + use EctoQLC.DataCase, repo: :ets + + describe "Ecto.Query.API" do + + test "!=/2" do + assert %User{} = Repo.one!(where(User, [u], u.email != "body")) + end + + test "*/2" do + assert %User{} = Repo.one!(where(User, [u], u.id * 1 == u.id)) + end + + test "+/2", %{user: user} do + assert %{id: user.id} == Repo.one!(select(User, [u], %{id: u.id + 0})) + end + + test "-/2", %{user: user} do + assert user.id == Repo.one!(select(User, [u], u.id - 0)) + end + + test "//2", %{user: user} do + assert user.id == Repo.one!(select(User, [u], u.id / 1)) + end + + test "/2" do + assert Repo.one!(select(User, [u], u.id > 0)) + end + + test ">=/2" do + assert Repo.one!(select(User, [u], u.id >= u.id)) + end + + test "ago/2" do + assert %User{} = Repo.one(where(User, [u], u.inserted_at > ago(3, "month"))) + end + + test "all/1" do + assert Repo.one(from(us in UserSession, select: avg(us.id), group_by: [us.user_id])) + assert %User{} = Repo.one(from u in User, where: u.id <= all(from(us in UserSession, select: avg(us.user_id), group_by: [us.user_id])) and u.id >= all(from(us in UserSession, select: avg(us.user_id), group_by: [us.user_id]))) + assert %User{email: "user@example.com"} = Repo.one(from u in User, where: u.id == all(from(u in User, select: max(u.id)))) + end + + test "and/2" do + assert %User{} = Repo.one(where(User, [u], u.email == "user@example.com" and u.id != 69)) + end + + test "any/1" do + assert %User{email: "user@example.com"} = Repo.one(from u in User, where: u.id == any(from(u in User, select: [u.id], where: u.email == "user@example.com"))) + end + + test "as/2" do + assert %User{} = Repo.one(from(User, as: :user)) + end + + test "avg/1" do + assert %User{} = Repo.one(group_by(select(User, [u], %{u | id: avg(u.id)}), [:email, :password_hash, :inserted_at, :updated_at])) + end + + test "coalesce/2" do + assert "user@example.com" == Repo.one(select(User, [u], u.email |> coalesce("NULL") |> coalesce("1") |> coalesce("0"))) + end + + test "count/0" do + assert 1 = Repo.one(select(User, [u], count())) + end + + test "count/1" do + assert 1 = Repo.one(select(User, [u], count(u.id))) + end + + test "count/3" do + Repo.insert!(%User{email: "user@example.com"}) + assert 2 = Repo.one(select(User, [u], count(u.id, :distinct))) + assert 1 = Repo.one(select(User, [u], count(u.email, :distinct))) + end + + test "date_add/3", %{user: user} do + assert %{inserted_at: inserted_at} = Repo.one(select(User, [u], %{inserted_at: date_add(^Date.utc_today(), 1, "month")})) + assert 30 == Date.diff(inserted_at, user.inserted_at) + assert %{inserted_at: inserted_at} = Repo.one(select(User, [u], %{inserted_at: date_add(type(u.inserted_at, :date), 1, "month")})) + assert 30 == Date.diff(inserted_at, user.inserted_at) + end + + test "datetime_add/3", %{user: user} do + assert %{inserted_at: inserted_at} = Repo.one(select(User, [u], %{u | inserted_at: datetime_add(type(^DateTime.utc_now(), 
:utc_datetime_usec), 1, "month")})) + assert 30 == DateTime.diff(inserted_at, user.inserted_at, :day) + assert %{inserted_at: inserted_at} = Repo.one(select(User, [u], %{u | inserted_at: datetime_add(u.inserted_at, 1, "month")})) + assert 30 == DateTime.diff(inserted_at, user.inserted_at, :day) + end + + test "exists/1" do + assert_raise Ecto.QueryError, ~r/QLC adapter does not support parent_as in a subquery's where clauses/, fn -> + assert %User{} = Repo.one(from(User, as: :user, where: exists(from(us in UserSession, where: parent_as(:user).id == us.user_id and parent_as(:user).email != "email", select: 1)))) + end + assert %User{} = Repo.one(from(User, as: :user, where: exists(from(us in UserSession)))) + end + + test "field/2" do + assert %User{} = Repo.one(where(User, [u], fragment("byte_size(?)", field(u, :email)) == 16)) + end + + test "filter/2" do + assert 0 == Repo.one(from u in User, select: avg(u.id) |> filter(u.id < -1)) + assert 0 < Repo.one(from u in User, select: avg(u.id) |> filter(u.id >= -1)) + end + + test "fragment/1" do + refute Repo.one(where(User, [u], fragment("byte_size(?)", u.email) == 50)) + assert %User{} = Repo.one(where(User, [u], fragment("byte_size(?)", u.email) == 16)) + assert_raise Ecto.QueryError, ~r/QLC adapter does not support fragemnt in select clauses in query/, fn -> + assert %User{email: "user@example.com"} = + User + |> select([u], %{u | email: fragment("? ?", u.email, u.email)}) + |> where([u], fragment("byte_size(?)", u.email) == 16) + |> Repo.one() + end + end + + test "from_now/2" do + assert %User{} = Repo.one(where(User, [u], u.inserted_at < from_now(3, "month"))) + end + + test "ilike/2" do + refute Repo.one(where(User, [u], ilike(u.email, "aaaaa"))) + assert %User{} = Repo.one(where(User, [u], ilike(u.email, "USER@example.com"))) + end + + test "in/2" do + assert %User{} = Repo.one(where(User, [u], u.email in ["user@example.com", "USER@example.com"])) + end + + test "is_nil/2" do + assert %User{} = Repo.one(where(User, [u], not is_nil(u.email))) + end + + test "json_extract_path/2" do + assert ~w[localhost 0.0.0.0.0 0.0.5.0.0] == Repo.all(from(u in UserSession, order_by: u.meta["remote_ip"], select: u.meta["remote_ip"])) + end + + test "like/2" do + refute Repo.one(where(User, [u], ilike(u.email, "aaaaa"))) + assert %User{} = Repo.one(where(User, [u], like(u.email, "user@example.com"))) + end + + test "map/2" do + assert %{email: "user@example.com"} == Repo.one(from u in User, select: map(u, [:email])) + end + + test "max/1" do + assert %User{email: "user@example.com"} = Repo.one(group_by(select(User, [u], %{u | id: max(u.email)}), [:email, :password_hash, :inserted_at, :updated_at])) + end + + test "merge/2" do + assert %{left: "left", email: "user@example.com"} == Repo.one(from u in User, select: merge(%{left: "left"}, %{email: u.email})) + end + + test "min/1" do + assert %User{email: "user@example.com"} = Repo.one(group_by(select(User, [u], %{u | email: min(u.email)}), [:id, :email, :password_hash, :inserted_at, :updated_at])) + end + + test "not/1" do + assert %User{email: "user@example.com"} = Repo.one(where(User, [u], not(u.id == 69))) + end + + test "or/2" do + assert %User{email: "user@example.com"} = Repo.one(where(User, [u], u.id == 69 or u.email == "user@example.com")) + end + + test "selected_as/2", %{user: %{inserted_at: posted}} do + query = from u in User, + select: %{ + posted: selected_as(u.inserted_at, :date), + sum_visits: u.id |> coalesce(0) |> sum() |> selected_as(:sum_visits) + }, + group_by: selected_as(:date), + 
order_by: selected_as(:sum_visits) + + assert %{posted: ^posted} = Repo.one(query) + end + + test "struct/2" do + assert %User{email: "user@example.com", inserted_at: nil} = Repo.one(select(User, [u], struct(u, [:email]))) + end + + test "sum/1" do + assert %User{email: "user@example.com"} = Repo.one(group_by(select(User, [u], %{u | id: sum(u.id)}), [:email, :password_hash, :inserted_at, :updated_at])) + end + + test "type/2" do + email = "string" + assert %User{email: "string"} = Repo.one(select(User, [u], %{u | email: type(^email, u.email)})) + end + end + + describe "Ecto.Query" do + + test "distinct/3" do + assert ~w[localhost 0.0.0.0.0 0.0.5.0.0] == Repo.all(from(us in UserSession, distinct: true, order_by: [us.meta["remote_ip"]], select: us.meta["remote_ip"])) + assert ~w[0.0.0.0.0 0.0.5.0.0 localhost] == Repo.all(from(us in UserSession, distinct: us.meta["remote_ip"], order_by: [us.inserted_at], select: us.meta["remote_ip"])) + assert ~w[localhost 0.0.5.0.0 0.0.0.0.0] == Repo.all(from(us in UserSession, distinct: [desc: us.meta["remote_ip"]], order_by: [us.inserted_at], select: us.meta["remote_ip"])) + end + + test "dynamic/2" do + assert %User{} = Repo.one(where(User, ^dynamic([u], u.id != 69))) + end + + test "except/2" do + assert_raise Ecto.QueryError, ~r/QLC adapter does not support combinations like: except/, fn -> + assert nil == Repo.one(from u in User, select: u.email, except: ^(from u in User, select: u.email)) + end + end + + test "except_all/2" do + assert_raise Ecto.QueryError, ~r/QLC adapter does not support combinations like: except_all/, fn -> + assert nil == Repo.one(from u in User, select: u.email, except_all: ^(from u in User, select: u.email)) + end + end + + test "first/2" do + assert %User{email: "user@example.com"} = Repo.one(first(User)) + end + + test "from/2" do + assert %User{email: "user@example.com"} = Repo.one(from(User)) + end + + test "group_by/2" do + assert {"user@example.com", 1} == Repo.one(from(u in User, group_by: u.email, select: {u.email, count(u.id)})) + end + + test "having/3" do + assert {"user@example.com", 1} == Repo.one(from(u in User, group_by: u.email, having: avg(u.id) > 10, select: {u.email, count(u.id)})) + end + + test "intersect/2" do + assert_raise Ecto.QueryError, ~r/QLC adapter does not support combinations like: intersect/, fn -> + assert ["user@example.com"] == Repo.all(from u in User, select: u.email, intersect: ^(from u in User, select: u.email)) + end + end + + test "intersect_all/1" do + assert_raise Ecto.QueryError, ~r/QLC adapter does not support combinations like: intersect_all/, fn -> + assert ["user@example.com"] == Repo.all(from u in User, select: u.email, intersect_all: ^(from u in User, select: u.email)) + end + end + + test "join/5" do + assert [%User{email: "user@example.com"}] = + from(User, as: :user) + |> join(:left, [user: user], session in assoc(user, :sessions), on: not is_nil(session.meta["remote_ip"]), as: :sessions) + |> preload([sessions: sessions], [sessions: sessions]) + |> where([user: user], user.email == "user@example.com") + |> Repo.all() + end + + test "last/1" do + assert %User{email: "user@example.com"} = Repo.one(last(User)) + end + + test "limit/2" do + assert [%UserSession{}, %UserSession{}] = Repo.all(order_by(limit(UserSession, 2), [:user_id])) + end + + test "lock/2" do + assert_raise Ecto.QueryError, ~r/ETS adapter does not support locks in query/, fn -> + assert [] == Repo.all(from(u in User, where: u.email == "user@example.com", lock: "write")) + end + end + + test "offset/2" do + 
assert %UserSession{token: "C"} = Repo.one(order_by(offset(UserSession, 2), [:token])) + end + + test "or_having/2" do + assert %User{email: "user@example.com"} = User |> having([u], not is_nil(u.email)) |> or_having([u], u.email == "C") |> or_having([u], u.email == "user@example.com") |> group_by([:id]) |> Repo.one() + end + + test "or_where/0" do + assert %User{email: "user@example.com"} = User |> or_where(email: "B") |> where(email: "A") |> or_where(email: "user@example.com") |> Repo.one() + end + + test "order_by/3" do + assert [%{token: "C"}, %{token: "B"}, %{token: "A"}] = Repo.all(select(order_by(UserSession, [desc: :token, asc: :id]), [u], u)) + assert [%{token: "A"}, %{token: "B"}, %{token: "C"}] = Repo.all(order_by(UserSession, [u], [asc: u.token, desc: u.id])) + end + + test "preload/3", %{user: user} do + assert %User{email: "user@example.com", sessions: [%UserSession{}, %UserSession{}, %UserSession{}]} = Repo.preload(user, [:sessions], force: true) + assert %User{email: "user@example.com", sessions: [%UserSession{}, %UserSession{}, %UserSession{}]} = Repo.preload(user, [:sessions], in_parrallel: false) + assert %User{email: nil, sessions: [%UserSession{}, %UserSession{}, %UserSession{}]} = Repo.preload(%User{id: user.id}, [:sessions]) + end + + test "reverse_order/3" do + assert Repo.all(reverse_order(order_by(User, asc: :id))) == Repo.all(order_by(User, desc: :id)) + end + + test "select/1" do + assert ["user@example.com"] == Repo.all(select(User, [u], u.email)) + assert [2] == Repo.all(select(User, [u], 2)) + end + + test "select_merge/2" do + assert %{email: "body"} == Repo.one(select_merge(select(User, [u], %{email: u.email}), %{email: "body"})) + end + + test "subquery/2" do + assert Repo.one(where(User, [u], u.email in subquery(select(User, [:email])))) + refute Repo.one(where(User, [u], u.email not in subquery(select(User, [:email])))) + assert Repo.one(select(User, [u], u.email in subquery(select(User, [:email])))) + assert 0 < Repo.one(from u in subquery(where(User, email: "user@example.com")), select: avg(u.id)) + assert [%User{email: "user@example.com"}] = Repo.all(join(User, :inner, [u], t in subquery(where(User, email: "user@example.com")), as: :users)) + end + + test "union/1" do + assert_raise Ecto.QueryError, ~r/QLC adapter does not support combinations like: union/, fn -> + assert ["user@example.com"] == Repo.all(from u in User, select: u.email, union: ^(from u in User, select: u.email)) + end + end + + test "union_all/1" do + assert_raise Ecto.QueryError, ~r/QLC adapter does not support combinations like: union_all/, fn -> + assert ["user@example.com", "user@example.com"] == Repo.all(from u in User, select: u.email, union_all: ^(from u in User, select: u.email)) + end + end + + test "update/3" do + now = DateTime.utc_now() + assert {1, nil} = Repo.update_all(update(User, [set: [email: "user@example.com", updated_at: ^now]]), []) + assert [%{email: "user@example.com", updated_at: ^now}] = Repo.all(where(User, [email: "user@example.com"])) + end + + test "where/2" do + assert [%User{email: "user@example.com"}] = Repo.all(where(User, [email: "user@example.com"])) + end + + test "windows/2" do + assert_raise Ecto.QueryError, ~r/QLC adapter does not support windows/, fn -> + assert [{"user@example.com", _decimal}] = Repo.all(from u in User, select: {u.email, over(avg(u.id), :email)}, windows: [email: [partition_by: u.email]]) + end + end + + test "with_cte/2" do + assert_raise Ecto.QueryError, ~r/QLC adapter does not support CTE/, fn -> + assert [ + %User{}, + 
%User{}, + %User{} + ] = User + |> recursive_ctes(true) + |> with_cte("sessions", as: ^from(UserSession)) + |> join(:left, [u], us in "users_sessions", on: us.user_id == u.id) + |> Repo.all() + end + end + end + + describe "Repo" do + + test "aggregate/3" do + assert 1 == Repo.aggregate(User, :count) + assert 1 == Repo.aggregate(User, :count) + end + + test "aggregate/4" do + Repo.aggregate(User, :count, :id) + assert 1 == Repo.aggregate(User, :count, :id) + end + + test "all/2" do + assert [%User{}] = Repo.all(User) + assert [%User{}] = Repo.all(User) + end + + test "delete_all/2" do + assert {3, nil} == Repo.delete_all(UserSession) + assert [%User{}] = Repo.all(User) + assert {1, nil} = Repo.delete_all(where(User, [email: "user@example.com"])) + assert [] == Repo.all(User) + end + + test "delete!/2", %{user: user} do + assert Repo.delete!(user) + end + + test "delete/2", %{user: user} do + assert {:ok, %User{}} = Repo.delete(user) + end + + test "insert!/2" do + assert Repo.insert!(%User{}) + assert Repo.insert!(%User{sessions: [%UserSession{}]}) + end + + test "insert/2" do + assert {:ok, %User{}} = Repo.insert(%User{}) + assert {:ok, %User{}} = Repo.insert(%User{}) + end + + test "insert_all/3" do + now = DateTime.utc_now() + assert {2, [%User{}, %User{}]} = Repo.insert_all(User, [%{inserted_at: now, updated_at: now}, %{inserted_at: now, updated_at: now}], returning: true) + assert {2, nil} = Repo.insert_all(User, [%{inserted_at: now, updated_at: now}, %{inserted_at: now, updated_at: now}]) + assert {2, nil} = Repo.insert_all(User, [%{inserted_at: {:placeholder, :now}, updated_at: {:placeholder, :now}}, %{inserted_at: {:placeholder, :now}, updated_at: {:placeholder, :now}}], placeholders: %{now: now}) + end + + test "insert_or_update!/2" do + changeset = User.changeset(%User{}, %{}) + assert user = Repo.insert_or_update!(changeset) + assert %User{password_hash: "password_hash"} = Repo.insert_or_update!(User.changeset(user, %{password_hash: "password_hash"})) + end + + test "insert_or_update/2" do + changeset = User.changeset(%User{}, %{}) + assert {:ok, user} = Repo.insert_or_update(changeset) + assert {:ok, %User{password_hash: "password_hash"}} = Repo.insert_or_update(User.changeset(user, %{password_hash: "password_hash"})) + end + + test "load/2" do + assert %User{email: "test"} = Repo.load(User, [email: "test"]) + end + + test "preload/3", %{user: user} do + assert ^user = Repo.preload(user, [:sessions]) + end + + test "reload!/2", %{user: %{sessions: [session | _]}} do + assert ^session = Repo.reload!(session) + end + + test "reload/2", %{user: %{sessions: [session | _]}} do + assert ^session = Repo.reload!(session) + end + + test "update!/2", %{user: user} do + changeset = User.changeset(user, %{password_hash: "password_hash"}) + assert %User{password_hash: "password_hash"} = Repo.update!(changeset) + end + + test "update/2", %{user: user} do + changeset = User.changeset(user, %{password_hash: "password_hash"}) + assert {:ok, %User{password_hash: "password_hash"}} = Repo.update(changeset) + end + + test "checked_out?/0" do + refute Repo.checked_out?() + end + + test "checkout/2" do + assert %User{} = Repo.checkout(fn -> + assert Repo.checked_out?() + Repo.one(User) end) + end + + test "exists?/2" do + assert Repo.exists?(User) + assert Repo.exists?(where(User, email: "user@example.com")) + end + + test "get!/3", %{user: user} do + assert Repo.get!(User, user.id) + end + + test "get/3", %{user: user} do + assert Repo.get(User, user.id) + end + + test "get_by!/3" do + assert 
Repo.get_by!(User, [email: "user@example.com"])
+ end
+
+ test "get_by/3" do
+ assert Repo.get_by(User, [email: "user@example.com"])
+ end
+
+ test "one!/2" do
+ assert %User{} = Repo.one!(User)
+ end
+
+ test "one/2" do
+ assert %User{} = Repo.one(User)
+ end
+
+ test "stream/2" do
+ assert {:ok, [%UserSession{}, %UserSession{}, %UserSession{}]} = Repo.transaction(fn -> Enum.to_list(Repo.stream(UserSession, max_rows: 2)) end)
+ end
+
+ test "update_all/2" do
+ now = DateTime.utc_now()
+ assert {1, nil} == Repo.update_all(User, [set: [password_hash: "password_hash", updated_at: now]])
+ assert [%User{password_hash: "password_hash", updated_at: ^now}] = Repo.all(User)
+ end
+ end
+end
diff --git a/test/ecto/mnesia_test.exs b/test/ecto/mnesia_test.exs
new file mode 100644
index 0000000..05cac37
--- /dev/null
+++ b/test/ecto/mnesia_test.exs
@@ -0,0 +1,510 @@
+defmodule EctoQLC.Adapters.MnesiaTest do
+ use EctoQLC.DataCase, repo: :mnesia
+
+ describe "Ecto.Query.API" do
+
+ test "!=/2" do
+ assert %User{} = Repo.one!(where(User, [u], u.email != "body"))
+ end
+
+ test "*/2" do
+ assert %User{} = Repo.one!(where(User, [u], u.id * 1 == u.id))
+ end
+
+ test "+/2", %{user: user} do
+ assert %{id: user.id} == Repo.one!(select(User, [u], %{id: u.id + 0}))
+ end
+
+ test "-/2", %{user: user} do
+ assert user.id == Repo.one!(select(User, [u], u.id - 0))
+ end
+
+ test "//2", %{user: user} do
+ assert user.id == Repo.one!(select(User, [u], u.id / 1))
+ end
+
+ test ">/2" do
+ assert Repo.one!(select(User, [u], u.id > 0))
+ end
+
+ test ">=/2" do
+ assert Repo.one!(select(User, [u], u.id >= u.id))
+ end
+
+ test "ago/2" do
+ assert %User{} = Repo.one(where(User, [u], u.inserted_at > ago(3, "month")))
+ end
+
+ test "all/1" do
+ assert Repo.one(from(us in UserSession, select: avg(us.id), group_by: [us.user_id]))
+ assert %User{} = Repo.one(from u in User, where: u.id <= all(from(us in UserSession, select: avg(us.user_id), group_by: [us.user_id])) and u.id >= all(from(us in UserSession, select: avg(us.user_id), group_by: [us.user_id])))
+ assert %User{email: "user@example.com"} = Repo.one(from u in User, where: u.id == all(from(u in User, select: max(u.id))))
+ end
+
+ test "and/2" do
+ assert %User{} = Repo.one(where(User, [u], u.email == "user@example.com" and u.id != 69))
+ end
+
+ test "any/1" do
+ assert %User{email: "user@example.com"} = Repo.one(from u in User, where: u.id == any(from(u in User, select: [u.id], where: u.email == "user@example.com")))
+ end
+
+ test "as/2" do
+ assert %User{} = Repo.one(from(User, as: :user))
+ end
+
+ test "avg/1" do
+ assert %User{} = Repo.one(group_by(select(User, [u], %{u | id: avg(u.id)}), [:email, :password_hash, :inserted_at, :updated_at]))
+ end
+
+ test "coalesce/2" do
+ assert "user@example.com" == Repo.one(select(User, [u], u.email |> coalesce("NULL") |> coalesce("1") |> coalesce("0")))
+ end
+
+ test "count/0" do
+ assert 1 = Repo.one(select(User, [u], count()))
+ end
+
+ test "count/1" do
+ assert 1 = Repo.one(select(User, [u], count(u.id)))
+ end
+
+ test "count/3" do
+ Repo.insert!(%User{email: "user@example.com"})
+ assert 2 = Repo.one(select(User, [u], count(u.id, :distinct)))
+ assert 1 = Repo.one(select(User, [u], count(u.email, :distinct)))
+ end
+
+ test "date_add/3", %{user: user} do
+ assert %{inserted_at: inserted_at} = Repo.one(select(User, [u], %{inserted_at: date_add(^Date.utc_today(), 1, "month")}))
+ assert 30 == Date.diff(inserted_at, user.inserted_at)
+ assert %{inserted_at: inserted_at} = Repo.one(select(User, [u], %{inserted_at: 
date_add(type(u.inserted_at, :date), 1, "month")})) + assert 30 == Date.diff(inserted_at, user.inserted_at) + end + + test "datetime_add/3", %{user: user} do + assert %{inserted_at: inserted_at} = Repo.one(select(User, [u], %{u | inserted_at: datetime_add(type(^DateTime.utc_now(), :utc_datetime_usec), 1, "month")})) + assert 30 == DateTime.diff(inserted_at, user.inserted_at, :day) + assert %{inserted_at: inserted_at} = Repo.one(select(User, [u], %{u | inserted_at: datetime_add(u.inserted_at, 1, "month")})) + assert 30 == DateTime.diff(inserted_at, user.inserted_at, :day) + end + + test "exists/1" do + assert_raise Ecto.QueryError, ~r/QLC adapter does not support parent_as in a subquery's where clauses/, fn -> + assert %User{} = Repo.one(from(User, as: :user, where: exists(from(us in UserSession, where: parent_as(:user).id == us.user_id and parent_as(:user).email != "email", select: 1)))) + end + assert %User{} = Repo.one(from(User, as: :user, where: exists(from(us in UserSession)))) + end + + test "field/2" do + assert %User{} = Repo.one(where(User, [u], fragment("byte_size(?)", field(u, :email)) == 16)) + end + + test "filter/2" do + assert 0 == Repo.one(from u in User, select: avg(u.id) |> filter(u.id < -1)) + assert 0 < Repo.one(from u in User, select: avg(u.id) |> filter(u.id >= -1)) + end + + test "fragment/1" do + refute Repo.one(where(User, [u], fragment("byte_size(?)", u.email) == 50)) + assert %User{} = Repo.one(where(User, [u], fragment("byte_size(?)", u.email) == 16)) + assert_raise Ecto.QueryError, ~r/QLC adapter does not support fragemnt in select clauses in query/, fn -> + assert %User{email: "user@example.com"} = + User + |> select([u], %{u | email: fragment("? ?", u.email, u.email)}) + |> where([u], fragment("byte_size(?)", u.email) == 16) + |> Repo.one() + end + end + + test "from_now/2" do + assert %User{} = Repo.one(where(User, [u], u.inserted_at < from_now(3, "month"))) + end + + test "ilike/2" do + refute Repo.one(where(User, [u], ilike(u.email, "aaaaa"))) + assert %User{} = Repo.one(where(User, [u], ilike(u.email, "USER@example.com"))) + end + + test "in/2" do + assert %User{} = Repo.one(where(User, [u], u.email in ["user@example.com", "USER@example.com"])) + end + + test "is_nil/2" do + assert %User{} = Repo.one(where(User, [u], not is_nil(u.email))) + end + + test "json_extract_path/2" do + assert ~w[localhost 0.0.0.0.0 0.0.5.0.0] == Repo.all(from(u in UserSession, order_by: u.meta["remote_ip"], select: u.meta["remote_ip"])) + end + + test "like/2" do + refute Repo.one(where(User, [u], ilike(u.email, "aaaaa"))) + assert %User{} = Repo.one(where(User, [u], like(u.email, "user@example.com"))) + end + + test "map/2" do + assert %{email: "user@example.com"} == Repo.one(from u in User, select: map(u, [:email])) + end + + test "max/1" do + assert %User{email: "user@example.com"} = Repo.one(group_by(select(User, [u], %{u | id: max(u.email)}), [:email, :password_hash, :inserted_at, :updated_at])) + end + + test "merge/2" do + assert %{left: "left", email: "user@example.com"} == Repo.one(from u in User, select: merge(%{left: "left"}, %{email: u.email})) + end + + test "min/1" do + assert %User{email: "user@example.com"} = Repo.one(group_by(select(User, [u], %{u | email: min(u.email)}), [:id, :email, :password_hash, :inserted_at, :updated_at])) + end + + test "not/1" do + assert %User{email: "user@example.com"} = Repo.one(where(User, [u], not(u.id == 69))) + end + + test "or/2" do + assert %User{email: "user@example.com"} = Repo.one(where(User, [u], u.id == 69 or u.email == 
"user@example.com")) + end + + test "selected_as/2", %{user: %{inserted_at: posted}} do + query = from u in User, + select: %{ + posted: selected_as(u.inserted_at, :date), + sum_visits: u.id |> coalesce(0) |> sum() |> selected_as(:sum_visits) + }, + group_by: selected_as(:date), + order_by: selected_as(:sum_visits) + + assert %{posted: ^posted} = Repo.one(query) + end + + test "struct/2" do + assert %User{email: "user@example.com", inserted_at: nil} = Repo.one(select(User, [u], struct(u, [:email]))) + end + + test "sum/1" do + assert %User{email: "user@example.com"} = Repo.one(group_by(select(User, [u], %{u | id: sum(u.id)}), [:email, :password_hash, :inserted_at, :updated_at])) + end + + test "type/2" do + email = "string" + assert %User{email: "string"} = Repo.one(select(User, [u], %{u | email: type(^email, u.email)})) + end + end + + describe "Ecto.Query" do + + test "distinct/3" do + assert ~w[localhost 0.0.0.0.0 0.0.5.0.0] == Repo.all(from(us in UserSession, distinct: true, order_by: [us.meta["remote_ip"]], select: us.meta["remote_ip"])) + assert ~w[0.0.0.0.0 0.0.5.0.0 localhost] == Repo.all(from(us in UserSession, distinct: us.meta["remote_ip"], order_by: [us.inserted_at], select: us.meta["remote_ip"])) + assert ~w[localhost 0.0.5.0.0 0.0.0.0.0] == Repo.all(from(us in UserSession, distinct: [desc: us.meta["remote_ip"]], order_by: [us.inserted_at], select: us.meta["remote_ip"])) + end + + test "dynamic/2" do + assert %User{} = Repo.one(where(User, ^dynamic([u], u.id != 69))) + end + + test "except/2" do + assert_raise Ecto.QueryError, ~r/QLC adapter does not support combinations like: except/, fn -> + assert nil == Repo.one(from u in User, select: u.email, except: ^(from u in User, select: u.email)) + end + end + + test "except_all/2" do + assert_raise Ecto.QueryError, ~r/QLC adapter does not support combinations like: except_all/, fn -> + assert nil == Repo.one(from u in User, select: u.email, except_all: ^(from u in User, select: u.email)) + end + end + + test "first/2" do + assert %User{email: "user@example.com"} = Repo.one(first(User)) + end + + test "from/2" do + assert %User{email: "user@example.com"} = Repo.one(from(User)) + end + + test "group_by/2" do + assert {"user@example.com", 1} == Repo.one(from(u in User, group_by: u.email, select: {u.email, count(u.id)})) + end + + test "having/3" do + assert {"user@example.com", 1} == Repo.one(from(u in User, group_by: u.email, having: avg(u.id) > 10, select: {u.email, count(u.id)})) + end + + test "intersect/2" do + assert_raise Ecto.QueryError, ~r/QLC adapter does not support combinations like: intersect/, fn -> + assert ["user@example.com"] == Repo.all(from u in User, select: u.email, intersect: ^(from u in User, select: u.email)) + end + end + + test "intersect_all/1" do + assert_raise Ecto.QueryError, ~r/QLC adapter does not support combinations like: intersect_all/, fn -> + assert ["user@example.com"] == Repo.all(from u in User, select: u.email, intersect_all: ^(from u in User, select: u.email)) + end + end + + test "join/5" do + assert [%User{email: "user@example.com"}] = + from(User, as: :user) + |> join(:left, [user: user], session in assoc(user, :sessions), on: not is_nil(session.meta["remote_ip"]), as: :sessions) + |> preload([sessions: sessions], [sessions: sessions]) + |> where([user: user], user.email == "user@example.com") + |> Repo.all() + end + + test "last/1" do + assert %User{email: "user@example.com"} = Repo.one(last(User)) + end + + test "limit/2" do + assert [%UserSession{}, %UserSession{}] = 
Repo.all(order_by(limit(UserSession, 2), [:user_id]))
+ end
+
+ test "lock/2" do
+ assert [%User{}] = Repo.all(from(u in User, lock: "read"))
+ assert [%User{}] = Repo.all(from(u in User, lock: "write"))
+ assert [%User{}] = Repo.all(from(u in User, lock: "sticky_write"))
+ assert_raise Ecto.QueryError, ~r/Unsupported lock: "{:global {__MODULE__, User}, read}", supported locks: write, read, stickey_write in query/, fn ->
+ assert [%User{}] = Repo.all(from(u in User, lock: "{:global {__MODULE__, User}, read}"))
+ end
+ end
+
+ test "offset/2" do
+ assert %UserSession{token: "C"} = Repo.one(order_by(offset(UserSession, 2), [:token]))
+ end
+
+ test "or_having/2" do
+ assert %User{email: "user@example.com"} = User |> having([u], not is_nil(u.email)) |> or_having([u], u.email == "C") |> or_having([u], u.email == "user@example.com") |> group_by([:id]) |> Repo.one()
+ end
+
+ test "or_where/0" do
+ assert %User{email: "user@example.com"} = User |> or_where(email: "B") |> where(email: "A") |> or_where(email: "user@example.com") |> Repo.one()
+ end
+
+ test "order_by/3" do
+ assert [%{token: "C"}, %{token: "B"}, %{token: "A"}] = Repo.all(select(order_by(UserSession, [desc: :token, asc: :id]), [u], u))
+ assert [%{token: "A"}, %{token: "B"}, %{token: "C"}] = Repo.all(order_by(UserSession, [u], [asc: u.token, desc: u.id]))
+ end
+
+ test "preload/3", %{user: user} do
+ assert %User{email: "user@example.com", sessions: [%UserSession{}, %UserSession{}, %UserSession{}]} = Repo.preload(user, [:sessions], force: true)
+ assert %User{email: "user@example.com", sessions: [%UserSession{}, %UserSession{}, %UserSession{}]} = Repo.preload(user, [:sessions], in_parallel: false)
+ assert %User{email: nil, sessions: [%UserSession{}, %UserSession{}, %UserSession{}]} = Repo.preload(%User{id: user.id}, [:sessions])
+ end
+
+ test "reverse_order/3" do
+ assert Repo.all(reverse_order(order_by(User, asc: :id))) == Repo.all(order_by(User, desc: :id))
+ end
+
+ test "select/1" do
+ assert ["user@example.com"] == Repo.all(select(User, [u], u.email))
+ assert [2] == Repo.all(select(User, [u], 2))
+ end
+
+ test "select_merge/2" do
+ assert %{email: "body"} == Repo.one(select_merge(select(User, [u], %{email: u.email}), %{email: "body"}))
+ end
+
+ test "subquery/2" do
+ assert Repo.one(where(User, [u], u.email in subquery(select(User, [:email]))))
+ refute Repo.one(where(User, [u], u.email not in subquery(select(User, [:email]))))
+ assert Repo.one(select(User, [u], u.email in subquery(select(User, [:email]))))
+ assert 0 < Repo.one(from u in subquery(where(User, email: "user@example.com")), select: avg(u.id))
+ assert [%User{email: "user@example.com"}] = Repo.all(join(User, :inner, [u], t in subquery(where(User, email: "user@example.com")), as: :users))
+ end
+
+ test "union/1" do
+ assert_raise Ecto.QueryError, ~r/QLC adapter does not support combinations like: union/, fn ->
+ assert ["user@example.com"] == Repo.all(from u in User, select: u.email, union: ^(from u in User, select: u.email))
+ end
+ end
+
+ test "union_all/1" do
+ assert_raise Ecto.QueryError, ~r/QLC adapter does not support combinations like: union_all/, fn ->
+ assert ["user@example.com", "user@example.com"] == Repo.all(from u in User, select: u.email, union_all: ^(from u in User, select: u.email))
+ end
+ end
+
+ test "update/3" do
+ now = DateTime.utc_now()
+ assert {1, nil} = Repo.update_all(update(User, [set: [email: "user@example.com", updated_at: ^now]]), [])
+ assert [%{email: "user@example.com", updated_at: ^now}] = Repo.all(where(User, [email: "user@example.com"]))
+ end
+
+ test "where/2" do
+ assert [%User{email: "user@example.com"}] = Repo.all(where(User, [email: "user@example.com"]))
+ end
+
+ test "windows/2" do
+ assert_raise Ecto.QueryError, ~r/QLC adapter does not support windows/, fn ->
+ assert [{"user@example.com", _decimal}] = Repo.all(from u in User, select: {u.email, over(avg(u.id), :email)}, windows: [email: [partition_by: u.email]])
+ end
+ end
+
+ test "with_cte/2" do
+ assert_raise Ecto.QueryError, ~r/QLC adapter does not support CTE/, fn ->
+ assert [
+ %User{},
+ %User{},
+ %User{}
+ ] = User
+ |> recursive_ctes(true)
+ |> with_cte("sessions", as: ^from(UserSession))
+ |> join(:left, [u], us in "users_sessions", on: us.user_id == u.id)
+ |> Repo.all()
+ end
+ end
+ end
+
+ describe "Repo" do
+
+ test "aggregate/3" do
+ assert 1 == Repo.aggregate(User, :count)
+ assert 1 == Repo.aggregate(User, :count)
+ end
+
+ test "aggregate/4" do
+ Repo.aggregate(User, :count, :id)
+ assert 1 == Repo.aggregate(User, :count, :id)
+ end
+
+ test "all/2" do
+ assert [%User{}] = Repo.all(User)
+ assert [%User{}] = Repo.all(User)
+ end
+
+ test "delete_all/2" do
+ assert {3, nil} == Repo.delete_all(UserSession)
+ assert [%User{}] = Repo.all(User)
+ assert {1, nil} = Repo.delete_all(where(User, [email: "user@example.com"]))
+ assert [] == Repo.all(User)
+ end
+
+ test "delete!/2", %{user: user} do
+ assert Repo.delete!(user)
+ end
+
+ test "delete/2", %{user: user} do
+ assert {:ok, %User{}} = Repo.delete(user)
+ end
+
+ test "insert!/2" do
+ assert Repo.insert!(%User{})
+ assert Repo.insert!(%User{sessions: [%UserSession{}]})
+ end
+
+ test "insert/2" do
+ assert {:ok, %User{}} = Repo.insert(%User{})
+ assert {:ok, %User{}} = Repo.insert(%User{})
+ end
+
+ test "insert_all/3" do
+ now = DateTime.utc_now()
+ assert {2, [%User{}, %User{}]} = Repo.insert_all(User, [%{inserted_at: now, updated_at: now}, %{inserted_at: now, updated_at: now}], returning: true)
+ assert {2, nil} = Repo.insert_all(User, [%{inserted_at: now, updated_at: now}, %{inserted_at: now, updated_at: now}])
+ assert {2, nil} = Repo.insert_all(User, [%{inserted_at: {:placeholder, :now}, updated_at: {:placeholder, :now}}, %{inserted_at: {:placeholder, :now}, updated_at: {:placeholder, :now}}], placeholders: %{now: now})
+ end
+
+ test "insert_or_update!/2" do
+ changeset = User.changeset(%User{}, %{})
+ assert user = Repo.insert_or_update!(changeset)
+ assert %User{password_hash: "password_hash"} = Repo.insert_or_update!(User.changeset(user, %{password_hash: "password_hash"}))
+ end
+
+ test "insert_or_update/2" do
+ changeset = User.changeset(%User{}, %{})
+ assert {:ok, user} = Repo.insert_or_update(changeset)
+ assert {:ok, %User{password_hash: "password_hash"}} = Repo.insert_or_update(User.changeset(user, %{password_hash: "password_hash"}))
+ end
+
+ test "load/2" do
+ assert %User{email: "test"} = Repo.load(User, [email: "test"])
+ end
+
+ test "preload/3", %{user: user} do
+ assert ^user = Repo.preload(user, [:sessions])
+ end
+
+ test "reload!/2", %{user: %{sessions: [session | _]}} do
+ assert ^session = Repo.reload!(session)
+ end
+
+ test "reload/2", %{user: %{sessions: [session | _]}} do
+ assert ^session = Repo.reload(session)
+ end
+
+ test "update!/2", %{user: user} do
+ changeset = User.changeset(user, %{password_hash: "password_hash"})
+ assert %User{password_hash: "password_hash"} = Repo.update!(changeset)
+ end
+
+ test "update/2", %{user: user} do
+ changeset = User.changeset(user, %{password_hash: "password_hash"})
+ assert {:ok, 
%User{password_hash: "password_hash"}} = Repo.update(changeset)
+ end
+
+ test "checked_out?/0" do
+ refute Repo.checked_out?()
+ end
+
+ test "checkout/2" do
+ assert %User{} = Repo.checkout(fn ->
+ assert Repo.checked_out?()
+ Repo.one(User)
+ end)
+ end
+
+ test "exists?/2" do
+ assert Repo.exists?(User)
+ assert Repo.exists?(where(User, email: "user@example.com"))
+ end
+
+ test "get!/3", %{user: user} do
+ assert Repo.get!(User, user.id)
+ end
+
+ test "get/3", %{user: user} do
+ assert Repo.get(User, user.id)
+ end
+
+ test "get_by!/3" do
+ assert Repo.get_by!(User, [email: "user@example.com"])
+ end
+
+ test "get_by/3" do
+ assert Repo.get_by(User, [email: "user@example.com"])
+ end
+
+ test "one!/2" do
+ assert %User{} = Repo.one!(User)
+ end
+
+ test "one/2" do
+ assert %User{} = Repo.one(User)
+ end
+
+ test "stream/2" do
+ assert {:ok, [%UserSession{}, %UserSession{}, %UserSession{}]} = Repo.transaction(fn -> Enum.to_list(Repo.stream(UserSession, max_rows: 2)) end)
+ end
+
+ test "update_all/2" do
+ now = DateTime.utc_now()
+ assert {1, nil} == Repo.update_all(User, [set: [password_hash: "password_hash", updated_at: now]])
+ assert [%User{password_hash: "password_hash", updated_at: ^now}] = Repo.all(User)
+ end
+ end
+end
diff --git a/test/ecto/postgres_test.exs b/test/ecto/postgres_test.exs
new file mode 100644
index 0000000..083970f
--- /dev/null
+++ b/test/ecto/postgres_test.exs
@@ -0,0 +1,490 @@
+defmodule EctoQLC.Adapters.PostgresTest do
+ use EctoQLC.DataCase, repo: :postgres
+
+ describe "Ecto.Query.API" do
+
+ test "!=/2" do
+ assert %User{} = Repo.one!(where(User, [u], u.email != "body"))
+ end
+
+ test "*/2" do
+ assert %User{} = Repo.one!(where(User, [u], u.id * 1 == u.id))
+ end
+
+ test "+/2", %{user: user} do
+ assert %{id: user.id} == Repo.one!(select(User, [u], %{id: u.id + 0}))
+ end
+
+ test "-/2", %{user: user} do
+ assert user.id == Repo.one!(select(User, [u], u.id - 0))
+ end
+
+ test "//2", %{user: user} do
+ assert user.id == Repo.one!(select(User, [u], u.id / 1))
+ end
+
+ test ">/2" do
+ assert Repo.one!(select(User, [u], u.id > 0))
+ end
+
+ test ">=/2" do
+ assert Repo.one!(select(User, [u], u.id >= u.id))
+ end
+
+ test "ago/2" do
+ assert %User{} = Repo.one(where(User, [u], u.inserted_at > ago(3, "month")))
+ end
+
+ test "all/1" do
+ assert %Decimal{} = Repo.one(from(us in UserSession, select: avg(us.id), group_by: [us.user_id]))
+ assert %User{} = Repo.one(from u in User, where: u.id <= all(from(us in UserSession, select: avg(us.user_id), group_by: [us.user_id])) and u.id >= all(from(us in UserSession, select: avg(us.user_id), group_by: [us.user_id])))
+ assert %User{email: "user@example.com"} = Repo.one(from u in User, where: u.id == all(from(u in User, select: max(u.id))))
+ end
+
+ test "and/2" do
+ assert %User{} = Repo.one(where(User, [u], u.email == "user@example.com" and u.id != 69))
+ end
+
+ test "any/1" do
+ assert %User{email: "user@example.com"} = Repo.one(from u in User, where: u.id == any(from(u in User, select: [u.id], where: u.email == "user@example.com")))
+ end
+
+ test "as/2" do
+ assert %User{} = Repo.one(from(User, as: :user))
+ end
+
+ test "avg/1" do
+ assert %User{} = Repo.one(group_by(select(User, [u], %{u | id: avg(u.id)}), [:email, :password_hash, :inserted_at, :updated_at]))
+ end
+
+ test "coalesce/2" do
+ assert "user@example.com" == Repo.one(select(User, [u], u.email |> coalesce("NULL") |> coalesce("1") |> coalesce("0")))
+ end
+
+ test "count/0" do
+ assert 1 = Repo.one(select(User, [u], count()))
+ end
+
+ test 
"count/1" do + assert 1 = Repo.one(select(User, [u], count(u.id))) + end + + test "count/3" do + Repo.insert!(%User{email: "user@example.com"}) + assert 2 = Repo.one(select(User, [u], count(u.id, :distinct))) + assert 1 = Repo.one(select(User, [u], count(u.email, :distinct))) + end + + test "date_add/3", %{user: user} do + assert %{inserted_at: inserted_at} = Repo.one(select(User, [u], %{inserted_at: date_add(^Date.utc_today(), 1, "month")})) + assert Date.diff(inserted_at, user.inserted_at) in [30, 31] + assert %{inserted_at: inserted_at} = Repo.one(select(User, [u], %{inserted_at: date_add(type(u.inserted_at, :date), 1, "month")})) + assert Date.diff(inserted_at, user.inserted_at) in [30, 31] + end + + test "datetime_add/3", %{user: user} do + assert %{inserted_at: inserted_at} = Repo.one(select(User, [u], %{u | inserted_at: datetime_add(type(^DateTime.utc_now(), :utc_datetime_usec), 1, "month")})) + assert DateTime.diff(inserted_at, user.inserted_at, :day) in [30, 31] + assert %{inserted_at: inserted_at} = Repo.one(select(User, [u], %{u | inserted_at: datetime_add(u.inserted_at, 1, "month")})) + assert DateTime.diff(inserted_at, user.inserted_at, :day) in [30, 31] + end + + test "exists/1" do + assert %User{} = Repo.one(from(User, as: :user, where: exists(from(us in UserSession, where: parent_as(:user).id == us.user_id and parent_as(:user).email != "email", select: 1)))) + assert %User{} = Repo.one(from(User, as: :user, where: exists(from(us in UserSession)))) + end + + test "field/2" do + assert %User{} = Repo.one(where(User, [u], fragment("LENGTH(?)", field(u, :email)) == 16)) + end + + test "filter/2" do + refute Repo.one(from u in User, select: avg(u.id) |> filter(u.id < 69)) + assert %Decimal{} = Repo.one(from u in User, select: avg(u.id) |> filter(u.id >= 69)) + end + + test "fragment/1" do + refute Repo.one(where(User, [u], fragment("LENGTH(?)", u.email) == 50)) + assert %User{} = Repo.one(where(User, [u], fragment("LENGTH(?)", u.email) == 16)) + assert %User{email: "user@example.com"} = User + |> select([u], %{u | email: fragment("?", u.email)}) + |> where([u], fragment("LENGTH(?)", u.email) == 16) + |> Repo.one() + end + + test "from_now/2" do + assert %User{} = Repo.one(where(User, [u], u.inserted_at < from_now(3, "month"))) + end + + test "ilike/2" do + refute Repo.one(where(User, [u], ilike(u.email, "aaaaa"))) + assert %User{} = Repo.one(where(User, [u], ilike(u.email, "USER@example.com"))) + end + + test "in/2" do + assert %User{} = Repo.one(where(User, [u], u.email in ["user@example.com", "USER@example.com"])) + end + + test "is_nil/2" do + assert %User{} = Repo.one(where(User, [u], not is_nil(u.email))) + end + + test "json_extract_path/2" do + assert ~w[localhost 0.0.0.0.0 0.0.5.0.0] == Repo.all(from(u in UserSession, select: u.meta["remote_ip"])) + end + + test "like/2" do + refute Repo.one(where(User, [u], ilike(u.email, "aaaaa"))) + assert %User{} = Repo.one(where(User, [u], like(u.email, "user@example.com"))) + end + + test "map/2" do + assert %{email: "user@example.com"} == Repo.one(from u in User, select: map(u, [:email])) + end + + test "max/1" do + assert %User{email: "user@example.com"} = Repo.one(group_by(select(User, [u], %{u | id: max(u.email)}), [:email, :password_hash, :inserted_at, :updated_at])) + end + + test "merge/2" do + assert %{left: "left", email: "user@example.com"} == Repo.one(from u in User, select: merge(%{left: "left"}, %{email: u.email})) + end + + test "min/1" do + assert %User{email: "user@example.com"} = Repo.one(group_by(select(User, [u], 
%{u | email: min(u.email)}), [:id, :email, :password_hash, :inserted_at, :updated_at])) + end + + test "not/1" do + assert %User{email: "user@example.com"} = Repo.one(where(User, [u], not(u.id == 69))) + end + + test "or/2" do + assert %User{email: "user@example.com"} = Repo.one(where(User, [u], u.id == 69 or u.email == "user@example.com")) + end + + test "selected_as/2", %{user: %{inserted_at: posted}} do + query = from u in User, + select: %{ + posted: selected_as(u.inserted_at, :date), + sum_visits: u.id |> coalesce(0) |> sum() |> selected_as(:sum_visits) + }, + group_by: selected_as(:date), + order_by: selected_as(:sum_visits) + + assert %{posted: ^posted, sum_visits: %Decimal{}} = Repo.one(query) + end + + test "struct/2" do + assert %User{email: "user@example.com", inserted_at: nil} = Repo.one(select(User, [u], struct(u, [:email]))) + end + + test "sum/1" do + assert %User{email: "user@example.com"} = Repo.one(group_by(select(User, [u], %{u | id: sum(u.id)}), [:email, :password_hash, :inserted_at, :updated_at])) + end + + test "type/2" do + email = "string" + assert %User{email: "string"} = Repo.one(select(User, [u], %{u | email: type(^email, u.email)})) + end + end + + describe "Ecto.Query" do + + test "distinct/3" do + assert ~w[0.0.0.0.0 0.0.5.0.0 localhost] == Repo.all(from(us in UserSession, distinct: true, order_by: [us.meta["remote_ip"]], select: us.meta["remote_ip"])) + assert ~w[0.0.0.0.0 0.0.5.0.0 localhost] == Repo.all(from(us in UserSession, distinct: us.meta["remote_ip"], order_by: [us.inserted_at], select: us.meta["remote_ip"])) + assert ~w[localhost 0.0.5.0.0 0.0.0.0.0] == Repo.all(from(us in UserSession, distinct: [desc: us.meta["remote_ip"]], order_by: [us.inserted_at], select: us.meta["remote_ip"])) + end + + test "dynamic/2" do + assert %User{} = Repo.one(where(User, ^dynamic([u], u.id != 69))) + end + + test "except/2" do + assert nil == Repo.one(from u in User, select: u.email, except: ^(from u in User, select: u.email)) + end + + test "except_all/2" do + assert nil == Repo.one(from u in User, select: u.email, except_all: ^(from u in User, select: u.email)) + end + + test "first/2" do + assert %User{email: "user@example.com"} = Repo.one(first(User)) + end + + test "from/2" do + assert %User{email: "user@example.com"} = Repo.one(from(User)) + end + + test "group_by/2" do + assert {"user@example.com", 1} == Repo.one(from(u in User, group_by: u.email, select: {u.email, count(u.id)})) + end + + test "having/3" do + assert {"user@example.com", 1} == Repo.one(from(u in User, group_by: u.email, having: avg(u.id) > 10, select: {u.email, count(u.id)})) + end + + test "intersect/2" do + assert ["user@example.com"] == Repo.all(from u in User, select: u.email, intersect: ^(from u in User, select: u.email)) + end + + test "intersect_all/1" do + assert ["user@example.com"] == Repo.all(from u in User, select: u.email, intersect_all: ^(from u in User, select: u.email)) + end + + test "join/5" do + assert [%User{email: "user@example.com"}] = + from(User, as: :user) + |> join(:left, [user: user], session in assoc(user, :sessions), on: not is_nil(session.meta["remote_ip"]), as: :sessions) + |> preload([sessions: sessions], [sessions: sessions]) + |> where([user: user], user.email == "user@example.com") + |> Repo.all() + end + + test "last/1" do + assert %User{email: "user@example.com"} = Repo.one(last(User)) + end + + test "limit/2" do + assert [%UserSession{}, %UserSession{}] = Repo.all(order_by(limit(UserSession, 2), [:user_id])) + end + + test "lock/2" do + assert [] == 
Repo.all(from(u in User, where: u.id == 69, lock: "FOR SHARE NOWAIT"))
+    end
+
+    test "offset/2" do
+      assert %UserSession{token: "C"} = Repo.one(order_by(offset(UserSession, 2), [:token]))
+    end
+
+    test "or_having/2" do
+      assert %User{email: "user@example.com"} = User |> having([u], not is_nil(u.email)) |> or_having([u], u.email == "C") |> or_having([u], u.email == "user@example.com") |> group_by([:id]) |> Repo.one()
+    end
+
+    test "or_where/2" do
+      assert %User{email: "user@example.com"} = User |> or_where(email: "B") |> where(email: "A") |> or_where(email: "user@example.com") |> Repo.one()
+    end
+
+    test "order_by/3" do
+      assert [%{token: "C"}, %{token: "B"}, %{token: "A"}] = Repo.all(select(order_by(UserSession, [desc: :token, asc: :id]), [u], u))
+      assert [%{token: "A"}, %{token: "B"}, %{token: "C"}] = Repo.all(order_by(UserSession, [u], [asc: u.token, desc: u.id]))
+    end
+
+    test "preload/3", %{user: user} do
+      assert %User{email: "user@example.com", sessions: [%UserSession{}, %UserSession{}, %UserSession{}]} = Repo.preload(user, [:sessions], force: true)
+      assert %User{email: "user@example.com", sessions: [%UserSession{}, %UserSession{}, %UserSession{}]} = Repo.preload(user, [:sessions], in_parallel: false)
+      assert %User{email: nil, sessions: [%UserSession{}, %UserSession{}, %UserSession{}]} = Repo.preload(%User{id: user.id}, [:sessions])
+    end
+
+    test "reverse_order/1" do
+      assert Repo.all(reverse_order(order_by(User, asc: :id))) == Repo.all(order_by(User, desc: :id))
+    end
+
+    test "select/3" do
+      assert ["user@example.com"] == Repo.all(select(User, [u], u.email))
+      assert [2] == Repo.all(select(User, [u], 2))
+    end
+
+    test "select_merge/2" do
+      assert %{email: "body"} == Repo.one(select_merge(select(User, [u], %{email: u.email}), %{email: "body"}))
+    end
+
+    test "subquery/2" do
+      assert Repo.one(where(User, [u], u.email in subquery(select(User, [:email]))))
+      refute Repo.one(where(User, [u], u.email not in subquery(select(User, [:email]))))
+      assert Repo.one(select(User, [u], u.email in subquery(select(User, [:email]))))
+      assert [%Decimal{}] = Repo.all(from u in subquery(where(User, email: "user@example.com")), select: avg(u.id))
+      assert [%User{email: "user@example.com"}] = Repo.all(join(User, :inner, [u], t in subquery(where(User, email: "user@example.com")), as: :users))
+    end
+
+    test "union/1" do
+      assert ["user@example.com"] == Repo.all(from u in User, select: u.email, union: ^(from u in User, select: u.email))
+    end
+
+    test "union_all/1" do
+      assert ["user@example.com", "user@example.com"] == Repo.all(from u in User, select: u.email, union_all: ^(from u in User, select: u.email))
+    end
+
+    test "update/3" do
+      now = DateTime.utc_now()
+      assert {1, nil} = Repo.update_all(update(User, [set: [email: "B", updated_at: ^now]]), [])
+      assert [%{email: "B", updated_at: ^now}] = Repo.all(where(User, [email: "B"]))
+    end
+
+    test "where/2" do
+      assert [%User{email: "user@example.com"}] = Repo.all(where(User, [email: "user@example.com"]))
+    end
+
+    test "windows/2" do
+      assert [{"user@example.com", _decimal}] = Repo.all(from u in User, select: {u.email, over(avg(u.id), :email)}, windows: [email: [partition_by: u.email]])
+    end
+
+    test "with_cte/2" do
+      # join against the "sessions" CTE defined below rather than the raw
+      # users_sessions table, so the CTE is actually exercised
+      assert [
+        %User{},
+        %User{},
+        %User{}
+      ] = User
+          |> recursive_ctes(true)
+          |> with_cte("sessions", as: ^from(UserSession))
+          |> join(:left, [u], us in "sessions", on: us.user_id == u.id)
+          |> Repo.all()
+    end
+  end
+
+  describe "Repo" do
+
+    test "aggregate/3" do
+      assert 1 == Repo.aggregate(User, :count)
+      assert 1 == Repo.aggregate(User, :count)
+    end
+
+    test "aggregate/4" do
+      Repo.aggregate(User, :count, :id)
+      assert 1 == Repo.aggregate(User, :count, :id)
+    end
+
+    test "all/2" do
+      assert [%User{}] = Repo.all(User)
+      assert [%User{}] = Repo.all(User)
+    end
+
+    test "delete_all/2" do
+      assert {3, nil} == Repo.delete_all(UserSession)
+      assert [%User{}] = Repo.all(User)
+      assert {1, nil} = Repo.delete_all(where(User, [email: "user@example.com"]))
+      assert [] == Repo.all(User)
+    end
+
+    test "delete!/2", %{user: %{sessions: [session | _]}} do
+      assert Repo.delete!(session)
+    end
+
+    test "delete/2", %{user: %{sessions: [session | _]}} do
+      assert {:ok, %UserSession{}} = Repo.delete(session)
+    end
+
+    test "insert!/2" do
+      assert Repo.insert!(%User{})
+      assert Repo.insert!(%User{sessions: [%UserSession{}]})
+    end
+
+    test "insert/2" do
+      assert {:ok, %User{}} = Repo.insert(%User{})
+      assert {:ok, %User{}} = Repo.insert(%User{})
+    end
+
+    test "insert_all/3" do
+      now = DateTime.utc_now()
+      assert {2, [%User{}, %User{}]} = Repo.insert_all(User, [%{inserted_at: now, updated_at: now}, %{inserted_at: now, updated_at: now}], returning: true)
+      assert {2, nil} = Repo.insert_all(User, [%{inserted_at: now, updated_at: now}, %{inserted_at: now, updated_at: now}])
+      assert {2, nil} = Repo.insert_all(User, [%{inserted_at: {:placeholder, :now}, updated_at: {:placeholder, :now}}, %{inserted_at: {:placeholder, :now}, updated_at: {:placeholder, :now}}], placeholders: %{now: now})
+    end
+
+    test "insert_or_update!/2" do
+      changeset = User.changeset(%User{}, %{email: "body"})
+      assert user = Repo.insert_or_update!(changeset)
+      assert %User{email: "updated"} = Repo.insert_or_update!(User.changeset(user, %{email: "updated"}))
+      changeset = User.changeset(%User{}, %{email: "body"})
+      assert user = Repo.insert_or_update!(changeset)
+      assert %User{email: "updated"} = Repo.insert_or_update!(User.changeset(user, %{email: "updated"}))
+    end
+
+    test "insert_or_update/2" do
+      changeset = User.changeset(%User{}, %{email: "body"})
+      assert {:ok, user} = Repo.insert_or_update(changeset)
+      assert {:ok, %User{email: "updated"}} = Repo.insert_or_update(User.changeset(user, %{email: "updated"}))
+      changeset = User.changeset(%User{}, %{email: "body"})
+      assert {:ok, user} = Repo.insert_or_update(changeset)
+      assert {:ok, %User{email: "updated"}} = Repo.insert_or_update(User.changeset(user, %{email: "updated"}))
+    end
+
+    test "load/2" do
+      assert %User{email: "test"} = Repo.load(User, [email: "test"])
+    end
+
+    test "preload/3", %{user: user} do
+      assert ^user = Repo.preload(user, [:sessions])
+    end
+
+    test "reload!/2", %{user: %{sessions: [session | _]}} do
+      assert ^session = Repo.reload!(session)
+    end
+
+    test "reload/2", %{user: %{sessions: [session | _]}} do
+      assert ^session = Repo.reload(session)
+    end
+
+    test "update!/2", %{user: user} do
+      changeset = User.changeset(user, %{email: "body"})
+      assert %User{email: "body"} = Repo.update!(changeset)
+    end
+
+    test "update/2", %{user: user} do
+      changeset = User.changeset(user, %{email: "body"})
+      assert {:ok, %User{email: "body"}} = Repo.update(changeset)
+    end
+
+    test "checked_out?/0" do
+      refute Repo.checked_out?()
+    end
+
+    test "checkout/2" do
+      assert %User{} = Repo.checkout(fn ->
+        assert Repo.checked_out?()
+        Repo.one(User)
+      end)
+    end
+
+    test "exists?/2" do
+      assert Repo.exists?(User)
+      assert Repo.exists?(where(User, email: "user@example.com"))
+    end
+
+    test "get!/3", %{user: user} do
+      assert Repo.get!(User, user.id)
+    end
+
+    test "get/3", %{user: user} do
assert Repo.get(User, user.id) + end + + test "get_by!/3" do + assert Repo.get_by!(User, [email: "user@example.com"]) + end + + test "get_by/3" do + assert Repo.get_by(User, [email: "user@example.com"]) + end + + test "one!/2" do + assert %User{} = Repo.one!(User) + end + + test "one/2" do + assert %User{} = Repo.one(User) + end + + test "stream/2", %{user: %{sessions: sessions}} do + assert {:ok, ^sessions} = Repo.transaction(fn -> Enum.to_list(Repo.stream(UserSession, max_rows: 2)) end) + end + + test "update_all/2" do + now = DateTime.utc_now() + assert {1, nil} == Repo.update_all(User, [set: [email: "new", updated_at: now]]) + assert [%User{email: "new", updated_at: ^now}] = Repo.all(User) + end + end +end diff --git a/test/support/accounts/user.ex b/test/support/accounts/user.ex new file mode 100644 index 0000000..a429b0a --- /dev/null +++ b/test/support/accounts/user.ex @@ -0,0 +1,18 @@ +defmodule EctoQLC.Accounts.User do + use Ecto.Schema + + schema "users" do + field :email, :string + field :password, :string, virtual: true, redact: true + field :password_confirmation, :string, virtual: true, redact: true + field :password_hash, :string, redact: true + has_many :sessions, EctoQLC.Accounts.UserSession, on_delete: :delete_all + timestamps(type: :utc_datetime_usec) + end + + def changeset(changeset, attrs \\ %{}) do + changeset + |> Ecto.Changeset.cast(attrs, ~w[email password_hash password password_confirmation]a) + |> Ecto.Changeset.unique_constraint(:id, name: :primary_key) + end +end diff --git a/test/support/accounts/user_session.ex b/test/support/accounts/user_session.ex new file mode 100644 index 0000000..b0b1684 --- /dev/null +++ b/test/support/accounts/user_session.ex @@ -0,0 +1,23 @@ +defmodule EctoQLC.Accounts.UserSession do + use Ecto.Schema + + defmodule Meta do + use Ecto.Schema + embedded_schema do + field :remote_ip, :string + end + end + + schema "users_sessions" do + field :token, :string + embeds_one :meta, Meta + belongs_to :user, EctoQLC.Accounts.User + timestamps(updated_at: false, type: :utc_datetime_usec) + end + + def changeset(changeset, attrs \\ %{}) do + changeset + |> Ecto.Changeset.cast(attrs, ~w[meta token]a) + |> Ecto.Changeset.unique_constraint(:user_id, name: :users_sessions_pkey) + end +end diff --git a/test/support/data_case.ex b/test/support/data_case.ex new file mode 100644 index 0000000..632d2c6 --- /dev/null +++ b/test/support/data_case.ex @@ -0,0 +1,48 @@ +defmodule EctoQLC.DataCase do + use ExUnit.CaseTemplate + + using options do + adapters = %{mnesia: EctoQLC.Adapters.Mnesia, ets: EctoQLC.Adapters.ETS, dets: EctoQLC.Adapters.DETS, postgres: Ecto.Adapters.Postgres} + adapter = adapters[options[:repo]] + quote do + import Ecto.Query + alias EctoQLC.Accounts.User + alias EctoQLC.Accounts.UserSession + + defmodule Repo do + use Ecto.Repo, otp_app: :ecto_qlc, adapter: unquote(adapter) + end + + setup_all context do + if unquote(options[:repo]) == :postgres do + start_supervised(Repo) + Repo.__adapter__.storage_up(Repo.config) + Enum.map([{1, EctoQLC.Repo.Migrations.CreateUser}, {2, EctoQLC.Repo.Migrations.CreateUserSession}, {3, EctoQLC.Repo.Migrations.AddTokenToUserSession}], fn {version, module} -> Ecto.Migrator.up(Repo, version, module, log: false) end) + end + context + end + + setup context do + Application.put_env(:ecto_qlc, Repo, log: true) + if unquote(options[:repo]) == :postgres do + pid = Ecto.Adapters.SQL.Sandbox.start_owner!(Repo, shared: not context[:async]) + on_exit(fn -> Ecto.Adapters.SQL.Sandbox.stop_owner(pid) end) + else + 
tmp_dir = System.tmp_dir!() + mnesia_dir = '#{Path.join(tmp_dir, "ecto-qlc-#{Ecto.UUID.generate}")}' + dets_dir = '#{Path.join(tmp_dir, "ecto-qlc-#{Ecto.UUID.generate}")}' + File.mkdir(mnesia_dir) + File.mkdir(dets_dir) + Application.put_env(:ecto_qlc, Repo, [dir: mnesia_dir]) + Application.put_env(:mnesia, :dir, mnesia_dir) + Application.put_env(:dets, :dir, dets_dir) + Repo.__adapter__.storage_down(Repo.config) + start_supervised(Repo) + Repo.__adapter__.storage_up(Repo.config) + Enum.map([{1, EctoQLC.Repo.Migrations.CreateUser}, {2, EctoQLC.Repo.Migrations.CreateUserSession}, {3, EctoQLC.Repo.Migrations.AddTokenToUserSession}], fn {version, module} -> Ecto.Migrator.up(Repo, version, module, log: false) end) + end + %{user: Repo.insert!(%User{email: "user@example.com", sessions: [%UserSession{token: "C", meta: %UserSession.Meta{remote_ip: "localhost"}}, %UserSession{token: "B", meta: %UserSession.Meta{remote_ip: "0.0.0.0.0"}}, %UserSession{token: "A", meta: %UserSession.Meta{remote_ip: "0.0.5.0.0"}}]})} + end + end + end +end diff --git a/test/test_helper.exs b/test/test_helper.exs new file mode 100644 index 0000000..b2f451a --- /dev/null +++ b/test/test_helper.exs @@ -0,0 +1,2 @@ +Enum.map(File.ls!("priv/repo/migrations"), &Code.compile_file(Path.join("priv/repo/migrations", &1))) +ExUnit.start()
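EctoQLC.DataCase above is a parameterized case template: `use EctoQLC.DataCase, repo: :ets`
(or :dets, :mnesia, :postgres) defines a Repo backed by the matching adapter, creates the
storage, runs the bundled migrations, and seeds one user with three sessions before each
test. A minimal sketch of a consuming test module follows; the module name and assertion
are illustrative only, not part of this patch:

    defmodule EctoQLC.ETSSmokeTest do
      # :ets selects EctoQLC.Adapters.ETS via the adapters map in DataCase;
      # the template defines a Repo module and aliases User/UserSession for us.
      use EctoQLC.DataCase, repo: :ets

      test "round-trips the seeded user", %{user: user} do
        # the setup block in DataCase inserted this user before the test ran
        assert %User{email: "user@example.com"} = Repo.get(User, user.id)
      end
    end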