From db75ae00a2041a167a454daaf23aaf5a123208d7 Mon Sep 17 00:00:00 2001 From: Matthijs Brobbel Date: Tue, 18 Jun 2024 09:25:26 +0200 Subject: [PATCH 1/7] Remove old salsa --- Cargo.toml | 41 - components/salsa-macros/Cargo.toml | 17 - components/salsa-macros/LICENSE-APACHE | 1 - components/salsa-macros/LICENSE-MIT | 1 - components/salsa-macros/README.md | 1 - .../salsa-macros/src/database_storage.rs | 256 ------ components/salsa-macros/src/lib.rs | 148 ---- components/salsa-macros/src/parenthesized.rs | 12 - components/salsa-macros/src/query_group.rs | 767 ----------------- examples/compiler/compiler.rs | 72 -- examples/compiler/implementation.rs | 23 - examples/compiler/interner.rs | 10 - examples/compiler/main.rs | 40 - examples/compiler/values.rs | 35 - examples/hello_world/main.rs | 104 --- examples/selection/main.rs | 36 - examples/selection/util1.rs | 16 - examples/selection/util2.rs | 20 - src/debug.rs | 66 -- src/derived.rs | 241 ------ src/derived/execute.rs | 134 --- src/derived/fetch.rs | 109 --- src/derived/key_to_key_index.rs | 58 -- src/derived/lru.rs | 39 - src/derived/maybe_changed_after.rs | 181 ---- src/derived/memo.rs | 134 --- src/derived/sync.rs | 87 -- src/doctest.rs | 114 --- src/durability.rs | 49 -- src/hash.rs | 5 - src/input.rs | 337 -------- src/intern_id.rs | 129 --- src/interned.rs | 424 ---------- src/lib.rs | 774 ------------------ src/plumbing.rs | 242 ------ src/revision.rs | 70 -- src/runtime.rs | 690 ---------------- src/runtime/dependency_graph.rs | 277 ------- src/runtime/local_state.rs | 244 ------ src/storage.rs | 59 -- tests/cycles.rs | 492 ----------- tests/dyn_trait.rs | 28 - tests/incremental/constants.rs | 148 ---- tests/incremental/counter.rs | 14 - tests/incremental/implementation.rs | 58 -- tests/incremental/log.rs | 16 - tests/incremental/main.rs | 9 - tests/incremental/memoized_dep_inputs.rs | 61 -- tests/incremental/memoized_inputs.rs | 86 -- tests/incremental/memoized_volatile.rs | 78 -- tests/interned.rs | 98 --- 
tests/lru.rs | 163 ---- tests/macros.rs | 11 - tests/no_send_sync.rs | 33 - tests/on_demand_inputs.rs | 156 ---- tests/panic_safely.rs | 95 --- tests/parallel/cancellation.rs | 132 --- tests/parallel/frozen.rs | 58 -- tests/parallel/independent.rs | 29 - tests/parallel/main.rs | 13 - tests/parallel/parallel_cycle_all_recover.rs | 110 --- tests/parallel/parallel_cycle_mid_recover.rs | 110 --- tests/parallel/parallel_cycle_none_recover.rs | 71 -- tests/parallel/parallel_cycle_one_recovers.rs | 95 --- tests/parallel/race.rs | 38 - tests/parallel/setup.rs | 197 ----- tests/parallel/signal.rs | 40 - tests/parallel/stress.rs | 184 ----- tests/parallel/true_parallel.rs | 126 --- tests/remove_input.rs | 48 -- tests/storage_varieties/implementation.rs | 19 - tests/storage_varieties/main.rs | 5 - tests/storage_varieties/queries.rs | 22 - tests/storage_varieties/tests.rs | 49 -- tests/transparent.rs | 40 - tests/variadic.rs | 51 -- 76 files changed, 9046 deletions(-) delete mode 100644 Cargo.toml delete mode 100644 components/salsa-macros/Cargo.toml delete mode 120000 components/salsa-macros/LICENSE-APACHE delete mode 120000 components/salsa-macros/LICENSE-MIT delete mode 120000 components/salsa-macros/README.md delete mode 100644 components/salsa-macros/src/database_storage.rs delete mode 100644 components/salsa-macros/src/lib.rs delete mode 100644 components/salsa-macros/src/parenthesized.rs delete mode 100644 components/salsa-macros/src/query_group.rs delete mode 100644 examples/compiler/compiler.rs delete mode 100644 examples/compiler/implementation.rs delete mode 100644 examples/compiler/interner.rs delete mode 100644 examples/compiler/main.rs delete mode 100644 examples/compiler/values.rs delete mode 100644 examples/hello_world/main.rs delete mode 100644 examples/selection/main.rs delete mode 100644 examples/selection/util1.rs delete mode 100644 examples/selection/util2.rs delete mode 100644 src/debug.rs delete mode 100644 src/derived.rs delete mode 100644 
src/derived/execute.rs delete mode 100644 src/derived/fetch.rs delete mode 100644 src/derived/key_to_key_index.rs delete mode 100644 src/derived/lru.rs delete mode 100644 src/derived/maybe_changed_after.rs delete mode 100644 src/derived/memo.rs delete mode 100644 src/derived/sync.rs delete mode 100644 src/doctest.rs delete mode 100644 src/durability.rs delete mode 100644 src/hash.rs delete mode 100644 src/input.rs delete mode 100644 src/intern_id.rs delete mode 100644 src/interned.rs delete mode 100644 src/lib.rs delete mode 100644 src/plumbing.rs delete mode 100644 src/revision.rs delete mode 100644 src/runtime.rs delete mode 100644 src/runtime/dependency_graph.rs delete mode 100644 src/runtime/local_state.rs delete mode 100644 src/storage.rs delete mode 100644 tests/cycles.rs delete mode 100644 tests/dyn_trait.rs delete mode 100644 tests/incremental/constants.rs delete mode 100644 tests/incremental/counter.rs delete mode 100644 tests/incremental/implementation.rs delete mode 100644 tests/incremental/log.rs delete mode 100644 tests/incremental/main.rs delete mode 100644 tests/incremental/memoized_dep_inputs.rs delete mode 100644 tests/incremental/memoized_inputs.rs delete mode 100644 tests/incremental/memoized_volatile.rs delete mode 100644 tests/interned.rs delete mode 100644 tests/lru.rs delete mode 100644 tests/macros.rs delete mode 100644 tests/no_send_sync.rs delete mode 100644 tests/on_demand_inputs.rs delete mode 100644 tests/panic_safely.rs delete mode 100644 tests/parallel/cancellation.rs delete mode 100644 tests/parallel/frozen.rs delete mode 100644 tests/parallel/independent.rs delete mode 100644 tests/parallel/main.rs delete mode 100644 tests/parallel/parallel_cycle_all_recover.rs delete mode 100644 tests/parallel/parallel_cycle_mid_recover.rs delete mode 100644 tests/parallel/parallel_cycle_none_recover.rs delete mode 100644 tests/parallel/parallel_cycle_one_recovers.rs delete mode 100644 tests/parallel/race.rs delete mode 100644 
tests/parallel/setup.rs delete mode 100644 tests/parallel/signal.rs delete mode 100644 tests/parallel/stress.rs delete mode 100644 tests/parallel/true_parallel.rs delete mode 100644 tests/remove_input.rs delete mode 100644 tests/storage_varieties/implementation.rs delete mode 100644 tests/storage_varieties/main.rs delete mode 100644 tests/storage_varieties/queries.rs delete mode 100644 tests/storage_varieties/tests.rs delete mode 100644 tests/transparent.rs delete mode 100644 tests/variadic.rs diff --git a/Cargo.toml b/Cargo.toml deleted file mode 100644 index 730da9d37..000000000 --- a/Cargo.toml +++ /dev/null @@ -1,41 +0,0 @@ -[package] -name = "salsa" -version = "0.17.0-pre.2" # CHECK salsa-macros version -authors = ["Salsa developers"] -edition = "2018" -license = "Apache-2.0 OR MIT" -repository = "https://github.com/salsa-rs/salsa" -description = "A generic framework for on-demand, incrementalized computation (experimental)" - -[dependencies] -arc-swap = "1.4.0" -crossbeam-utils = { version = "0.8", default-features = false } -dashmap = "5.3.4" -hashlink = "0.8.0" -indexmap = "2" -lock_api = "0.4.7" -log = "0.4.5" -parking_lot = "0.12.1" -rustc-hash = "1.0" -smallvec = "1.0.0" -oorandom = "11" -salsa-macros = { version = "0.17.0-pre.2", path = "components/salsa-macros" } - -[dev-dependencies] -diff = "0.1.0" -env_logger = "0.9" -linked-hash-map = "0.5.2" -rand = "0.8" -rand_distr = "0.4.3" -test-log = "0.2.11" -insta = "1.8.0" - -[workspace] -members = [ - "components/salsa-macros", - "components/salsa-2022", - "components/salsa-2022-macros", - "examples-2022/calc", - "examples-2022/lazy-input", - "salsa-2022-tests", -] diff --git a/components/salsa-macros/Cargo.toml b/components/salsa-macros/Cargo.toml deleted file mode 100644 index 6e4091e14..000000000 --- a/components/salsa-macros/Cargo.toml +++ /dev/null @@ -1,17 +0,0 @@ -[package] -name = "salsa-macros" -version = "0.17.0-pre.2" -authors = ["Salsa developers"] -edition = "2018" -license = "Apache-2.0 OR 
MIT" -repository = "https://github.com/salsa-rs/salsa" -description = "Procedural macros for the salsa crate" - -[lib] -proc-macro = true - -[dependencies] -heck = "0.4" -proc-macro2 = "1.0" -quote = "1.0" -syn = { version = "1.0", features = ["full", "extra-traits"] } diff --git a/components/salsa-macros/LICENSE-APACHE b/components/salsa-macros/LICENSE-APACHE deleted file mode 120000 index 1cd601d0a..000000000 --- a/components/salsa-macros/LICENSE-APACHE +++ /dev/null @@ -1 +0,0 @@ -../../LICENSE-APACHE \ No newline at end of file diff --git a/components/salsa-macros/LICENSE-MIT b/components/salsa-macros/LICENSE-MIT deleted file mode 120000 index b2cfbdc7b..000000000 --- a/components/salsa-macros/LICENSE-MIT +++ /dev/null @@ -1 +0,0 @@ -../../LICENSE-MIT \ No newline at end of file diff --git a/components/salsa-macros/README.md b/components/salsa-macros/README.md deleted file mode 120000 index fe8400541..000000000 --- a/components/salsa-macros/README.md +++ /dev/null @@ -1 +0,0 @@ -../../README.md \ No newline at end of file diff --git a/components/salsa-macros/src/database_storage.rs b/components/salsa-macros/src/database_storage.rs deleted file mode 100644 index dea28e289..000000000 --- a/components/salsa-macros/src/database_storage.rs +++ /dev/null @@ -1,256 +0,0 @@ -use heck::ToSnakeCase; -use proc_macro::TokenStream; -use syn::parse::{Parse, ParseStream}; -use syn::punctuated::Punctuated; -use syn::{Ident, ItemStruct, Path, Token}; - -type PunctuatedQueryGroups = Punctuated; - -pub(crate) fn database(args: TokenStream, input: TokenStream) -> TokenStream { - let args = syn::parse_macro_input!(args as QueryGroupList); - let input = syn::parse_macro_input!(input as ItemStruct); - - let query_groups = &args.query_groups; - let database_name = &input.ident; - let visibility = &input.vis; - let db_storage_field = quote! { storage }; - - let mut output = proc_macro2::TokenStream::new(); - output.extend(quote! 
{ #input }); - - let query_group_names_snake: Vec<_> = query_groups - .iter() - .map(|query_group| { - let group_name = query_group.name(); - Ident::new(&group_name.to_string().to_snake_case(), group_name.span()) - }) - .collect(); - - let query_group_storage_names: Vec<_> = query_groups - .iter() - .map(|QueryGroup { group_path }| { - quote! { - <#group_path as salsa::plumbing::QueryGroup>::GroupStorage - } - }) - .collect(); - - // For each query group `foo::MyGroup` create a link to its - // `foo::MyGroupGroupStorage` - let mut storage_fields = proc_macro2::TokenStream::new(); - let mut storage_initializers = proc_macro2::TokenStream::new(); - let mut has_group_impls = proc_macro2::TokenStream::new(); - for (((query_group, group_name_snake), group_storage), group_index) in query_groups - .iter() - .zip(&query_group_names_snake) - .zip(&query_group_storage_names) - .zip(0_u16..) - { - let group_path = &query_group.group_path; - - // rewrite the last identifier (`MyGroup`, above) to - // (e.g.) `MyGroupGroupStorage`. - storage_fields.extend(quote! { - #group_name_snake: #group_storage, - }); - - // rewrite the last identifier (`MyGroup`, above) to - // (e.g.) `MyGroupGroupStorage`. - storage_initializers.extend(quote! { - #group_name_snake: #group_storage::new(#group_index), - }); - - // ANCHOR:HasQueryGroup - has_group_impls.extend(quote! { - impl salsa::plumbing::HasQueryGroup<#group_path> for #database_name { - fn group_storage(&self) -> &#group_storage { - &self.#db_storage_field.query_store().#group_name_snake - } - - fn group_storage_mut(&mut self) -> (&#group_storage, &mut salsa::Runtime) { - let (query_store_mut, runtime) = self.#db_storage_field.query_store_mut(); - (&query_store_mut.#group_name_snake, runtime) - } - } - }); - // ANCHOR_END:HasQueryGroup - } - - // create group storage wrapper struct - output.extend(quote! 
{ - #[doc(hidden)] - #visibility struct __SalsaDatabaseStorage { - #storage_fields - } - - impl Default for __SalsaDatabaseStorage { - fn default() -> Self { - Self { - #storage_initializers - } - } - } - }); - - // Create a tuple (D1, D2, ...) where Di is the data for a given query group. - let mut database_data = vec![]; - for QueryGroup { group_path } in query_groups { - database_data.push(quote! { - <#group_path as salsa::plumbing::QueryGroup>::GroupData - }); - } - - // ANCHOR:DatabaseStorageTypes - output.extend(quote! { - impl salsa::plumbing::DatabaseStorageTypes for #database_name { - type DatabaseStorage = __SalsaDatabaseStorage; - } - }); - // ANCHOR_END:DatabaseStorageTypes - - // ANCHOR:DatabaseOps - let mut fmt_ops = proc_macro2::TokenStream::new(); - let mut maybe_changed_ops = proc_macro2::TokenStream::new(); - let mut cycle_recovery_strategy_ops = proc_macro2::TokenStream::new(); - let mut for_each_ops = proc_macro2::TokenStream::new(); - for ((QueryGroup { group_path }, group_storage), group_index) in query_groups - .iter() - .zip(&query_group_storage_names) - .zip(0_u16..) - { - fmt_ops.extend(quote! { - #group_index => { - let storage: &#group_storage = - >::group_storage(self); - storage.fmt_index(self, input, fmt) - } - }); - maybe_changed_ops.extend(quote! { - #group_index => { - let storage: &#group_storage = - >::group_storage(self); - storage.maybe_changed_after(self, input, revision) - } - }); - cycle_recovery_strategy_ops.extend(quote! { - #group_index => { - let storage: &#group_storage = - >::group_storage(self); - storage.cycle_recovery_strategy(self, input) - } - }); - for_each_ops.extend(quote! { - let storage: &#group_storage = - >::group_storage(self); - storage.for_each_query(runtime, &mut op); - }); - } - output.extend(quote! 
{ - impl salsa::plumbing::DatabaseOps for #database_name { - fn ops_database(&self) -> &dyn salsa::Database { - self - } - - fn ops_salsa_runtime(&self) -> &salsa::Runtime { - self.#db_storage_field.salsa_runtime() - } - - fn ops_salsa_runtime_mut(&mut self) -> &mut salsa::Runtime { - self.#db_storage_field.salsa_runtime_mut() - } - - fn fmt_index( - &self, - input: salsa::DatabaseKeyIndex, - fmt: &mut std::fmt::Formatter<'_>, - ) -> std::fmt::Result { - match input.group_index() { - #fmt_ops - i => panic!("salsa: invalid group index {}", i) - } - } - - fn maybe_changed_after( - &self, - input: salsa::DatabaseKeyIndex, - revision: salsa::Revision - ) -> bool { - match input.group_index() { - #maybe_changed_ops - i => panic!("salsa: invalid group index {}", i) - } - } - - fn cycle_recovery_strategy( - &self, - input: salsa::DatabaseKeyIndex, - ) -> salsa::plumbing::CycleRecoveryStrategy { - match input.group_index() { - #cycle_recovery_strategy_ops - i => panic!("salsa: invalid group index {}", i) - } - } - - fn for_each_query( - &self, - mut op: &mut dyn FnMut(&dyn salsa::plumbing::QueryStorageMassOps), - ) { - let runtime = salsa::Database::salsa_runtime(self); - #for_each_ops - } - } - }); - // ANCHOR_END:DatabaseOps - - output.extend(has_group_impls); - - if std::env::var("SALSA_DUMP").is_ok() { - println!("~~~ database_storage"); - println!("{}", output); - println!("~~~ database_storage"); - } - - output.into() -} - -#[derive(Clone, Debug)] -struct QueryGroupList { - query_groups: PunctuatedQueryGroups, -} - -impl Parse for QueryGroupList { - fn parse(input: ParseStream) -> syn::Result { - let query_groups: PunctuatedQueryGroups = input.parse_terminated(QueryGroup::parse)?; - Ok(QueryGroupList { query_groups }) - } -} - -#[derive(Clone, Debug)] -struct QueryGroup { - group_path: Path, -} - -impl QueryGroup { - /// The name of the query group trait. 
- fn name(&self) -> Ident { - self.group_path.segments.last().unwrap().ident.clone() - } -} - -impl Parse for QueryGroup { - /// ```ignore - /// impl HelloWorldDatabase; - /// ``` - fn parse(input: ParseStream) -> syn::Result { - let group_path: Path = input.parse()?; - Ok(QueryGroup { group_path }) - } -} - -struct Nothing; - -impl Parse for Nothing { - fn parse(_input: ParseStream) -> syn::Result { - Ok(Nothing) - } -} diff --git a/components/salsa-macros/src/lib.rs b/components/salsa-macros/src/lib.rs deleted file mode 100644 index e50236fe7..000000000 --- a/components/salsa-macros/src/lib.rs +++ /dev/null @@ -1,148 +0,0 @@ -//! This crate provides salsa's macros and attributes. - -#![recursion_limit = "256"] - -extern crate proc_macro; -extern crate proc_macro2; -#[macro_use] -extern crate quote; - -use proc_macro::TokenStream; - -mod database_storage; -mod parenthesized; -mod query_group; - -/// The decorator that defines a salsa "query group" trait. This is a -/// trait that defines everything that a block of queries need to -/// execute, as well as defining the queries themselves that are -/// exported for others to use. -/// -/// This macro declares the "prototype" for a group of queries. It will -/// expand into a trait and a set of structs, one per query. -/// -/// For each query, you give the name of the accessor method to invoke -/// the query (e.g., `my_query`, below), as well as its parameter -/// types and the output type. You also give the name for a query type -/// (e.g., `MyQuery`, below) that represents the query, and optionally -/// other details, such as its storage. 
-/// -/// # Examples -/// -/// The simplest example is something like this: -/// -/// ```ignore -/// #[salsa::query_group] -/// trait TypeckDatabase { -/// #[salsa::input] // see below for other legal attributes -/// fn my_query(&self, input: u32) -> u64; -/// -/// /// Queries can have any number of inputs (including zero); if there -/// /// is not exactly one input, then the key type will be -/// /// a tuple of the input types, so in this case `(u32, f32)`. -/// fn other_query(&self, input1: u32, input2: f32) -> u64; -/// } -/// ``` -/// -/// Here is a list of legal `salsa::XXX` attributes: -/// -/// - Storage attributes: control how the query data is stored and set. These -/// are described in detail in the section below. -/// - `#[salsa::input]` -/// - `#[salsa::memoized]` -/// - `#[salsa::dependencies]` -/// - Query execution: -/// - `#[salsa::invoke(path::to::my_fn)]` -- for a non-input, this -/// indicates the function to call when a query must be -/// recomputed. The default is to call a function in the same -/// module with the same name as the query. -/// - `#[query_type(MyQueryTypeName)]` specifies the name of the -/// dummy struct created for the query. Default is the name of the -/// query, in camel case, plus the word "Query" (e.g., -/// `MyQueryQuery` and `OtherQueryQuery` in the examples above). -/// -/// # Storage attributes -/// -/// Here are the possible storage values for each query. The default -/// is `storage memoized`. -/// -/// ## Input queries -/// -/// Specifying `storage input` will give you an **input -/// query**. Unlike derived queries, whose value is given by a -/// function, input queries are explicitly set by doing -/// `db.query(QueryType).set(key, value)` (where `QueryType` is the -/// `type` specified for the query). Accessing a value that has not -/// yet been set will panic. 
Each time you invoke `set`, we assume the -/// value has changed, and so we will potentially re-execute derived -/// queries that read (transitively) from this input. -/// -/// ## Derived queries -/// -/// Derived queries are specified by a function. -/// -/// - `#[salsa::memoized]` (the default) -- The result is memoized -/// between calls. If the inputs have changed, we will recompute -/// the value, but then compare against the old memoized value, -/// which can significantly reduce the amount of recomputation -/// required in new revisions. This does require that the value -/// implements `Eq`. -/// - `#[salsa::dependencies]` -- does not cache the value, so it will -/// be recomputed every time it is needed. We do track the inputs, however, -/// so if they have not changed, then things that rely on this query -/// may be known not to have changed. -/// -/// ## Attribute combinations -/// -/// Some attributes are mutually exclusive. For example, it is an error to add -/// multiple storage specifiers: -/// -/// ```compile_fail -/// # use salsa_macros as salsa; -/// #[salsa::query_group] -/// trait CodegenDatabase { -/// #[salsa::input] -/// #[salsa::memoized] -/// fn my_query(&self, input: u32) -> u64; -/// } -/// ``` -/// -/// It is also an error to annotate a function to `invoke` on an `input` query: -/// -/// ```compile_fail -/// # use salsa_macros as salsa; -/// #[salsa::query_group] -/// trait CodegenDatabase { -/// #[salsa::input] -/// #[salsa::invoke(typeck::my_query)] -/// fn my_query(&self, input: u32) -> u64; -/// } -/// ``` -#[proc_macro_attribute] -pub fn query_group(args: TokenStream, input: TokenStream) -> TokenStream { - query_group::query_group(args, input) -} - -/// This attribute is placed on your database struct. It takes a list of the -/// query groups that your database supports. 
The format looks like so: -/// -/// ```rust,ignore -/// #[salsa::database(MyQueryGroup1, MyQueryGroup2)] -/// struct MyDatabase { -/// runtime: salsa::Runtime, // <-- your database will need this field, too -/// } -/// ``` -/// -/// Here, the struct `MyDatabase` would support the two query groups -/// `MyQueryGroup1` and `MyQueryGroup2`. In addition to the `database` -/// attribute, the struct needs to have a `runtime` field (of type -/// [`salsa::Runtime`]) and to implement the `salsa::Database` trait. -/// -/// See [the `hello_world` example][hw] for more details. -/// -/// [`salsa::Runtime`]: struct.Runtime.html -/// [hw]: https://github.com/salsa-rs/salsa/tree/master/examples/hello_world -#[proc_macro_attribute] -pub fn database(args: TokenStream, input: TokenStream) -> TokenStream { - database_storage::database(args, input) -} diff --git a/components/salsa-macros/src/parenthesized.rs b/components/salsa-macros/src/parenthesized.rs deleted file mode 100644 index e98176446..000000000 --- a/components/salsa-macros/src/parenthesized.rs +++ /dev/null @@ -1,12 +0,0 @@ -pub(crate) struct Parenthesized(pub T); - -impl syn::parse::Parse for Parenthesized -where - T: syn::parse::Parse, -{ - fn parse(input: syn::parse::ParseStream) -> syn::Result { - let content; - syn::parenthesized!(content in input); - content.parse::().map(Parenthesized) - } -} diff --git a/components/salsa-macros/src/query_group.rs b/components/salsa-macros/src/query_group.rs deleted file mode 100644 index 675617f6a..000000000 --- a/components/salsa-macros/src/query_group.rs +++ /dev/null @@ -1,767 +0,0 @@ -use std::convert::TryFrom; - -use crate::parenthesized::Parenthesized; -use heck::ToUpperCamelCase; -use proc_macro::TokenStream; -use proc_macro2::Span; -use quote::ToTokens; -use syn::{ - parse_macro_input, parse_quote, spanned::Spanned, Attribute, Error, FnArg, Ident, ItemTrait, - ReturnType, TraitItem, Type, -}; - -/// Implementation for `[salsa::query_group]` decorator. 
-pub(crate) fn query_group(args: TokenStream, input: TokenStream) -> TokenStream { - let group_struct = parse_macro_input!(args as Ident); - let input: ItemTrait = parse_macro_input!(input as ItemTrait); - // println!("args: {:#?}", args); - // println!("input: {:#?}", input); - - let input_span = input.span(); - let (trait_attrs, salsa_attrs) = filter_attrs(input.attrs); - if !salsa_attrs.is_empty() { - return Error::new( - input_span, - format!("unsupported attributes: {:?}", salsa_attrs), - ) - .to_compile_error() - .into(); - } - - let trait_vis = input.vis; - let trait_name = input.ident; - let _generics = input.generics.clone(); - let dyn_db = quote! { dyn #trait_name }; - - // Decompose the trait into the corresponding queries. - let mut queries = vec![]; - for item in input.items { - if let TraitItem::Method(method) = item { - let query_name = method.sig.ident.to_string(); - - let mut storage = QueryStorage::Memoized; - let mut cycle = None; - let mut invoke = None; - - let mut query_type = - format_ident!("{}Query", query_name.to_string().to_upper_camel_case()); - let mut num_storages = 0; - - // Extract attributes. 
- let (attrs, salsa_attrs) = filter_attrs(method.attrs); - for SalsaAttr { name, tts, span } in salsa_attrs { - match name.as_str() { - "memoized" => { - storage = QueryStorage::Memoized; - num_storages += 1; - } - "dependencies" => { - storage = QueryStorage::Dependencies; - num_storages += 1; - } - "input" => { - storage = QueryStorage::Input; - num_storages += 1; - } - "interned" => { - storage = QueryStorage::Interned; - num_storages += 1; - } - "cycle" => { - cycle = Some(parse_macro_input!(tts as Parenthesized).0); - } - "invoke" => { - invoke = Some(parse_macro_input!(tts as Parenthesized).0); - } - "query_type" => { - query_type = parse_macro_input!(tts as Parenthesized).0; - } - "transparent" => { - storage = QueryStorage::Transparent; - num_storages += 1; - } - _ => { - return Error::new(span, format!("unknown salsa attribute `{}`", name)) - .to_compile_error() - .into(); - } - } - } - - let sig_span = method.sig.span(); - // Check attribute combinations. - if num_storages > 1 { - return Error::new(sig_span, "multiple storage attributes specified") - .to_compile_error() - .into(); - } - match &invoke { - Some(invoke) if storage == QueryStorage::Input => { - return Error::new( - invoke.span(), - "#[salsa::invoke] cannot be set on #[salsa::input] queries", - ) - .to_compile_error() - .into(); - } - _ => {} - } - - // Extract keys. - let mut iter = method.sig.inputs.iter(); - let self_receiver = match iter.next() { - Some(FnArg::Receiver(sr)) if sr.mutability.is_none() => sr, - _ => { - return Error::new( - sig_span, - format!("first argument of query `{}` must be `&self`", query_name), - ) - .to_compile_error() - .into(); - } - }; - let mut keys: Vec<(Ident, Type)> = vec![]; - for (idx, arg) in iter.enumerate() { - match arg { - FnArg::Typed(syn::PatType { pat, ty, .. 
}) => keys.push(( - match pat.as_ref() { - syn::Pat::Ident(ident_pat) => ident_pat.ident.clone(), - _ => format_ident!("key{}", idx), - }, - Type::clone(ty), - )), - arg => { - return Error::new( - arg.span(), - format!("unsupported argument `{:?}` of `{}`", arg, query_name,), - ) - .to_compile_error() - .into(); - } - } - } - - // Extract value. - let value = match method.sig.output { - ReturnType::Type(_, ref ty) => ty.as_ref().clone(), - ref ret => { - return Error::new( - ret.span(), - format!("unsupported return type `{:?}` of `{}`", ret, query_name), - ) - .to_compile_error() - .into(); - } - }; - - // For `#[salsa::interned]` keys, we create a "lookup key" automatically. - // - // For a query like: - // - // fn foo(&self, x: Key1, y: Key2) -> u32 - // - // we would create - // - // fn lookup_foo(&self, x: u32) -> (Key1, Key2) - let lookup_query = if let QueryStorage::Interned = storage { - let lookup_query_type = format_ident!( - "{}LookupQuery", - query_name.to_string().to_upper_camel_case() - ); - let lookup_fn_name = format_ident!("lookup_{}", query_name); - let keys = keys.iter().map(|(_, ty)| ty); - let lookup_value: Type = parse_quote!((#(#keys),*)); - let lookup_keys = vec![(parse_quote! { key }, value.clone())]; - Some(Query { - query_type: lookup_query_type, - query_name: format!("{}", lookup_fn_name), - fn_name: lookup_fn_name, - receiver: self_receiver.clone(), - attrs: vec![], // FIXME -- some automatically generated docs on this method? 
- storage: QueryStorage::InternedLookup { - intern_query_type: query_type.clone(), - }, - keys: lookup_keys, - value: lookup_value, - invoke: None, - cycle: cycle.clone(), - }) - } else { - None - }; - - queries.push(Query { - query_type, - query_name, - fn_name: method.sig.ident, - receiver: self_receiver.clone(), - attrs, - storage, - keys, - value, - invoke, - cycle, - }); - - queries.extend(lookup_query); - } - } - - let group_storage = format_ident!("{}GroupStorage__", trait_name, span = Span::call_site()); - - let mut query_fn_declarations = proc_macro2::TokenStream::new(); - let mut query_fn_definitions = proc_macro2::TokenStream::new(); - let mut storage_fields = proc_macro2::TokenStream::new(); - let mut queries_with_storage = vec![]; - for query in &queries { - let (key_names, keys): (Vec<_>, Vec<_>) = - query.keys.iter().map(|(pat, ty)| (pat, ty)).unzip(); - let value = &query.value; - let fn_name = &query.fn_name; - let qt = &query.query_type; - let attrs = &query.attrs; - let self_receiver = &query.receiver; - - query_fn_declarations.extend(quote! { - #(#attrs)* - fn #fn_name(#self_receiver, #(#key_names: #keys),*) -> #value; - }); - - // Special case: transparent queries don't create actual storage, - // just inline the definition - if let QueryStorage::Transparent = query.storage { - let invoke = query.invoke_tt(); - query_fn_definitions.extend(quote! { - fn #fn_name(&self, #(#key_names: #keys),*) -> #value { - #invoke(self, #(#key_names),*) - } - }); - continue; - } - - queries_with_storage.push(fn_name); - - query_fn_definitions.extend(quote! { - fn #fn_name(&self, #(#key_names: #keys),*) -> #value { - // Create a shim to force the code to be monomorphized in the - // query crate. Our experiments revealed that this makes a big - // difference in total compilation time in rust-analyzer, though - // it's not totally obvious why that should be. 
- fn __shim(db: &(dyn #trait_name + '_), #(#key_names: #keys),*) -> #value { - salsa::plumbing::get_query_table::<#qt>(db).get((#(#key_names),*)) - } - __shim(self, #(#key_names),*) - - } - }); - - // For input queries, we need `set_foo` etc - if let QueryStorage::Input = query.storage { - let set_fn_name = format_ident!("set_{}", fn_name); - let set_with_durability_fn_name = format_ident!("set_{}_with_durability", fn_name); - let remove_fn_name = format_ident!("remove_{}", fn_name); - - let set_fn_docs = format!( - " - Set the value of the `{fn_name}` input. - - See `{fn_name}` for details. - - *Note:* Setting values will trigger cancellation - of any ongoing queries; this method blocks until - those queries have been cancelled. - ", - fn_name = fn_name - ); - - let set_constant_fn_docs = format!( - " - Set the value of the `{fn_name}` input with a - specific durability instead of the default of - `Durability::LOW`. You can use `Durability::MAX` - to promise that its value will never change again. - - See `{fn_name}` for details. - - *Note:* Setting values will trigger cancellation - of any ongoing queries; this method blocks until - those queries have been cancelled. - ", - fn_name = fn_name - ); - - let remove_fn_docs = format!( - " - Remove the value from the `{fn_name}` input. - - See `{fn_name}` for details. Panics if a value has - not previously been set using `set_{fn_name}` or - `set_{fn_name}_with_durability`. - - *Note:* Setting values will trigger cancellation - of any ongoing queries; this method blocks until - those queries have been cancelled. - ", - fn_name = fn_name - ); - - query_fn_declarations.extend(quote! 
{ - # [doc = #set_fn_docs] - fn #set_fn_name(&mut self, #(#key_names: #keys,)* value__: #value); - - # [doc = #set_constant_fn_docs] - fn #set_with_durability_fn_name(&mut self, #(#key_names: #keys,)* value__: #value, durability__: salsa::Durability); - - # [doc = #remove_fn_docs] - fn #remove_fn_name(&mut self, #(#key_names: #keys,)*) -> #value; - }); - - query_fn_definitions.extend(quote! { - fn #set_fn_name(&mut self, #(#key_names: #keys,)* value__: #value) { - fn __shim(db: &mut dyn #trait_name, #(#key_names: #keys,)* value__: #value) { - salsa::plumbing::get_query_table_mut::<#qt>(db).set((#(#key_names),*), value__) - } - __shim(self, #(#key_names,)* value__) - } - - fn #set_with_durability_fn_name(&mut self, #(#key_names: #keys,)* value__: #value, durability__: salsa::Durability) { - fn __shim(db: &mut dyn #trait_name, #(#key_names: #keys,)* value__: #value, durability__: salsa::Durability) { - salsa::plumbing::get_query_table_mut::<#qt>(db).set_with_durability((#(#key_names),*), value__, durability__) - } - __shim(self, #(#key_names,)* value__ ,durability__) - } - - fn #remove_fn_name(&mut self, #(#key_names: #keys,)*) -> #value { - fn __shim(db: &mut dyn #trait_name, #(#key_names: #keys,)*) -> #value { - salsa::plumbing::get_query_table_mut::<#qt>(db).remove((#(#key_names),*)) - } - __shim(self, #(#key_names,)*) - } - }); - } - - // A field for the storage struct - storage_fields.extend(quote! { - #fn_name: std::sync::Arc<<#qt as salsa::Query>::Storage>, - }); - } - - // Emit the trait itself. - let mut output = { - let bounds = &input.supertraits; - quote! { - #(#trait_attrs)* - #trait_vis trait #trait_name : - salsa::Database + - salsa::plumbing::HasQueryGroup<#group_struct> + - #bounds - { - #query_fn_declarations - } - } - }; - - // Emit the query group struct and impl of `QueryGroup`. - output.extend(quote! { - /// Representative struct for the query group. 
- #trait_vis struct #group_struct { } - - impl salsa::plumbing::QueryGroup for #group_struct - { - type DynDb = #dyn_db; - type GroupStorage = #group_storage; - } - }); - - // Emit an impl of the trait - output.extend({ - let bounds = input.supertraits; - quote! { - impl #trait_name for DB - where - DB: #bounds, - DB: salsa::Database, - DB: salsa::plumbing::HasQueryGroup<#group_struct>, - { - #query_fn_definitions - } - } - }); - - let non_transparent_queries = || { - queries - .iter() - .filter(|q| !matches!(q.storage, QueryStorage::Transparent)) - }; - - // Emit the query types. - for (query, query_index) in non_transparent_queries().zip(0_u16..) { - let fn_name = &query.fn_name; - let qt = &query.query_type; - - let storage = match &query.storage { - QueryStorage::Memoized => quote!(salsa::plumbing::MemoizedStorage), - QueryStorage::Dependencies => { - quote!(salsa::plumbing::DependencyStorage) - } - QueryStorage::Input => quote!(salsa::plumbing::InputStorage), - QueryStorage::Interned => quote!(salsa::plumbing::InternedStorage), - QueryStorage::InternedLookup { intern_query_type } => { - quote!(salsa::plumbing::LookupInternedStorage) - } - QueryStorage::Transparent => panic!("should have been filtered"), - }; - let keys = query.keys.iter().map(|(_, ty)| ty); - let value = &query.value; - let query_name = &query.query_name; - - // Emit the query struct and implement the Query trait on it. - output.extend(quote! { - #[derive(Default, Debug)] - #trait_vis struct #qt; - }); - - output.extend(quote! { - impl #qt { - /// Get access to extra methods pertaining to this query. - /// You can also use it to invoke this query. - #trait_vis fn in_db(self, db: &#dyn_db) -> salsa::QueryTable<'_, Self> - { - salsa::plumbing::get_query_table::<#qt>(db) - } - } - }); - - output.extend(quote! { - impl #qt { - /// Like `in_db`, but gives access to methods for setting the - /// value of an input. Not applicable to derived queries. 
- /// - /// # Threads, cancellation, and blocking - /// - /// Mutating the value of a query cannot be done while there are - /// still other queries executing. If you are using your database - /// within a single thread, this is not a problem: you only have - /// `&self` access to the database, but this method requires `&mut - /// self`. - /// - /// However, if you have used `snapshot` to create other threads, - /// then attempts to `set` will **block the current thread** until - /// those snapshots are dropped (usually when those threads - /// complete). This also implies that if you create a snapshot but - /// do not send it to another thread, then invoking `set` will - /// deadlock. - /// - /// Before blocking, the thread that is attempting to `set` will - /// also set a cancellation flag. This will cause any query - /// invocations in other threads to unwind with a `Cancelled` - /// sentinel value and eventually let the `set` succeed once all - /// threads have unwound past the salsa invocation. - /// - /// If your query implementations are performing expensive - /// operations without invoking another query, you can also use - /// the `Runtime::unwind_if_cancelled` method to check for an - /// ongoing cancellation and bring those operations to a close, - /// thus allowing the `set` to succeed. Otherwise, long-running - /// computations may lead to "starvation", meaning that the - /// thread attempting to `set` has to wait a long, long time. 
=) - #trait_vis fn in_db_mut(self, db: &mut #dyn_db) -> salsa::QueryTableMut<'_, Self> - { - salsa::plumbing::get_query_table_mut::<#qt>(db) - } - } - - impl<'d> salsa::QueryDb<'d> for #qt - { - type DynDb = #dyn_db + 'd; - type Group = #group_struct; - type GroupStorage = #group_storage; - } - - // ANCHOR:Query_impl - impl salsa::Query for #qt - { - type Key = (#(#keys),*); - type Value = #value; - type Storage = #storage; - - const QUERY_INDEX: u16 = #query_index; - - const QUERY_NAME: &'static str = #query_name; - - fn query_storage<'a>( - group_storage: &'a >::GroupStorage, - ) -> &'a std::sync::Arc { - &group_storage.#fn_name - } - - fn query_storage_mut<'a>( - group_storage: &'a >::GroupStorage, - ) -> &'a std::sync::Arc { - &group_storage.#fn_name - } - } - // ANCHOR_END:Query_impl - }); - - // Implement the QueryFunction trait for queries which need it. - if query.storage.needs_query_function() { - let span = query.fn_name.span(); - - let key_names: Vec<_> = query.keys.iter().map(|(pat, _)| pat).collect(); - let key_pattern = if query.keys.len() == 1 { - quote! { #(#key_names),* } - } else { - quote! { (#(#key_names),*) } - }; - let invoke = query.invoke_tt(); - - let recover = if let Some(cycle_recovery_fn) = &query.cycle { - quote! { - const CYCLE_STRATEGY: salsa::plumbing::CycleRecoveryStrategy = - salsa::plumbing::CycleRecoveryStrategy::Fallback; - fn cycle_fallback(db: &>::DynDb, cycle: &salsa::Cycle, #key_pattern: &::Key) - -> ::Value { - #cycle_recovery_fn( - db, - cycle, - #(#key_names),* - ) - } - } - } else { - quote! { - const CYCLE_STRATEGY: salsa::plumbing::CycleRecoveryStrategy = - salsa::plumbing::CycleRecoveryStrategy::Panic; - } - }; - - output.extend(quote_spanned! 
{span=> - // ANCHOR:QueryFunction_impl - impl salsa::plumbing::QueryFunction for #qt - { - fn execute(db: &>::DynDb, #key_pattern: ::Key) - -> ::Value { - #invoke(db, #(#key_names),*) - } - - #recover - } - // ANCHOR_END:QueryFunction_impl - }); - } - } - - let mut fmt_ops = proc_macro2::TokenStream::new(); - for (Query { fn_name, .. }, query_index) in non_transparent_queries().zip(0_u16..) { - fmt_ops.extend(quote! { - #query_index => { - salsa::plumbing::QueryStorageOps::fmt_index( - &*self.#fn_name, db, input, fmt, - ) - } - }); - } - - let mut maybe_changed_ops = proc_macro2::TokenStream::new(); - for (Query { fn_name, .. }, query_index) in non_transparent_queries().zip(0_u16..) { - maybe_changed_ops.extend(quote! { - #query_index => { - salsa::plumbing::QueryStorageOps::maybe_changed_after( - &*self.#fn_name, db, input, revision - ) - } - }); - } - - let mut cycle_recovery_strategy_ops = proc_macro2::TokenStream::new(); - for (Query { fn_name, .. }, query_index) in non_transparent_queries().zip(0_u16..) { - cycle_recovery_strategy_ops.extend(quote! { - #query_index => { - salsa::plumbing::QueryStorageOps::cycle_recovery_strategy( - &*self.#fn_name - ) - } - }); - } - - let mut for_each_ops = proc_macro2::TokenStream::new(); - for Query { fn_name, .. } in non_transparent_queries() { - for_each_ops.extend(quote! { - op(&*self.#fn_name); - }); - } - - // Emit query group storage struct - output.extend(quote! 
{ - #trait_vis struct #group_storage { - #storage_fields - } - - // ANCHOR:group_storage_new - impl #group_storage { - #trait_vis fn new(group_index: u16) -> Self { - #group_storage { - #( - #queries_with_storage: - std::sync::Arc::new(salsa::plumbing::QueryStorageOps::new(group_index)), - )* - } - } - } - // ANCHOR_END:group_storage_new - - // ANCHOR:group_storage_methods - impl #group_storage { - #trait_vis fn fmt_index( - &self, - db: &(#dyn_db + '_), - input: salsa::DatabaseKeyIndex, - fmt: &mut std::fmt::Formatter<'_>, - ) -> std::fmt::Result { - match input.query_index() { - #fmt_ops - i => panic!("salsa: impossible query index {}", i), - } - } - - #trait_vis fn maybe_changed_after( - &self, - db: &(#dyn_db + '_), - input: salsa::DatabaseKeyIndex, - revision: salsa::Revision, - ) -> bool { - match input.query_index() { - #maybe_changed_ops - i => panic!("salsa: impossible query index {}", i), - } - } - - #trait_vis fn cycle_recovery_strategy( - &self, - db: &(#dyn_db + '_), - input: salsa::DatabaseKeyIndex, - ) -> salsa::plumbing::CycleRecoveryStrategy { - match input.query_index() { - #cycle_recovery_strategy_ops - i => panic!("salsa: impossible query index {}", i), - } - } - - #trait_vis fn for_each_query( - &self, - _runtime: &salsa::Runtime, - mut op: &mut dyn FnMut(&dyn salsa::plumbing::QueryStorageMassOps), - ) { - #for_each_ops - } - } - // ANCHOR_END:group_storage_methods - }); - - if std::env::var("SALSA_DUMP").is_ok() { - println!("~~~ query_group"); - println!("{}", output); - println!("~~~ query_group"); - } - - output.into() -} - -struct SalsaAttr { - name: String, - tts: TokenStream, - span: Span, -} - -impl std::fmt::Debug for SalsaAttr { - fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(fmt, "{:?}", self.name) - } -} - -impl TryFrom for SalsaAttr { - type Error = syn::Attribute; - - fn try_from(attr: syn::Attribute) -> Result { - if is_not_salsa_attr_path(&attr.path) { - return Err(attr); - } - - let span = 
attr.span(); - let name = attr.path.segments[1].ident.to_string(); - let tts = attr.tokens.into(); - - Ok(SalsaAttr { name, tts, span }) - } -} - -fn is_not_salsa_attr_path(path: &syn::Path) -> bool { - path.segments - .first() - .map(|s| s.ident != "salsa") - .unwrap_or(true) - || path.segments.len() != 2 -} - -fn filter_attrs(attrs: Vec) -> (Vec, Vec) { - let mut other = vec![]; - let mut salsa = vec![]; - // Leave non-salsa attributes untouched. These are - // attributes that don't start with `salsa::` or don't have - // exactly two segments in their path. - // Keep the salsa attributes around. - for attr in attrs { - match SalsaAttr::try_from(attr) { - Ok(it) => salsa.push(it), - Err(it) => other.push(it), - } - } - (other, salsa) -} - -#[derive(Debug)] -struct Query { - fn_name: Ident, - receiver: syn::Receiver, - query_name: String, - attrs: Vec, - query_type: Ident, - storage: QueryStorage, - keys: Vec<(Ident, syn::Type)>, - value: syn::Type, - invoke: Option, - cycle: Option, -} - -impl Query { - fn invoke_tt(&self) -> proc_macro2::TokenStream { - match &self.invoke { - Some(i) => i.into_token_stream(), - None => self.fn_name.clone().into_token_stream(), - } - } -} - -#[derive(Debug, Clone, PartialEq, Eq)] -enum QueryStorage { - Memoized, - Dependencies, - Input, - Interned, - InternedLookup { intern_query_type: Ident }, - Transparent, -} - -impl QueryStorage { - /// Do we need a `QueryFunction` impl for this type of query? - fn needs_query_function(&self) -> bool { - match self { - QueryStorage::Input - | QueryStorage::Interned - | QueryStorage::InternedLookup { .. 
} - | QueryStorage::Transparent => false, - QueryStorage::Memoized | QueryStorage::Dependencies => true, - } - } -} diff --git a/examples/compiler/compiler.rs b/examples/compiler/compiler.rs deleted file mode 100644 index 8e9572a80..000000000 --- a/examples/compiler/compiler.rs +++ /dev/null @@ -1,72 +0,0 @@ -use std::sync::Arc; - -use crate::{interner::Interner, values::*}; - -#[salsa::query_group(CompilerDatabase)] -pub trait Compiler: Interner { - #[salsa::input] - fn input_string(&self) -> Arc; - - /// Get the fields. - fn fields(&self, class: Class) -> Arc>; - - /// Get the list of all classes - fn all_classes(&self) -> Arc>; - - /// Get the list of all fields - fn all_fields(&self) -> Arc>; -} - -/// This function parses a dummy language with the following structure: -/// -/// Classes are defined one per line, consisting of a comma-separated list of fields. -/// -/// Example: -/// -/// ``` -/// lorem,ipsum -/// dolor,sit,amet, -/// consectetur,adipiscing,elit -/// ``` -fn all_classes(db: &dyn Compiler) -> Arc> { - let string = db.input_string(); - - let rows = string.split('\n'); - let classes: Vec<_> = rows - .filter(|string| !string.is_empty()) - .map(|string| { - let fields = string - .trim() - .split(',') - .filter(|string| !string.is_empty()) - .map(|name_str| { - let name = name_str.to_owned(); - let field_data = FieldData { name }; - db.intern_field(field_data) - }) - .collect(); - let class_data = ClassData { fields }; - db.intern_class(class_data) - }) - .collect(); - - Arc::new(classes) -} - -fn fields(db: &dyn Compiler, class: Class) -> Arc> { - let class = db.lookup_intern_class(class); - Arc::new(class.fields) -} - -fn all_fields(db: &dyn Compiler) -> Arc> { - Arc::new( - db.all_classes() - .iter() - .cloned() - .flat_map(|class| { - let fields = db.fields(class); - (0..fields.len()).map(move |i| fields[i]) - }) - .collect(), - ) -} diff --git a/examples/compiler/implementation.rs b/examples/compiler/implementation.rs deleted file mode 100644 
index 00f1065d7..000000000 --- a/examples/compiler/implementation.rs +++ /dev/null @@ -1,23 +0,0 @@ -use crate::compiler::CompilerDatabase; -use crate::interner::InternerDatabase; - -/// Our "database" will be threaded through our application (though -/// 99% of the application only interacts with it through traits and -/// never knows its real name). It contains all the values for all of -/// our memoized queries and encapsulates **all mutable state that -/// persists longer than a single query execution.** -/// -/// Databases can contain whatever you want them to, but salsa -/// requires you to add a `salsa::Runtime` member. Note -/// though: you should be very careful if adding shared, mutable state -/// to your context (e.g., a shared counter or some such thing). If -/// mutations to that shared state affect the results of your queries, -/// that's going to mess up the incremental results. -#[salsa::database(InternerDatabase, CompilerDatabase)] -#[derive(Default)] -pub struct DatabaseImpl { - storage: salsa::Storage, -} - -/// This impl tells salsa where to find the salsa runtime. 
-impl salsa::Database for DatabaseImpl {} diff --git a/examples/compiler/interner.rs b/examples/compiler/interner.rs deleted file mode 100644 index 3aca08970..000000000 --- a/examples/compiler/interner.rs +++ /dev/null @@ -1,10 +0,0 @@ -use crate::values::*; - -#[salsa::query_group(InternerDatabase)] -pub trait Interner { - #[salsa::interned] - fn intern_field(&self, field: FieldData) -> Field; - - #[salsa::interned] - fn intern_class(&self, class: ClassData) -> Class; -} diff --git a/examples/compiler/main.rs b/examples/compiler/main.rs deleted file mode 100644 index a51a63f4a..000000000 --- a/examples/compiler/main.rs +++ /dev/null @@ -1,40 +0,0 @@ -use std::sync::Arc; - -mod compiler; -mod implementation; -mod interner; -mod values; - -use self::compiler::Compiler; -use self::implementation::DatabaseImpl; -use self::interner::Interner; - -static INPUT_STR: &str = r#" -lorem,ipsum -dolor,sit,amet, -consectetur,adipiscing,elit -"#; - -#[test] -fn test() { - let mut db = DatabaseImpl::default(); - - db.set_input_string(Arc::new(INPUT_STR.to_owned())); - - let all_fields = db.all_fields(); - assert_eq!( - format!("{:?}", all_fields), - "[Field(0), Field(1), Field(2), Field(3), Field(4), Field(5), Field(6), Field(7)]" - ); -} - -fn main() { - let mut db = DatabaseImpl::default(); - - db.set_input_string(Arc::new(INPUT_STR.to_owned())); - - for field in db.all_fields().iter() { - let field_data = db.lookup_intern_field(*field); - println!("{:?} => {:?}", field, field_data); - } -} diff --git a/examples/compiler/values.rs b/examples/compiler/values.rs deleted file mode 100644 index 7cd7c853b..000000000 --- a/examples/compiler/values.rs +++ /dev/null @@ -1,35 +0,0 @@ -#[derive(Clone, Debug, Hash, PartialEq, Eq)] -pub struct ClassData { - pub fields: Vec, -} - -#[derive(Copy, Clone, Debug, Hash, PartialEq, Eq)] -pub struct Class(salsa::InternId); - -impl salsa::InternKey for Class { - fn from_intern_id(id: salsa::InternId) -> Self { - Self(id) - } - - fn 
as_intern_id(&self) -> salsa::InternId { - self.0 - } -} - -#[derive(Clone, Debug, Hash, PartialEq, Eq)] -pub struct FieldData { - pub name: String, -} - -#[derive(Copy, Clone, Debug, Hash, PartialEq, Eq)] -pub struct Field(salsa::InternId); - -impl salsa::InternKey for Field { - fn from_intern_id(id: salsa::InternId) -> Self { - Self(id) - } - - fn as_intern_id(&self) -> salsa::InternId { - self.0 - } -} diff --git a/examples/hello_world/main.rs b/examples/hello_world/main.rs deleted file mode 100644 index a3edfc5d2..000000000 --- a/examples/hello_world/main.rs +++ /dev/null @@ -1,104 +0,0 @@ -use std::sync::Arc; - -/////////////////////////////////////////////////////////////////////////// -// Step 1. Define the query group - -// A **query group** is a collection of queries (both inputs and -// functions) that are defined in one particular spot. Each query -// group is defined by a trait decorated with the -// `#[salsa::query_group]` attribute. The trait defines one method per -// query, with the arguments to the method being the query **keys** and -// the return value being the query's **value**. -// -// Along with the trait, each query group has an associated -// "storage struct". The name of this struct is specified in the `query_group` -// attribute -- for a query group `Foo`, it is conventionally `FooStorage`. -// -// When we define the final database (see below), we will list out the -// storage structs for each query group that it contains. The database -// will then automatically implement the traits. -// -// Note that one query group can "include" another by listing the -// trait for that query group as a supertrait. -// ANCHOR:trait -#[salsa::query_group(HelloWorldStorage)] -trait HelloWorld { - // For each query, we give the name, some input keys (here, we - // have one key, `()`) and the output type `Arc`. 
We can - // use attributes to give other configuration: - // - // - `salsa::input` indicates that this is an "input" to the system, - // which must be explicitly set. The `salsa::query_group` method - // will autogenerate a `set_input_string` method that can be - // used to set the input. - #[salsa::input] - fn input_string(&self, key: ()) -> Arc; - - // This is a *derived query*, meaning its value is specified by - // a function (see Step 2, below). - fn length(&self, key: ()) -> usize; -} -// ANCHOR_END:trait - -/////////////////////////////////////////////////////////////////////////// -// Step 2. Define the queries. - -// Define the **function** for the `length` query. This function will -// be called whenever the query's value must be recomputed. After it -// is called once, its result is typically memoized, unless we think -// that one of the inputs may have changed. Its first argument (`db`) -// is the "database". This is always a `&dyn` version of the query group -// trait, so that you can invoke all the queries you know about. -// We never know the concrete type here, as the full database may contain -// methods from other query groups that we don't know about. -fn length(db: &dyn HelloWorld, (): ()) -> usize { - // Read the input string: - let input_string = db.input_string(()); - - // Return its length: - input_string.len() -} - -/////////////////////////////////////////////////////////////////////////// -// Step 3. Define the database struct - -// Define the actual database struct. This struct needs to be annotated with -// `#[salsa::database(..)]`. The list `..` will be the paths leading to the -// storage structs for each query group that this database supports. This -// attribute macro will generate the necessary impls so that the database -// implements the corresponding traits as well (so, here, `DatabaseStruct` will -// implement the `HelloWorld` trait). 
-// -// The database struct must have a field `storage: salsa::Storage`, but it -// can have any number of additional fields beyond that. The -// `#[salsa::database]` macro will generate glue code that accesses this -// `storage` field (but other fields are ignored). The `Storage` type -// contains all the actual hashtables and the like used to store query results -// and dependency information. -// -// In addition to including the `storage` field, you must also implement the -// `salsa::Database` trait (as shown below). This gives you a chance to define -// the callback methods within if you want to (in this example, we don't). -// ANCHOR:database -#[salsa::database(HelloWorldStorage)] -#[derive(Default)] -struct DatabaseStruct { - storage: salsa::Storage, -} - -impl salsa::Database for DatabaseStruct {} -// ANCHOR_END:database - -// This shows how to use a query. -fn main() { - let mut db = DatabaseStruct::default(); - - // You cannot access input_string yet, because it does not have a - // value. If you do, it will panic. You could create an Option - // interface by maintaining a HashSet of inserted keys. - // println!("Initially, the length is {}.", db.length(())); - - db.set_input_string((), Arc::new("Hello, world".to_string())); - - println!("Now, the length is {}.", db.length(())); -} diff --git a/examples/selection/main.rs b/examples/selection/main.rs deleted file mode 100644 index 004eb47d0..000000000 --- a/examples/selection/main.rs +++ /dev/null @@ -1,36 +0,0 @@ -/// Sources for the [selection pattern chapter][c] of the salsa book. -/// -/// [c]: https://salsa-rs.github.io/salsa/common_patterns/selection.html - -// ANCHOR: request -#[derive(Clone, Debug, PartialEq, Eq)] -struct ParsedResult { - header: Vec, - body: String, -} - -#[derive(Clone, Debug, PartialEq, Eq)] -struct ParsedHeader { - key: String, - value: String, -} - -#[salsa::query_group(Request)] -trait RequestParser { - /// The base text of the request. 
- #[salsa::input] - fn request_text(&self) -> String; - - /// The parsed form of the request. - fn parse(&self) -> ParsedResult; -} -// ANCHOR_END: request - -fn parse(_db: &dyn RequestParser) -> ParsedResult { - panic!() -} - -mod util1; -mod util2; - -fn main() {} diff --git a/examples/selection/util1.rs b/examples/selection/util1.rs deleted file mode 100644 index bd5ce4ac4..000000000 --- a/examples/selection/util1.rs +++ /dev/null @@ -1,16 +0,0 @@ -use super::*; - -// ANCHOR: util1 -#[salsa::query_group(Request)] -trait RequestUtil: RequestParser { - fn content_type(&self) -> Option; -} - -fn content_type(db: &dyn RequestUtil) -> Option { - db.parse() - .header - .iter() - .find(|header| header.key == "content-type") - .map(|header| header.value.clone()) -} -// ANCHOR_END: util1 diff --git a/examples/selection/util2.rs b/examples/selection/util2.rs deleted file mode 100644 index 3ac8b0e75..000000000 --- a/examples/selection/util2.rs +++ /dev/null @@ -1,20 +0,0 @@ -use super::*; - -// ANCHOR: util2 -#[salsa::query_group(Request)] -trait RequestUtil: RequestParser { - fn header(&self) -> Vec; - fn content_type(&self) -> Option; -} - -fn header(db: &dyn RequestUtil) -> Vec { - db.parse().header -} - -fn content_type(db: &dyn RequestUtil) -> Option { - db.header() - .iter() - .find(|header| header.key == "content-type") - .map(|header| header.value.clone()) -} -// ANCHOR_END: util2 diff --git a/src/debug.rs b/src/debug.rs deleted file mode 100644 index 0925ddb3d..000000000 --- a/src/debug.rs +++ /dev/null @@ -1,66 +0,0 @@ -//! Debugging APIs: these are meant for use when unit-testing or -//! debugging your application but aren't ordinarily needed. - -use crate::durability::Durability; -use crate::plumbing::QueryStorageOps; -use crate::Query; -use crate::QueryTable; -use std::iter::FromIterator; - -/// Additional methods on queries that can be used to "peek into" -/// their current state. 
These methods are meant for debugging and -/// observing the effects of garbage collection etc. -pub trait DebugQueryTable { - /// Key of this query. - type Key; - - /// Value of this query. - type Value; - - /// Returns a lower bound on the durability for the given key. - /// This is typically the minimum durability of all values that - /// the query accessed, but we may return a lower durability in - /// some cases. - fn durability(&self, key: Self::Key) -> Durability; - - /// Get the (current) set of the entries in the query table. - fn entries(&self) -> C - where - C: FromIterator>; -} - -/// An entry from a query table, for debugging and inspecting the table state. -#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord)] -#[non_exhaustive] -pub struct TableEntry { - /// key of the query - pub key: K, - /// value of the query, if it is stored - pub value: Option, -} - -impl TableEntry { - pub(crate) fn new(key: K, value: Option) -> TableEntry { - TableEntry { key, value } - } -} - -impl DebugQueryTable for QueryTable<'_, Q> -where - Q: Query, - Q::Storage: QueryStorageOps, -{ - type Key = Q::Key; - type Value = Q::Value; - - fn durability(&self, key: Q::Key) -> Durability { - self.storage.durability(self.db, &key) - } - - fn entries(&self) -> C - where - C: FromIterator>, - { - self.storage.entries(self.db) - } -} diff --git a/src/derived.rs b/src/derived.rs deleted file mode 100644 index e9d9f9024..000000000 --- a/src/derived.rs +++ /dev/null @@ -1,241 +0,0 @@ -use crate::debug::TableEntry; -use crate::durability::Durability; -use crate::plumbing::DerivedQueryStorageOps; -use crate::plumbing::LruQueryStorageOps; -use crate::plumbing::QueryFunction; -use crate::plumbing::QueryStorageMassOps; -use crate::plumbing::QueryStorageOps; -use crate::runtime::local_state::QueryInputs; -use crate::runtime::local_state::QueryRevisions; -use crate::Runtime; -use crate::{Database, DatabaseKeyIndex, QueryDb, Revision}; -use std::borrow::Borrow; -use std::hash::Hash; -use 
std::marker::PhantomData; - -mod execute; -mod fetch; -mod key_to_key_index; -mod lru; -mod maybe_changed_after; -mod memo; -mod sync; - -//mod slot; -//use slot::Slot; - -/// Memoized queries store the result plus a list of the other queries -/// that they invoked. This means we can avoid recomputing them when -/// none of those inputs have changed. -pub type MemoizedStorage = DerivedStorage; - -/// "Dependency" queries just track their dependencies and not the -/// actual value (which they produce on demand). This lessens the -/// storage requirements. -pub type DependencyStorage = DerivedStorage; - -/// Handles storage where the value is 'derived' by executing a -/// function (in contrast to "inputs"). -pub struct DerivedStorage -where - Q: QueryFunction, - MP: MemoizationPolicy, -{ - group_index: u16, - lru: lru::Lru, - key_map: key_to_key_index::KeyToKeyIndex, - memo_map: memo::MemoMap, - sync_map: sync::SyncMap, - policy: PhantomData, -} - -type DerivedKeyIndex = u32; - -impl std::panic::RefUnwindSafe for DerivedStorage -where - Q: QueryFunction, - MP: MemoizationPolicy, - Q::Key: std::panic::RefUnwindSafe, - Q::Value: std::panic::RefUnwindSafe, -{ -} - -pub trait MemoizationPolicy: Send + Sync -where - Q: QueryFunction, -{ - fn should_memoize_value(key: &Q::Key) -> bool; - - fn memoized_value_eq(old_value: &Q::Value, new_value: &Q::Value) -> bool; -} - -pub enum AlwaysMemoizeValue {} -impl MemoizationPolicy for AlwaysMemoizeValue -where - Q: QueryFunction, - Q::Value: Eq, -{ - fn should_memoize_value(_key: &Q::Key) -> bool { - true - } - - fn memoized_value_eq(old_value: &Q::Value, new_value: &Q::Value) -> bool { - old_value == new_value - } -} - -pub enum NeverMemoizeValue {} -impl MemoizationPolicy for NeverMemoizeValue -where - Q: QueryFunction, -{ - fn should_memoize_value(_key: &Q::Key) -> bool { - false - } - - fn memoized_value_eq(_old_value: &Q::Value, _new_value: &Q::Value) -> bool { - panic!("cannot reach since we never memoize") - } -} - -impl 
DerivedStorage -where - Q: QueryFunction, - MP: MemoizationPolicy, -{ - fn database_key_index(&self, key_index: DerivedKeyIndex) -> DatabaseKeyIndex { - DatabaseKeyIndex { - group_index: self.group_index, - query_index: Q::QUERY_INDEX, - key_index, - } - } - - fn assert_our_key_index(&self, index: DatabaseKeyIndex) { - assert_eq!(index.group_index, self.group_index); - assert_eq!(index.query_index, Q::QUERY_INDEX); - } - - fn key_index(&self, index: DatabaseKeyIndex) -> DerivedKeyIndex { - self.assert_our_key_index(index); - index.key_index - } -} - -impl QueryStorageOps for DerivedStorage -where - Q: QueryFunction, - MP: MemoizationPolicy, -{ - const CYCLE_STRATEGY: crate::plumbing::CycleRecoveryStrategy = Q::CYCLE_STRATEGY; - - fn new(group_index: u16) -> Self { - DerivedStorage { - group_index, - lru: Default::default(), - key_map: Default::default(), - memo_map: Default::default(), - sync_map: Default::default(), - policy: PhantomData, - } - } - - fn fmt_index( - &self, - _db: &>::DynDb, - index: DatabaseKeyIndex, - fmt: &mut std::fmt::Formatter<'_>, - ) -> std::fmt::Result { - let key_index = self.key_index(index); - let key = self.key_map.key_for_key_index(key_index); - write!(fmt, "{}({:?})", Q::QUERY_NAME, key) - } - - fn maybe_changed_after( - &self, - db: &>::DynDb, - database_key_index: DatabaseKeyIndex, - revision: Revision, - ) -> bool { - debug_assert!(revision < db.salsa_runtime().current_revision()); - let key_index = self.key_index(database_key_index); - self.maybe_changed_after(db, key_index, revision) - } - - fn fetch(&self, db: &>::DynDb, key: &Q::Key) -> Q::Value { - let key_index = self.key_map.key_index_for_key(key); - self.fetch(db, key_index) - } - - fn durability(&self, _db: &>::DynDb, key: &Q::Key) -> Durability { - let key_index = self.key_map.key_index_for_key(key); - if let Some(memo) = self.memo_map.get(key_index) { - memo.revisions.durability - } else { - Durability::LOW - } - } - - fn entries(&self, _db: &>::DynDb) -> C - where - C: 
std::iter::FromIterator>, - { - self.memo_map - .iter() - .map(|(key_index, memo)| { - let key = self.key_map.key_for_key_index(key_index); - TableEntry::new(key, memo.value.clone()) - }) - .collect() - } -} - -impl QueryStorageMassOps for DerivedStorage -where - Q: QueryFunction, - MP: MemoizationPolicy, -{ - fn purge(&self) { - self.lru.set_capacity(0); - self.memo_map.clear(); - } -} - -impl LruQueryStorageOps for DerivedStorage -where - Q: QueryFunction, - MP: MemoizationPolicy, -{ - fn set_lru_capacity(&self, new_capacity: usize) { - self.lru.set_capacity(new_capacity); - } -} - -impl DerivedQueryStorageOps for DerivedStorage -where - Q: QueryFunction, - MP: MemoizationPolicy, -{ - fn invalidate(&self, runtime: &mut Runtime, key: &S) - where - S: Eq + Hash, - Q::Key: Borrow, - { - runtime.with_incremented_revision(|new_revision| { - let key_index = self.key_map.existing_key_index_for_key(key)?; - let memo = self.memo_map.get(key_index)?; - let invalidated_revisions = QueryRevisions { - changed_at: new_revision, - durability: memo.revisions.durability, - inputs: QueryInputs::Untracked, - }; - let new_memo = memo::Memo::new( - memo.value.clone(), - memo.verified_at.load(), - invalidated_revisions, - ); - self.memo_map.insert(key_index, new_memo); - Some(memo.revisions.durability) - }) - } -} diff --git a/src/derived/execute.rs b/src/derived/execute.rs deleted file mode 100644 index 229a565d3..000000000 --- a/src/derived/execute.rs +++ /dev/null @@ -1,134 +0,0 @@ -use std::sync::Arc; - -use crate::{ - plumbing::QueryFunction, - runtime::{local_state::ActiveQueryGuard, StampedValue}, - Cycle, Database, Event, EventKind, QueryDb, -}; - -use super::{memo::Memo, DerivedStorage, MemoizationPolicy}; - -impl DerivedStorage -where - Q: QueryFunction, - MP: MemoizationPolicy, -{ - /// Executes the query function for the given `active_query`. Creates and stores - /// a new memo with the result, backdated if possible. 
Once this completes, - /// the query will have been popped off the active query stack. - /// - /// # Parameters - /// - /// * `db`, the database. - /// * `active_query`, the active stack frame for the query to execute. - /// * `opt_old_memo`, the older memo, if any existed. Used for backdated. - pub(super) fn execute( - &self, - db: &>::DynDb, - active_query: ActiveQueryGuard<'_>, - opt_old_memo: Option>>, - ) -> StampedValue { - let runtime = db.salsa_runtime(); - let revision_now = runtime.current_revision(); - let database_key_index = active_query.database_key_index; - - log::info!("{:?}: executing query", database_key_index.debug(db)); - - db.salsa_event(Event { - runtime_id: db.salsa_runtime().id(), - kind: EventKind::WillExecute { - database_key: database_key_index, - }, - }); - - // Query was not previously executed, or value is potentially - // stale, or value is absent. Let's execute! - let database_key_index = active_query.database_key_index; - let key_index = database_key_index.key_index; - let key = self.key_map.key_for_key_index(key_index); - let value = match Cycle::catch(|| Q::execute(db, key.clone())) { - Ok(v) => v, - Err(cycle) => { - log::debug!( - "{:?}: caught cycle {:?}, have strategy {:?}", - database_key_index.debug(db), - cycle, - Q::CYCLE_STRATEGY, - ); - match Q::CYCLE_STRATEGY { - crate::plumbing::CycleRecoveryStrategy::Panic => cycle.throw(), - crate::plumbing::CycleRecoveryStrategy::Fallback => { - if let Some(c) = active_query.take_cycle() { - assert!(c.is(&cycle)); - Q::cycle_fallback(db, &cycle, &key) - } else { - // we are not a participant in this cycle - debug_assert!(!cycle - .participant_keys() - .any(|k| k == database_key_index)); - cycle.throw() - } - } - } - } - }; - let mut revisions = active_query.pop(); - - // We assume that query is side-effect free -- that is, does - // not mutate the "inputs" to the query system. Sanity check - // that assumption here, at least to the best of our ability. 
- assert_eq!( - runtime.current_revision(), - revision_now, - "revision altered during query execution", - ); - - // If the new value is equal to the old one, then it didn't - // really change, even if some of its inputs have. So we can - // "backdate" its `changed_at` revision to be the same as the - // old value. - if let Some(old_memo) = &opt_old_memo { - if let Some(old_value) = &old_memo.value { - // Careful: if the value became less durable than it - // used to be, that is a "breaking change" that our - // consumers must be aware of. Becoming *more* durable - // is not. See the test `constant_to_non_constant`. - if revisions.durability >= old_memo.revisions.durability - && MP::memoized_value_eq(old_value, &value) - { - log::debug!( - "{:?}: read_upgrade: value is equal, back-dating to {:?}", - database_key_index.debug(db), - old_memo.revisions.changed_at, - ); - - assert!(old_memo.revisions.changed_at <= revisions.changed_at); - revisions.changed_at = old_memo.revisions.changed_at; - } - } - } - - let stamped_value = revisions.stamped_value(value); - - log::debug!( - "{:?}: read_upgrade: result.revisions = {:#?}", - database_key_index.debug(db), - revisions - ); - - self.memo_map.insert( - key_index, - Memo::new( - if MP::should_memoize_value(&key) { - Some(stamped_value.value.clone()) - } else { - None - }, - revision_now, - revisions, - ), - ); - - stamped_value - } -} diff --git a/src/derived/fetch.rs b/src/derived/fetch.rs deleted file mode 100644 index bafc2ffe1..000000000 --- a/src/derived/fetch.rs +++ /dev/null @@ -1,109 +0,0 @@ -use arc_swap::Guard; - -use crate::{ - plumbing::{DatabaseOps, QueryFunction}, - runtime::StampedValue, - Database, QueryDb, -}; - -use super::{DerivedKeyIndex, DerivedStorage, MemoizationPolicy}; - -impl DerivedStorage -where - Q: QueryFunction, - MP: MemoizationPolicy, -{ - #[inline] - pub(super) fn fetch( - &self, - db: &>::DynDb, - key_index: DerivedKeyIndex, - ) -> Q::Value { - db.unwind_if_cancelled(); - - let 
StampedValue { - value, - durability, - changed_at, - } = self.compute_value(db, key_index); - - if let Some(evicted) = self.lru.record_use(key_index) { - self.evict(evicted); - } - - db.salsa_runtime() - .report_query_read_and_unwind_if_cycle_resulted( - self.database_key_index(key_index), - durability, - changed_at, - ); - - value - } - - #[inline] - fn compute_value( - &self, - db: &>::DynDb, - key_index: DerivedKeyIndex, - ) -> StampedValue { - loop { - if let Some(value) = self - .fetch_hot(db, key_index) - .or_else(|| self.fetch_cold(db, key_index)) - { - return value; - } - } - } - - #[inline] - fn fetch_hot( - &self, - db: &>::DynDb, - key_index: DerivedKeyIndex, - ) -> Option> { - let memo_guard = self.memo_map.get(key_index); - if let Some(memo) = &memo_guard { - if let Some(value) = &memo.value { - let runtime = db.salsa_runtime(); - if self.shallow_verify_memo(db, runtime, self.database_key_index(key_index), memo) { - return Some(memo.revisions.stamped_value(value.clone())); - } - } - } - None - } - - fn fetch_cold( - &self, - db: &>::DynDb, - key_index: DerivedKeyIndex, - ) -> Option> { - let runtime = db.salsa_runtime(); - let database_key_index = self.database_key_index(key_index); - - // Try to claim this query: if someone else has claimed it already, go back and start again. - let _claim_guard = self.sync_map.claim(db.ops_database(), database_key_index)?; - - // Push the query on the stack. - let active_query = runtime.push_query(database_key_index); - - // Now that we've claimed the item, check again to see if there's a "hot" value. - // This time we can do a *deep* verify. Because this can recurse, don't hold the arcswap guard. 
- let opt_old_memo = self.memo_map.get(key_index).map(Guard::into_inner); - if let Some(old_memo) = &opt_old_memo { - if let Some(value) = &old_memo.value { - if self.deep_verify_memo(db, old_memo, &active_query) { - return Some(old_memo.revisions.stamped_value(value.clone())); - } - } - } - - Some(self.execute(db, active_query, opt_old_memo)) - } - - fn evict(&self, key_index: DerivedKeyIndex) { - self.memo_map.evict(key_index); - } -} diff --git a/src/derived/key_to_key_index.rs b/src/derived/key_to_key_index.rs deleted file mode 100644 index 107d8c4b4..000000000 --- a/src/derived/key_to_key_index.rs +++ /dev/null @@ -1,58 +0,0 @@ -use crossbeam_utils::atomic::AtomicCell; -use std::borrow::Borrow; -use std::hash::Hash; - -use crate::hash::FxDashMap; - -use super::DerivedKeyIndex; - -pub(super) struct KeyToKeyIndex { - index_map: FxDashMap, - key_map: FxDashMap, - indices: AtomicCell, -} - -impl Default for KeyToKeyIndex -where - K: Hash + Eq, -{ - fn default() -> Self { - Self { - index_map: Default::default(), - key_map: Default::default(), - indices: Default::default(), - } - } -} - -impl KeyToKeyIndex -where - K: Hash + Eq + Clone, -{ - pub(super) fn key_index_for_key(&self, key: &K) -> DerivedKeyIndex { - // Common case: get an existing key - if let Some(v) = self.index_map.get(key) { - return *v; - } - - // Less common case: (potentially) create a new slot - *self.index_map.entry(key.clone()).or_insert_with(|| { - let key_index = self.indices.fetch_add(1); - self.key_map.insert(key_index, key.clone()); - key_index - }) - } - - pub(super) fn existing_key_index_for_key(&self, key: &S) -> Option - where - S: Eq + Hash, - K: Borrow, - { - // Common case: get an existing key - self.index_map.get(key).map(|v| *v) - } - - pub(super) fn key_for_key_index(&self, key_index: DerivedKeyIndex) -> K { - self.key_map.get(&key_index).unwrap().clone() - } -} diff --git a/src/derived/lru.rs b/src/derived/lru.rs deleted file mode 100644 index 4a9af3352..000000000 --- 
a/src/derived/lru.rs +++ /dev/null @@ -1,39 +0,0 @@ -use crate::hash::FxLinkedHashSet; - -use super::DerivedKeyIndex; -use crossbeam_utils::atomic::AtomicCell; -use parking_lot::Mutex; - -#[derive(Default)] -pub(super) struct Lru { - capacity: AtomicCell, - set: Mutex>, -} - -impl Lru { - pub(super) fn record_use(&self, index: DerivedKeyIndex) -> Option { - let capacity = self.capacity.load(); - - if capacity == 0 { - // LRU is disabled - return None; - } - - let mut set = self.set.lock(); - set.insert(index); - if set.len() > capacity { - return set.pop_front(); - } - - None - } - - pub(super) fn set_capacity(&self, capacity: usize) { - self.capacity.store(capacity); - - if capacity == 0 { - let mut set = self.set.lock(); - *set = FxLinkedHashSet::default(); - } - } -} diff --git a/src/derived/maybe_changed_after.rs b/src/derived/maybe_changed_after.rs deleted file mode 100644 index 7e7253de5..000000000 --- a/src/derived/maybe_changed_after.rs +++ /dev/null @@ -1,181 +0,0 @@ -use arc_swap::Guard; - -use crate::{ - plumbing::{DatabaseOps, QueryFunction}, - runtime::{ - local_state::{ActiveQueryGuard, QueryInputs}, - StampedValue, - }, - Database, DatabaseKeyIndex, QueryDb, Revision, Runtime, -}; - -use super::{memo::Memo, DerivedKeyIndex, DerivedStorage, MemoizationPolicy}; - -impl DerivedStorage -where - Q: QueryFunction, - MP: MemoizationPolicy, -{ - pub(super) fn maybe_changed_after( - &self, - db: &>::DynDb, - key_index: DerivedKeyIndex, - revision: Revision, - ) -> bool { - db.unwind_if_cancelled(); - - loop { - let runtime = db.salsa_runtime(); - let database_key_index = self.database_key_index(key_index); - - log::debug!( - "{:?}: maybe_changed_after(revision = {:?})", - database_key_index.debug(db), - revision, - ); - - // Check if we have a verified version: this is the hot path. 
- let memo_guard = self.memo_map.get(key_index); - if let Some(memo) = &memo_guard { - if self.shallow_verify_memo(db, runtime, database_key_index, memo) { - return memo.revisions.changed_at > revision; - } - drop(memo_guard); // release the arc-swap guard before cold path - if let Some(mcs) = self.maybe_changed_after_cold(db, key_index, revision) { - return mcs; - } else { - // We failed to claim, have to retry. - } - } else { - // No memo? Assume has changed. - return true; - } - } - } - - fn maybe_changed_after_cold( - &self, - db: &>::DynDb, - key_index: DerivedKeyIndex, - revision: Revision, - ) -> Option { - let runtime = db.salsa_runtime(); - let database_key_index = self.database_key_index(key_index); - - let _claim_guard = self.sync_map.claim(db.ops_database(), database_key_index)?; - let active_query = runtime.push_query(database_key_index); - - // Load the current memo, if any. Use a real arc, not an arc-swap guard, - // since we may recurse. - let old_memo = match self.memo_map.get(key_index) { - Some(m) => Guard::into_inner(m), - None => return Some(true), - }; - - log::debug!( - "{:?}: maybe_changed_after_cold, successful claim, revision = {:?}, old_memo = {:#?}", - database_key_index.debug(db), - revision, - old_memo - ); - - // Check if the inputs are still valid and we can just compare `changed_at`. - if self.deep_verify_memo(db, &old_memo, &active_query) { - return Some(old_memo.revisions.changed_at > revision); - } - - // If inputs have changed, but we have an old value, we can re-execute. - // It is possible the result will be equal to the old value and hence - // backdated. In that case, although we will have computed a new memo, - // the value has not logically changed. - if old_memo.value.is_some() { - let StampedValue { changed_at, .. } = self.execute(db, active_query, Some(old_memo)); - return Some(changed_at > revision); - } - - // Otherwise, nothing for it: have to consider the value to have changed. 
- Some(true) - } - - /// True if the memo's value and `changed_at` time is still valid in this revision. - /// Does only a shallow O(1) check, doesn't walk the dependencies. - #[inline] - pub(super) fn shallow_verify_memo( - &self, - db: &>::DynDb, - runtime: &Runtime, - database_key_index: DatabaseKeyIndex, - memo: &Memo, - ) -> bool { - let verified_at = memo.verified_at.load(); - let revision_now = runtime.current_revision(); - - log::debug!( - "{:?}: shallow_verify_memo(memo = {:#?})", - database_key_index.debug(db), - memo, - ); - - if verified_at == revision_now { - // Already verified. - return true; - } - - if memo.check_durability(runtime) { - // No input of the suitable durability has changed since last verified. - memo.mark_as_verified(db.ops_database(), runtime, database_key_index); - return true; - } - - false - } - - /// True if the memo's value and `changed_at` time is up to date in the current - /// revision. When this returns true, it also updates the memo's `verified_at` - /// field if needed to make future calls cheaper. - /// - /// Takes an [`ActiveQueryGuard`] argument because this function recursively - /// walks dependencies of `old_memo` and may even execute them to see if their - /// outputs have changed. As that could lead to cycles, it is important that the - /// query is on the stack. - pub(super) fn deep_verify_memo( - &self, - db: &>::DynDb, - old_memo: &Memo, - active_query: &ActiveQueryGuard<'_>, - ) -> bool { - let runtime = db.salsa_runtime(); - let database_key_index = active_query.database_key_index; - - log::debug!( - "{:?}: deep_verify_memo(old_memo = {:#?})", - database_key_index.debug(db), - old_memo - ); - - if self.shallow_verify_memo(db, runtime, database_key_index, old_memo) { - return true; - } - - match &old_memo.revisions.inputs { - QueryInputs::Untracked => { - // Untracked inputs? Have to assume that it changed. - return false; - } - QueryInputs::NoInputs => { - // No inputs, cannot have changed. 
- } - QueryInputs::Tracked { inputs } => { - let last_verified_at = old_memo.verified_at.load(); - for &input in inputs.iter() { - if db.maybe_changed_after(input, last_verified_at) { - return false; - } - } - } - } - - old_memo.mark_as_verified(db.ops_database(), runtime, database_key_index); - true - } -} diff --git a/src/derived/memo.rs b/src/derived/memo.rs deleted file mode 100644 index 155f666fe..000000000 --- a/src/derived/memo.rs +++ /dev/null @@ -1,134 +0,0 @@ -use std::sync::Arc; - -use arc_swap::{ArcSwap, Guard}; -use crossbeam_utils::atomic::AtomicCell; -use dashmap::mapref::entry::Entry; - -use crate::{ - hash::FxDashMap, - runtime::local_state::{QueryInputs, QueryRevisions}, - DatabaseKeyIndex, Event, EventKind, Revision, Runtime, -}; - -use super::DerivedKeyIndex; - -pub(super) struct MemoMap { - map: FxDashMap>>, -} - -impl Default for MemoMap { - fn default() -> Self { - Self { - map: Default::default(), - } - } -} - -impl MemoMap { - /// Inserts the memo for the given key; (atomically) overwrites any previously existing memo.- - pub(super) fn insert(&self, key: DerivedKeyIndex, memo: Memo) { - self.map.insert(key, ArcSwap::from(Arc::new(memo))); - } - - /// Evicts the existing memo for the given key, replacing it - /// with an equivalent memo that has no value. If the memo - /// has untracked inputs, this has no effect. - pub(super) fn evict(&self, key: DerivedKeyIndex) { - // Nit: this function embodies a touch more "business logic" - // than I would like (specifically the check about "query-input::untracked"), - // but I can't see a clean way to encapsulate it otherwise. I suppose - // it could take a closure, but it seems silly. - match self.map.entry(key) { - Entry::Vacant(_) => (), - Entry::Occupied(entry) => { - let memo = entry.get().load(); - - // Careful: we can't evict memos with untracked inputs - // as their values cannot be reconstructed. 
- if let QueryInputs::Untracked = memo.revisions.inputs { - return; - } - - let memo_evicted = Arc::new(Memo::new( - None::, - memo.verified_at.load(), - memo.revisions.clone(), - )); - - entry.get().store(memo_evicted); - } - } - } - - /// Loads the current memo for `key_index`. This does not hold any sort of - /// lock on the `memo_map` once it returns, so this memo could immediately - /// become outdated if other threads store into the `memo_map`. - pub(super) fn get(&self, key: DerivedKeyIndex) -> Option>>> { - self.map.get(&key).map(|v| v.load()) - } - - /// Iterates over the entries in the map. This holds a read lock while iteration continues. - pub(super) fn iter(&self) -> impl Iterator>)> + '_ { - self.map - .iter() - .map(move |r| (*r.key(), r.value().load_full())) - } - - /// Clears the memo of all entries. - pub(super) fn clear(&self) { - self.map.clear() - } -} - -#[derive(Debug)] -pub(super) struct Memo { - /// The result of the query, if we decide to memoize it. - pub(super) value: Option, - - /// Last revision when this memo was verified; this begins - /// as the current revision. - pub(super) verified_at: AtomicCell, - - /// Revision information - pub(super) revisions: QueryRevisions, -} - -impl Memo { - pub(super) fn new(value: Option, revision_now: Revision, revisions: QueryRevisions) -> Self { - Memo { - value, - verified_at: AtomicCell::new(revision_now), - revisions, - } - } - /// True if this memo is known not to have changed based on its durability. - pub(super) fn check_durability(&self, runtime: &Runtime) -> bool { - let last_changed = runtime.last_changed_revision(self.revisions.durability); - let verified_at = self.verified_at.load(); - log::debug!( - "check_durability(last_changed={:?} <= verified_at={:?}) = {:?}", - last_changed, - self.verified_at, - last_changed <= verified_at, - ); - last_changed <= verified_at - } - - /// Mark memo as having been verified in the `revision_now`, which should - /// be the current revision. 
- pub(super) fn mark_as_verified( - &self, - db: &dyn crate::Database, - runtime: &crate::Runtime, - database_key_index: DatabaseKeyIndex, - ) { - db.salsa_event(Event { - runtime_id: runtime.id(), - kind: EventKind::DidValidateMemoizedValue { - database_key: database_key_index, - }, - }); - - self.verified_at.store(runtime.current_revision()); - } -} diff --git a/src/derived/sync.rs b/src/derived/sync.rs deleted file mode 100644 index 31d9e47ca..000000000 --- a/src/derived/sync.rs +++ /dev/null @@ -1,87 +0,0 @@ -use std::sync::atomic::{AtomicBool, Ordering}; - -use crate::{hash::FxDashMap, runtime::WaitResult, Database, DatabaseKeyIndex, Runtime, RuntimeId}; - -use super::DerivedKeyIndex; - -#[derive(Default)] -pub(super) struct SyncMap { - sync_map: FxDashMap, -} - -struct SyncState { - id: RuntimeId, - - /// Set to true if any other queries are blocked, - /// waiting for this query to complete. - anyone_waiting: AtomicBool, -} - -impl SyncMap { - pub(super) fn claim<'me>( - &'me self, - db: &'me dyn Database, - database_key_index: DatabaseKeyIndex, - ) -> Option> { - let runtime = db.salsa_runtime(); - match self.sync_map.entry(database_key_index.key_index) { - dashmap::mapref::entry::Entry::Vacant(entry) => { - entry.insert(SyncState { - id: runtime.id(), - anyone_waiting: AtomicBool::new(false), - }); - Some(ClaimGuard { - database_key: database_key_index, - runtime, - sync_map: &self.sync_map, - }) - } - dashmap::mapref::entry::Entry::Occupied(entry) => { - // NB: `Ordering::Relaxed` is sufficient here, - // as there are no loads that are "gated" on this - // value. Everything that is written is also protected - // by a lock that must be acquired. The role of this - // boolean is to decide *whether* to acquire the lock, - // not to gate future atomic reads. 
- entry.get().anyone_waiting.store(true, Ordering::Relaxed); - let other_id = entry.get().id; - runtime.block_on_or_unwind(db, database_key_index, other_id, entry); - None - } - } - } -} - -/// Marks an active 'claim' in the synchronization map. The claim is -/// released when this value is dropped. -#[must_use] -pub(super) struct ClaimGuard<'me> { - database_key: DatabaseKeyIndex, - runtime: &'me Runtime, - sync_map: &'me FxDashMap, -} - -impl<'me> ClaimGuard<'me> { - fn remove_from_map_and_unblock_queries(&self, wait_result: WaitResult) { - let (_, SyncState { anyone_waiting, .. }) = - self.sync_map.remove(&self.database_key.key_index).unwrap(); - - // NB: `Ordering::Relaxed` is sufficient here, - // see `store` above for explanation. - if anyone_waiting.load(Ordering::Relaxed) { - self.runtime - .unblock_queries_blocked_on(self.database_key, wait_result) - } - } -} - -impl<'me> Drop for ClaimGuard<'me> { - fn drop(&mut self) { - let wait_result = if std::thread::panicking() { - WaitResult::Panicked - } else { - WaitResult::Completed - }; - self.remove_from_map_and_unblock_queries(wait_result) - } -} diff --git a/src/doctest.rs b/src/doctest.rs deleted file mode 100644 index 7bafff0d0..000000000 --- a/src/doctest.rs +++ /dev/null @@ -1,114 +0,0 @@ -#![allow(dead_code)] - -/// Test that a database with a key/value that is not `Send` will, -/// indeed, not be `Send`. 
-/// -/// ```compile_fail,E0277 -/// use std::rc::Rc; -/// -/// #[salsa::query_group(NoSendSyncStorage)] -/// trait NoSendSyncDatabase: salsa::Database { -/// fn no_send_sync_value(&self, key: bool) -> Rc; -/// fn no_send_sync_key(&self, key: Rc) -> bool; -/// } -/// -/// fn no_send_sync_value(_db: &dyn NoSendSyncDatabase, key: bool) -> Rc { -/// Rc::new(key) -/// } -/// -/// fn no_send_sync_key(_db: &dyn NoSendSyncDatabase, key: Rc) -> bool { -/// *key -/// } -/// -/// #[salsa::database(NoSendSyncStorage)] -/// #[derive(Default)] -/// struct DatabaseImpl { -/// storage: salsa::Storage, -/// } -/// -/// impl salsa::Database for DatabaseImpl { -/// } -/// -/// fn is_send(_: T) { } -/// -/// fn assert_send() { -/// is_send(DatabaseImpl::default()); -/// } -/// ``` -fn test_key_not_send_db_not_send() {} - -/// Test that a database with a key/value that is not `Sync` will not -/// be `Send`. -/// -/// ```compile_fail,E0277 -/// use std::rc::Rc; -/// use std::cell::Cell; -/// -/// #[salsa::query_group(NoSendSyncStorage)] -/// trait NoSendSyncDatabase: salsa::Database { -/// fn no_send_sync_value(&self, key: bool) -> Cell; -/// fn no_send_sync_key(&self, key: Cell) -> bool; -/// } -/// -/// fn no_send_sync_value(_db: &dyn NoSendSyncDatabase, key: bool) -> Cell { -/// Cell::new(key) -/// } -/// -/// fn no_send_sync_key(_db: &dyn NoSendSyncDatabase, key: Cell) -> bool { -/// *key -/// } -/// -/// #[salsa::database(NoSendSyncStorage)] -/// #[derive(Default)] -/// struct DatabaseImpl { -/// runtime: salsa::Storage, -/// } -/// -/// impl salsa::Database for DatabaseImpl { -/// } -/// -/// fn is_send(_: T) { } -/// -/// fn assert_send() { -/// is_send(DatabaseImpl::default()); -/// } -/// ``` -fn test_key_not_sync_db_not_send() {} - -/// Test that a database with a key/value that is not `Sync` will -/// not be `Sync`. 
-/// -/// ```compile_fail,E0277 -/// use std::cell::Cell; -/// use std::rc::Rc; -/// -/// #[salsa::query_group(NoSendSyncStorage)] -/// trait NoSendSyncDatabase: salsa::Database { -/// fn no_send_sync_value(&self, key: bool) -> Cell; -/// fn no_send_sync_key(&self, key: Cell) -> bool; -/// } -/// -/// fn no_send_sync_value(_db: &dyn NoSendSyncDatabase, key: bool) -> Cell { -/// Cell::new(key) -/// } -/// -/// fn no_send_sync_key(_db: &dyn NoSendSyncDatabase, key: Cell) -> bool { -/// *key -/// } -/// -/// #[salsa::database(NoSendSyncStorage)] -/// #[derive(Default)] -/// struct DatabaseImpl { -/// runtime: salsa::Storage, -/// } -/// -/// impl salsa::Database for DatabaseImpl { -/// } -/// -/// fn is_sync(_: T) { } -/// -/// fn assert_send() { -/// is_sync(DatabaseImpl::default()); -/// } -/// ``` -fn test_key_not_sync_db_not_sync() {} diff --git a/src/durability.rs b/src/durability.rs deleted file mode 100644 index 58a81e378..000000000 --- a/src/durability.rs +++ /dev/null @@ -1,49 +0,0 @@ -/// Describes how likely a value is to change -- how "durable" it is. -/// By default, inputs have `Durability::LOW` and interned values have -/// `Durability::HIGH`. But inputs can be explicitly set with other -/// durabilities. -/// -/// We use durabilities to optimize the work of "revalidating" a query -/// after some input has changed. Ordinarily, in a new revision, -/// queries have to trace all their inputs back to the base inputs to -/// determine if any of those inputs have changed. But if we know that -/// the only changes were to inputs of low durability (the common -/// case), and we know that the query only used inputs of medium -/// durability or higher, then we can skip that enumeration. -/// -/// Typically, one assigns low durabilites to inputs that the user is -/// frequently editing. Medium or high durabilities are used for -/// configuration, the source from library crates, or other things -/// that are unlikely to be edited. 
-#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord)] -pub struct Durability(u8); - -impl Durability { - /// Low durability: things that change frequently. - /// - /// Example: part of the crate being edited - pub const LOW: Durability = Durability(0); - - /// Medium durability: things that change sometimes, but rarely. - /// - /// Example: a Cargo.toml file - pub const MEDIUM: Durability = Durability(1); - - /// High durability: things that are not expected to change under - /// common usage. - /// - /// Example: the standard library or something from crates.io - pub const HIGH: Durability = Durability(2); - - /// The maximum possible durability; equivalent to HIGH but - /// "conceptually" distinct (i.e., if we add more durability - /// levels, this could change). - pub(crate) const MAX: Durability = Self::HIGH; - - /// Number of durability levels. - pub(crate) const LEN: usize = 3; - - pub(crate) fn index(self) -> usize { - self.0 as usize - } -} diff --git a/src/hash.rs b/src/hash.rs deleted file mode 100644 index ec94909e2..000000000 --- a/src/hash.rs +++ /dev/null @@ -1,5 +0,0 @@ -pub(crate) type FxHasher = std::hash::BuildHasherDefault; -pub(crate) type FxIndexSet = indexmap::IndexSet; -pub(crate) type FxIndexMap = indexmap::IndexMap; -pub(crate) type FxDashMap = dashmap::DashMap; -pub(crate) type FxLinkedHashSet = hashlink::LinkedHashSet; diff --git a/src/input.rs b/src/input.rs deleted file mode 100644 index 70996e340..000000000 --- a/src/input.rs +++ /dev/null @@ -1,337 +0,0 @@ -use crate::debug::TableEntry; -use crate::durability::Durability; -use crate::hash::FxIndexMap; -use crate::plumbing::CycleRecoveryStrategy; -use crate::plumbing::InputQueryStorageOps; -use crate::plumbing::QueryStorageMassOps; -use crate::plumbing::QueryStorageOps; -use crate::revision::Revision; -use crate::runtime::StampedValue; -use crate::Database; -use crate::Query; -use crate::Runtime; -use crate::{DatabaseKeyIndex, QueryDb}; -use indexmap::map::Entry; -use 
log::debug; -use parking_lot::RwLock; -use std::convert::TryFrom; - -/// Input queries store the result plus a list of the other queries -/// that they invoked. This means we can avoid recomputing them when -/// none of those inputs have changed. -pub struct InputStorage -where - Q: Query, -{ - group_index: u16, - slots: RwLock>>, -} - -struct Slot -where - Q: Query, -{ - key: Q::Key, - database_key_index: DatabaseKeyIndex, - - /// Value for this input: initially, it is `Some`. - /// - /// If it is `None`, then the value was removed - /// using `remove_input`. - /// - /// Note that the slot is *never* removed, so as to preserve - /// the `DatabaseKeyIndex` values. - /// - /// Impl note: We store an `Option>` - /// instead of a `StampedValue>` for two reasons. - /// One, it corresponds to "data never existed in the first place", - /// and two, it's more efficient, since the compiler can make - /// use of the revisions in the `StampedValue` as a niche to avoid - /// an extra word. (See the `assert_size_of` test below.) 
- stamped_value: RwLock>>, -} - -#[test] -fn assert_size_of() { - assert_eq!( - std::mem::size_of::>>>(), - std::mem::size_of::>>(), - ); -} - -impl std::panic::RefUnwindSafe for InputStorage -where - Q: Query, - Q::Key: std::panic::RefUnwindSafe, - Q::Value: std::panic::RefUnwindSafe, -{ -} - -impl QueryStorageOps for InputStorage -where - Q: Query, -{ - const CYCLE_STRATEGY: crate::plumbing::CycleRecoveryStrategy = CycleRecoveryStrategy::Panic; - - fn new(group_index: u16) -> Self { - InputStorage { - group_index, - slots: Default::default(), - } - } - - fn fmt_index( - &self, - _db: &>::DynDb, - index: DatabaseKeyIndex, - fmt: &mut std::fmt::Formatter<'_>, - ) -> std::fmt::Result { - assert_eq!(index.group_index, self.group_index); - assert_eq!(index.query_index, Q::QUERY_INDEX); - let slot_map = self.slots.read(); - let key = slot_map.get_index(index.key_index as usize).unwrap().0; - write!(fmt, "{}({:?})", Q::QUERY_NAME, key) - } - - fn maybe_changed_after( - &self, - db: &>::DynDb, - input: DatabaseKeyIndex, - revision: Revision, - ) -> bool { - assert_eq!(input.group_index, self.group_index); - assert_eq!(input.query_index, Q::QUERY_INDEX); - debug_assert!(revision < db.salsa_runtime().current_revision()); - let slots = self.slots.read(); - let (_, slot) = slots.get_index(input.key_index as usize).unwrap(); - slot.maybe_changed_after(db, revision) - } - - fn fetch(&self, db: &>::DynDb, key: &Q::Key) -> Q::Value { - db.unwind_if_cancelled(); - - let slots = self.slots.read(); - let slot = slots - .get(key) - .unwrap_or_else(|| panic!("no value set for {:?}({:?})", Q::default(), key)); - - let value = slot.stamped_value.read().clone(); - match value { - Some(StampedValue { - value, - durability, - changed_at, - }) => { - db.salsa_runtime() - .report_query_read_and_unwind_if_cycle_resulted( - slot.database_key_index, - durability, - changed_at, - ); - - value - } - - None => { - panic!("value removed for {:?}({:?})", Q::default(), key) - } - } - } - - fn 
durability(&self, _db: &>::DynDb, key: &Q::Key) -> Durability { - let slots = self.slots.read(); - match slots.get(key) { - Some(slot) => match &*slot.stamped_value.read() { - Some(v) => v.durability, - None => Durability::LOW, // removed - }, - None => panic!("no value set for {:?}({:?})", Q::default(), key), - } - } - - fn entries(&self, _db: &>::DynDb) -> C - where - C: std::iter::FromIterator>, - { - let slots = self.slots.read(); - slots - .values() - .map(|slot| { - let value = (*slot.stamped_value.read()) - .as_ref() - .map(|stamped_value| stamped_value.value.clone()); - TableEntry::new(slot.key.clone(), value) - }) - .collect() - } -} - -impl Slot -where - Q: Query, -{ - fn maybe_changed_after(&self, _db: &>::DynDb, revision: Revision) -> bool { - debug!( - "maybe_changed_after(slot={:?}, revision={:?})", - self, revision, - ); - - match &*self.stamped_value.read() { - Some(stamped_value) => { - let changed_at = stamped_value.changed_at; - - debug!("maybe_changed_after: changed_at = {:?}", changed_at); - - changed_at > revision - } - - None => { - // treat a removed input as always having changed - true - } - } - } -} - -impl QueryStorageMassOps for InputStorage -where - Q: Query, -{ - fn purge(&self) { - *self.slots.write() = Default::default(); - } -} - -impl InputQueryStorageOps for InputStorage -where - Q: Query, -{ - fn set(&self, runtime: &mut Runtime, key: &Q::Key, value: Q::Value, durability: Durability) { - log::debug!( - "{:?}({:?}) = {:?} ({:?})", - Q::default(), - key, - value, - durability - ); - - // The value is changing, so we need a new revision (*). We also - // need to update the 'last changed' revision by invoking - // `guard.mark_durability_as_changed`. - // - // CAREFUL: This will block until the global revision lock can - // be acquired. If there are still queries executing, they may - // need to read from this input. Therefore, we wait to acquire - // the lock on `map` until we also hold the global query write - // lock. 
- // - // (*) Technically, since you can't presently access an input - // for a non-existent key, and you can't enumerate the set of - // keys, we only need a new revision if the key used to - // exist. But we may add such methods in the future and this - // case doesn't generally seem worth optimizing for. - runtime.with_incremented_revision(|next_revision| { - let mut slots = self.slots.write(); - - // Do this *after* we acquire the lock, so that we are not - // racing with somebody else to modify this same cell. - // (Otherwise, someone else might write a *newer* revision - // into the same cell while we block on the lock.) - let stamped_value = StampedValue { - value, - durability, - changed_at: next_revision, - }; - - match slots.entry(key.clone()) { - Entry::Occupied(entry) => { - let mut slot_stamped_value = entry.get().stamped_value.write(); - match &mut *slot_stamped_value { - Some(slot_stamped_value) => { - // Modifying an existing value that has not been removed. - let old_durability = slot_stamped_value.durability; - *slot_stamped_value = stamped_value; - Some(old_durability) - } - - None => { - // Overwriting a removed value: this is the same as inserting a new value, - // it doesn't modify any existing data (the remove did that). 
- *slot_stamped_value = Some(stamped_value); - None - } - } - } - - Entry::Vacant(entry) => { - let key_index = u32::try_from(entry.index()).unwrap(); - let database_key_index = DatabaseKeyIndex { - group_index: self.group_index, - query_index: Q::QUERY_INDEX, - key_index, - }; - entry.insert(Slot { - key: key.clone(), - database_key_index, - stamped_value: RwLock::new(Some(stamped_value)), - }); - None - } - } - }); - } - - fn remove(&self, runtime: &mut Runtime, key: &::Key) -> ::Value { - let mut value = None; - runtime.with_incremented_revision(&mut |_| { - let mut slots = self.slots.write(); - let slot = slots.get_mut(key)?; - - if let Some(slot_stamped_value) = slot.stamped_value.get_mut().take() { - value = Some(slot_stamped_value.value); - Some(slot_stamped_value.durability) - } else { - None - } - }); - - value.unwrap_or_else(|| panic!("no value set for {:?}({:?})", Q::default(), key)) - } -} - -/// Check that `Slot: Send + Sync` as long as -/// `DB::DatabaseData: Send + Sync`, which in turn implies that -/// `Q::Key: Send + Sync`, `Q::Value: Send + Sync`. -#[allow(dead_code)] -fn check_send_sync() -where - Q: Query, - Q::Key: Send + Sync, - Q::Value: Send + Sync, -{ - fn is_send_sync() {} - is_send_sync::>(); -} - -/// Check that `Slot: 'static` as long as -/// `DB::DatabaseData: 'static`, which in turn implies that -/// `Q::Key: 'static`, `Q::Value: 'static`. 
-#[allow(dead_code)] -fn check_static() -where - Q: Query + 'static, - Q::Key: 'static, - Q::Value: 'static, -{ - fn is_static() {} - is_static::>(); -} - -impl std::fmt::Debug for Slot -where - Q: Query, -{ - fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(fmt, "{:?}({:?})", Q::default(), self.key) - } -} diff --git a/src/intern_id.rs b/src/intern_id.rs deleted file mode 100644 index 068c89668..000000000 --- a/src/intern_id.rs +++ /dev/null @@ -1,129 +0,0 @@ -use std::fmt; -use std::num::NonZeroU32; - -/// The "raw-id" is used for interned keys in salsa -- it is basically -/// a newtype'd u32. Typically, it is wrapped in a type of your own -/// devising. For more information about interned keys, see [the -/// interned key RFC][rfc]. -/// -/// # Creating a `InternId` -// -/// InternId values can be constructed using the `From` impls, -/// which are implemented for `u32` and `usize`: -/// -/// ``` -/// # use salsa::InternId; -/// let intern_id1 = InternId::from(22_u32); -/// let intern_id2 = InternId::from(22_usize); -/// assert_eq!(intern_id1, intern_id2); -/// ``` -/// -/// # Converting to a u32 or usize -/// -/// Normally, there should be no need to access the underlying integer -/// in a `InternId`. But if you do need to do so, you can convert to a -/// `usize` using the `as_u32` or `as_usize` methods or the `From` impls. -/// -/// ``` -/// # use salsa::InternId; -/// let intern_id = InternId::from(22_u32); -/// let value = u32::from(intern_id); -/// assert_eq!(value, 22); -/// ``` -/// -/// ## Illegal values -/// -/// Be warned, however, that `InternId` values cannot be created from -/// *arbitrary* values -- in particular large values greater than -/// `InternId::MAX` will panic. Those large values are reserved so that -/// the Rust compiler can use them as sentinel values, which means -/// that (for example) `Option` is represented in a single -/// word. 
-/// -/// ```should_panic -/// # use salsa::InternId; -/// InternId::from(InternId::MAX); -/// ``` -/// -/// [rfc]: https://github.com/salsa-rs/salsa-rfcs/pull/2 -#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] -pub struct InternId { - value: NonZeroU32, -} - -impl InternId { - /// The maximum allowed `InternId`. This value can grow between - /// releases without affecting semver. - pub const MAX: u32 = 0xFFFF_FF00; - - /// Creates a new InternId. Unsafe as `value` must be less than `MAX` - /// and this is not checked in release builds. - unsafe fn new_unchecked(value: u32) -> Self { - debug_assert!(value < InternId::MAX); - InternId { - value: NonZeroU32::new_unchecked(value + 1), - } - } - - /// Convert this raw-id into a u32 value. - /// - /// ``` - /// # use salsa::InternId; - /// let intern_id = InternId::from(22_u32); - /// let value = intern_id.as_usize(); - /// assert_eq!(value, 22); - /// ``` - pub fn as_u32(self) -> u32 { - self.value.get() - 1 - } - - /// Convert this raw-id into a usize value. 
- /// - /// ``` - /// # use salsa::InternId; - /// let intern_id = InternId::from(22_u32); - /// let value = intern_id.as_usize(); - /// assert_eq!(value, 22); - /// ``` - pub fn as_usize(self) -> usize { - self.as_u32() as usize - } -} - -impl From for u32 { - fn from(raw: InternId) -> u32 { - raw.as_u32() - } -} - -impl From for usize { - fn from(raw: InternId) -> usize { - raw.as_usize() - } -} - -impl From for InternId { - fn from(id: u32) -> InternId { - assert!(id < InternId::MAX); - unsafe { InternId::new_unchecked(id) } - } -} - -impl From for InternId { - fn from(id: usize) -> InternId { - assert!(id < (InternId::MAX as usize)); - unsafe { InternId::new_unchecked(id as u32) } - } -} - -impl fmt::Debug for InternId { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - self.as_usize().fmt(f) - } -} - -impl fmt::Display for InternId { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - self.as_usize().fmt(f) - } -} diff --git a/src/interned.rs b/src/interned.rs deleted file mode 100644 index a72202516..000000000 --- a/src/interned.rs +++ /dev/null @@ -1,424 +0,0 @@ -use crate::debug::TableEntry; -use crate::durability::Durability; -use crate::intern_id::InternId; -use crate::plumbing::CycleRecoveryStrategy; -use crate::plumbing::HasQueryGroup; -use crate::plumbing::QueryStorageMassOps; -use crate::plumbing::QueryStorageOps; -use crate::revision::Revision; -use crate::Query; -use crate::{Database, DatabaseKeyIndex, QueryDb}; -use parking_lot::RwLock; -use rustc_hash::FxHashMap; -use std::collections::hash_map::Entry; -use std::convert::From; -use std::fmt::Debug; -use std::hash::Hash; -use std::sync::Arc; - -const INTERN_DURABILITY: Durability = Durability::HIGH; - -/// Handles storage where the value is 'derived' by executing a -/// function (in contrast to "inputs"). -pub struct InternedStorage -where - Q: Query, - Q::Value: InternKey, -{ - group_index: u16, - tables: RwLock>, -} - -/// Storage for the looking up interned things. 
-pub struct LookupInternedStorage -where - Q: Query, - Q::Key: InternKey, - Q::Value: Eq + Hash, -{ - phantom: std::marker::PhantomData<(Q::Key, IQ)>, -} - -struct InternTables { - /// Map from the key to the corresponding intern-index. - map: FxHashMap, - - /// For each valid intern-index, stores the interned value. - values: Vec>>, -} - -/// Trait implemented for the "key" that results from a -/// `#[salsa::intern]` query. This is basically meant to be a -/// "newtype"'d `u32`. -pub trait InternKey { - /// Create an instance of the intern-key from a `u32` value. - fn from_intern_id(v: InternId) -> Self; - - /// Extract the `u32` with which the intern-key was created. - fn as_intern_id(&self) -> InternId; -} - -impl InternKey for InternId { - fn from_intern_id(v: InternId) -> InternId { - v - } - - fn as_intern_id(&self) -> InternId { - *self - } -} - -#[derive(Debug)] -struct Slot { - /// Index of this slot in the list of interned values; - /// set to None if gc'd. - index: InternId, - - /// DatabaseKeyIndex for this slot. - database_key_index: DatabaseKeyIndex, - - /// Value that was interned. - value: K, - - /// When was this intern'd? - /// - /// (This informs the "changed-at" result) - interned_at: Revision, -} - -impl std::panic::RefUnwindSafe for InternedStorage -where - Q: Query, - Q::Key: std::panic::RefUnwindSafe, - Q::Value: InternKey, - Q::Value: std::panic::RefUnwindSafe, -{ -} - -impl InternTables { - /// Returns the slot for the given key. - fn slot_for_key(&self, key: &K) -> Option>> { - let index = self.map.get(key)?; - Some(self.slot_for_index(*index)) - } - - /// Returns the slot at the given index. 
- fn slot_for_index(&self, index: InternId) -> Arc> { - let slot = &self.values[index.as_usize()]; - slot.clone() - } -} - -impl Default for InternTables -where - K: Eq + Hash, -{ - fn default() -> Self { - Self { - map: Default::default(), - values: Default::default(), - } - } -} - -impl InternedStorage -where - Q: Query, - Q::Key: Eq + Hash + Clone, - Q::Value: InternKey, -{ - /// If `key` has already been interned, returns its slot. Otherwise, creates a new slot. - fn intern_index(&self, db: &>::DynDb, key: &Q::Key) -> Arc> { - if let Some(i) = self.intern_check(key) { - return i; - } - - let owned_key1 = key.to_owned(); - let owned_key2 = owned_key1.clone(); - let revision_now = db.salsa_runtime().current_revision(); - - let mut tables = self.tables.write(); - let tables = &mut *tables; - let entry = match tables.map.entry(owned_key1) { - Entry::Vacant(entry) => entry, - Entry::Occupied(entry) => { - // Somebody inserted this key while we were waiting - // for the write lock. In this case, we don't need to - // update the `accessed_at` field because they should - // have already done so! - let index = *entry.get(); - let slot = &tables.values[index.as_usize()]; - debug_assert_eq!(owned_key2, slot.value); - return slot.clone(); - } - }; - - let create_slot = |index: InternId| { - let database_key_index = DatabaseKeyIndex { - group_index: self.group_index, - query_index: Q::QUERY_INDEX, - key_index: index.as_u32(), - }; - Arc::new(Slot { - index, - database_key_index, - value: owned_key2, - interned_at: revision_now, - }) - }; - - let (slot, index); - index = InternId::from(tables.values.len()); - slot = create_slot(index); - tables.values.push(slot.clone()); - entry.insert(index); - - slot - } - - fn intern_check(&self, key: &Q::Key) -> Option>> { - self.tables.read().slot_for_key(key) - } - - /// Given an index, lookup and clone its value, updating the - /// `accessed_at` time if necessary. 
- fn lookup_value(&self, index: InternId) -> Arc> { - self.tables.read().slot_for_index(index) - } -} - -impl QueryStorageOps for InternedStorage -where - Q: Query, - Q::Value: InternKey, -{ - const CYCLE_STRATEGY: crate::plumbing::CycleRecoveryStrategy = CycleRecoveryStrategy::Panic; - - fn new(group_index: u16) -> Self { - InternedStorage { - group_index, - tables: RwLock::new(InternTables::default()), - } - } - - fn fmt_index( - &self, - _db: &>::DynDb, - index: DatabaseKeyIndex, - fmt: &mut std::fmt::Formatter<'_>, - ) -> std::fmt::Result { - assert_eq!(index.group_index, self.group_index); - assert_eq!(index.query_index, Q::QUERY_INDEX); - let intern_id = InternId::from(index.key_index); - let slot = self.lookup_value(intern_id); - write!(fmt, "{}({:?})", Q::QUERY_NAME, slot.value) - } - - fn maybe_changed_after( - &self, - db: &>::DynDb, - input: DatabaseKeyIndex, - revision: Revision, - ) -> bool { - assert_eq!(input.group_index, self.group_index); - assert_eq!(input.query_index, Q::QUERY_INDEX); - debug_assert!(revision < db.salsa_runtime().current_revision()); - let intern_id = InternId::from(input.key_index); - let slot = self.lookup_value(intern_id); - slot.maybe_changed_after(revision) - } - - fn fetch(&self, db: &>::DynDb, key: &Q::Key) -> Q::Value { - db.unwind_if_cancelled(); - let slot = self.intern_index(db, key); - let changed_at = slot.interned_at; - let index = slot.index; - db.salsa_runtime() - .report_query_read_and_unwind_if_cycle_resulted( - slot.database_key_index, - INTERN_DURABILITY, - changed_at, - ); - ::from_intern_id(index) - } - - fn durability(&self, _db: &>::DynDb, _key: &Q::Key) -> Durability { - INTERN_DURABILITY - } - - fn entries(&self, _db: &>::DynDb) -> C - where - C: std::iter::FromIterator>, - { - let tables = self.tables.read(); - tables - .map - .iter() - .map(|(key, index)| { - TableEntry::new(key.clone(), Some(::from_intern_id(*index))) - }) - .collect() - } -} - -impl QueryStorageMassOps for InternedStorage -where - Q: 
Query, - Q::Value: InternKey, -{ - fn purge(&self) { - *self.tables.write() = Default::default(); - } -} - -// Workaround for -// ``` -// IQ: for<'d> QueryDb< -// 'd, -// DynDb = >::DynDb, -// Group = >::Group, -// GroupStorage = >::GroupStorage, -// >, -// ``` -// not working to make rustc know DynDb, Group and GroupStorage being the same in `Q` and `IQ` -#[doc(hidden)] -pub trait EqualDynDb<'d, IQ>: QueryDb<'d> -where - IQ: QueryDb<'d>, -{ - fn convert_db(d: &Self::DynDb) -> &IQ::DynDb; - fn convert_group_storage(d: &Self::GroupStorage) -> &IQ::GroupStorage; -} - -impl<'d, IQ, Q> EqualDynDb<'d, IQ> for Q -where - Q: QueryDb<'d, DynDb = IQ::DynDb, Group = IQ::Group, GroupStorage = IQ::GroupStorage>, - Q::DynDb: HasQueryGroup, - IQ: QueryDb<'d>, -{ - fn convert_db(d: &Self::DynDb) -> &IQ::DynDb { - d - } - fn convert_group_storage(d: &Self::GroupStorage) -> &IQ::GroupStorage { - d - } -} - -impl QueryStorageOps for LookupInternedStorage -where - Q: Query, - Q::Key: InternKey, - Q::Value: Eq + Hash, - IQ: Query>, - for<'d> Q: EqualDynDb<'d, IQ>, -{ - const CYCLE_STRATEGY: CycleRecoveryStrategy = CycleRecoveryStrategy::Panic; - - fn new(_group_index: u16) -> Self { - LookupInternedStorage { - phantom: std::marker::PhantomData, - } - } - - fn fmt_index( - &self, - db: &>::DynDb, - index: DatabaseKeyIndex, - fmt: &mut std::fmt::Formatter<'_>, - ) -> std::fmt::Result { - let group_storage = - <>::DynDb as HasQueryGroup>::group_storage(db); - let interned_storage = IQ::query_storage(Q::convert_group_storage(group_storage)); - interned_storage.fmt_index(Q::convert_db(db), index, fmt) - } - - fn maybe_changed_after( - &self, - db: &>::DynDb, - input: DatabaseKeyIndex, - revision: Revision, - ) -> bool { - let group_storage = - <>::DynDb as HasQueryGroup>::group_storage(db); - let interned_storage = IQ::query_storage(Q::convert_group_storage(group_storage)); - interned_storage.maybe_changed_after(Q::convert_db(db), input, revision) - } - - fn fetch(&self, db: &>::DynDb, 
key: &Q::Key) -> Q::Value { - let index = key.as_intern_id(); - let group_storage = - <>::DynDb as HasQueryGroup>::group_storage(db); - let interned_storage = IQ::query_storage(Q::convert_group_storage(group_storage)); - let slot = interned_storage.lookup_value(index); - let value = slot.value.clone(); - let interned_at = slot.interned_at; - db.salsa_runtime() - .report_query_read_and_unwind_if_cycle_resulted( - slot.database_key_index, - INTERN_DURABILITY, - interned_at, - ); - value - } - - fn durability(&self, _db: &>::DynDb, _key: &Q::Key) -> Durability { - INTERN_DURABILITY - } - - fn entries(&self, db: &>::DynDb) -> C - where - C: std::iter::FromIterator>, - { - let group_storage = - <>::DynDb as HasQueryGroup>::group_storage(db); - let interned_storage = IQ::query_storage(Q::convert_group_storage(group_storage)); - let tables = interned_storage.tables.read(); - tables - .map - .iter() - .map(|(key, index)| { - TableEntry::new(::from_intern_id(*index), Some(key.clone())) - }) - .collect() - } -} - -impl QueryStorageMassOps for LookupInternedStorage -where - Q: Query, - Q::Key: InternKey, - Q::Value: Eq + Hash, - IQ: Query, -{ - fn purge(&self) {} -} - -impl Slot { - fn maybe_changed_after(&self, revision: Revision) -> bool { - self.interned_at > revision - } -} - -/// Check that `Slot: Send + Sync` as long as -/// `DB::DatabaseData: Send + Sync`, which in turn implies that -/// `Q::Key: Send + Sync`, `Q::Value: Send + Sync`. -#[allow(dead_code)] -fn check_send_sync() -where - K: Send + Sync, -{ - fn is_send_sync() {} - is_send_sync::>(); -} - -/// Check that `Slot: 'static` as long as -/// `DB::DatabaseData: 'static`, which in turn implies that -/// `Q::Key: 'static`, `Q::Value: 'static`. 
-#[allow(dead_code)] -fn check_static() -where - K: 'static, -{ - fn is_static() {} - is_static::>(); -} diff --git a/src/lib.rs b/src/lib.rs deleted file mode 100644 index 71d2b9461..000000000 --- a/src/lib.rs +++ /dev/null @@ -1,774 +0,0 @@ -#![allow(clippy::type_complexity)] -#![allow(clippy::question_mark)] -#![warn(rust_2018_idioms)] -#![warn(missing_docs)] - -//! The salsa crate is a crate for incremental recomputation. It -//! permits you to define a "database" of queries with both inputs and -//! values derived from those inputs; as you set the inputs, you can -//! re-execute the derived queries and it will try to re-use results -//! from previous invocations as appropriate. - -mod derived; -mod doctest; -mod durability; -mod hash; -mod input; -mod intern_id; -mod interned; -mod revision; -mod runtime; -mod storage; - -pub mod debug; -/// Items in this module are public for implementation reasons, -/// and are exempt from the SemVer guarantees. -#[doc(hidden)] -pub mod plumbing; - -use crate::plumbing::CycleRecoveryStrategy; -use crate::plumbing::DerivedQueryStorageOps; -use crate::plumbing::InputQueryStorageOps; -use crate::plumbing::LruQueryStorageOps; -use crate::plumbing::QueryStorageMassOps; -use crate::plumbing::QueryStorageOps; -pub use crate::revision::Revision; -use std::fmt::{self, Debug}; -use std::hash::Hash; -use std::panic::AssertUnwindSafe; -use std::panic::{self, UnwindSafe}; -use std::sync::Arc; - -pub use crate::durability::Durability; -pub use crate::intern_id::InternId; -pub use crate::interned::InternKey; -pub use crate::runtime::Runtime; -pub use crate::runtime::RuntimeId; -pub use crate::storage::Storage; - -/// The base trait which your "query context" must implement. Gives -/// access to the salsa runtime, which you must embed into your query -/// context (along with whatever other state you may require). -pub trait Database: plumbing::DatabaseOps { - /// This function is invoked at key points in the salsa - /// runtime. 
It permits the database to be customized and to - /// inject logging or other custom behavior. - fn salsa_event(&self, event_fn: Event) { - #![allow(unused_variables)] - } - - /// Starts unwinding the stack if the current revision is cancelled. - /// - /// This method can be called by query implementations that perform - /// potentially expensive computations, in order to speed up propagation of - /// cancellation. - /// - /// Cancellation will automatically be triggered by salsa on any query - /// invocation. - /// - /// This method should not be overridden by `Database` implementors. A - /// `salsa_event` is emitted when this method is called, so that should be - /// used instead. - #[inline] - fn unwind_if_cancelled(&self) { - let runtime = self.salsa_runtime(); - self.salsa_event(Event { - runtime_id: runtime.id(), - kind: EventKind::WillCheckCancellation, - }); - - let current_revision = runtime.current_revision(); - let pending_revision = runtime.pending_revision(); - log::debug!( - "unwind_if_cancelled: current_revision={:?}, pending_revision={:?}", - current_revision, - pending_revision - ); - if pending_revision > current_revision { - runtime.unwind_cancelled(); - } - } - - /// Gives access to the underlying salsa runtime. - /// - /// This method should not be overridden by `Database` implementors. - fn salsa_runtime(&self) -> &Runtime { - self.ops_salsa_runtime() - } - - /// Gives access to the underlying salsa runtime. - /// - /// This method should not be overridden by `Database` implementors. - fn salsa_runtime_mut(&mut self) -> &mut Runtime { - self.ops_salsa_runtime_mut() - } -} - -/// The `Event` struct identifies various notable things that can -/// occur during salsa execution. Instances of this struct are given -/// to `salsa_event`. -pub struct Event { - /// The id of the snapshot that triggered the event. Usually - /// 1-to-1 with a thread, as well. - pub runtime_id: RuntimeId, - - /// What sort of event was it. 
- pub kind: EventKind, -} - -impl Event { - /// Returns a type that gives a user-readable debug output. - /// Use like `println!("{:?}", index.debug(db))`. - pub fn debug<'me, D>(&'me self, db: &'me D) -> impl std::fmt::Debug + 'me - where - D: ?Sized + plumbing::DatabaseOps, - { - EventDebug { event: self, db } - } -} - -impl fmt::Debug for Event { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt.debug_struct("Event") - .field("runtime_id", &self.runtime_id) - .field("kind", &self.kind) - .finish() - } -} - -struct EventDebug<'me, D: ?Sized> -where - D: plumbing::DatabaseOps, -{ - event: &'me Event, - db: &'me D, -} - -impl<'me, D: ?Sized> fmt::Debug for EventDebug<'me, D> -where - D: plumbing::DatabaseOps, -{ - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt.debug_struct("Event") - .field("runtime_id", &self.event.runtime_id) - .field("kind", &self.event.kind.debug(self.db)) - .finish() - } -} - -/// An enum identifying the various kinds of events that can occur. -pub enum EventKind { - /// Occurs when we found that all inputs to a memoized value are - /// up-to-date and hence the value can be re-used without - /// executing the closure. - /// - /// Executes before the "re-used" value is returned. - DidValidateMemoizedValue { - /// The database-key for the affected value. Implements `Debug`. - database_key: DatabaseKeyIndex, - }, - - /// Indicates that another thread (with id `other_runtime_id`) is processing the - /// given query (`database_key`), so we will block until they - /// finish. - /// - /// Executes after we have registered with the other thread but - /// before they have answered us. - /// - /// (NB: you can find the `id` of the current thread via the - /// `salsa_runtime`) - WillBlockOn { - /// The id of the runtime we will block on. - other_runtime_id: RuntimeId, - - /// The database-key for the affected value. Implements `Debug`. 
- database_key: DatabaseKeyIndex, - }, - - /// Indicates that the function for this query will be executed. - /// This is either because it has never executed before or because - /// its inputs may be out of date. - WillExecute { - /// The database-key for the affected value. Implements `Debug`. - database_key: DatabaseKeyIndex, - }, - - /// Indicates that `unwind_if_cancelled` was called and salsa will check if - /// the current revision has been cancelled. - WillCheckCancellation, -} - -impl EventKind { - /// Returns a type that gives a user-readable debug output. - /// Use like `println!("{:?}", index.debug(db))`. - pub fn debug<'me, D>(&'me self, db: &'me D) -> impl std::fmt::Debug + 'me - where - D: ?Sized + plumbing::DatabaseOps, - { - EventKindDebug { kind: self, db } - } -} - -impl fmt::Debug for EventKind { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - match self { - EventKind::DidValidateMemoizedValue { database_key } => fmt - .debug_struct("DidValidateMemoizedValue") - .field("database_key", database_key) - .finish(), - EventKind::WillBlockOn { - other_runtime_id, - database_key, - } => fmt - .debug_struct("WillBlockOn") - .field("other_runtime_id", other_runtime_id) - .field("database_key", database_key) - .finish(), - EventKind::WillExecute { database_key } => fmt - .debug_struct("WillExecute") - .field("database_key", database_key) - .finish(), - EventKind::WillCheckCancellation => fmt.debug_struct("WillCheckCancellation").finish(), - } - } -} - -struct EventKindDebug<'me, D: ?Sized> -where - D: plumbing::DatabaseOps, -{ - kind: &'me EventKind, - db: &'me D, -} - -impl<'me, D: ?Sized> fmt::Debug for EventKindDebug<'me, D> -where - D: plumbing::DatabaseOps, -{ - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - match self.kind { - EventKind::DidValidateMemoizedValue { database_key } => fmt - .debug_struct("DidValidateMemoizedValue") - .field("database_key", &database_key.debug(self.db)) - .finish(), - 
EventKind::WillBlockOn { - other_runtime_id, - database_key, - } => fmt - .debug_struct("WillBlockOn") - .field("other_runtime_id", &other_runtime_id) - .field("database_key", &database_key.debug(self.db)) - .finish(), - EventKind::WillExecute { database_key } => fmt - .debug_struct("WillExecute") - .field("database_key", &database_key.debug(self.db)) - .finish(), - EventKind::WillCheckCancellation => fmt.debug_struct("WillCheckCancellation").finish(), - } - } -} - -/// Indicates a database that also supports parallel query -/// evaluation. All of Salsa's base query support is capable of -/// parallel execution, but for it to work, your query key/value types -/// must also be `Send`, as must any additional data in your database. -pub trait ParallelDatabase: Database + Send { - /// Creates a second handle to the database that holds the - /// database fixed at a particular revision. So long as this - /// "frozen" handle exists, any attempt to [`set`] an input will - /// block. - /// - /// [`set`]: struct.QueryTable.html#method.set - /// - /// This is the method you are meant to use most of the time in a - /// parallel setting where modifications may arise asynchronously - /// (e.g., a language server). In this context, it is common to - /// wish to "fork off" a snapshot of the database performing some - /// series of queries in parallel and arranging the results. Using - /// this method for that purpose ensures that those queries will - /// see a consistent view of the database (it is also advisable - /// for those queries to use the [`Runtime::unwind_if_cancelled`] - /// method to check for cancellation). - /// - /// # Panics - /// - /// It is not permitted to create a snapshot from inside of a - /// query. Attepting to do so will panic. - /// - /// # Deadlock warning - /// - /// The intended pattern for snapshots is that, once created, they - /// are sent to another thread and used from there. 
As such, the - /// `snapshot` acquires a "read lock" on the database -- - /// therefore, so long as the `snapshot` is not dropped, any - /// attempt to `set` a value in the database will block. If the - /// `snapshot` is owned by the same thread that is attempting to - /// `set`, this will cause a problem. - /// - /// # How to implement this - /// - /// Typically, this method will create a second copy of your - /// database type (`MyDatabaseType`, in the example below), - /// cloning over each of the fields from `self` into this new - /// copy. For the field that stores the salsa runtime, you should - /// use [the `Runtime::snapshot` method][rfm] to create a snapshot of the - /// runtime. Finally, package up the result using `Snapshot::new`, - /// which is a simple wrapper type that only gives `&self` access - /// to the database within (thus preventing the use of methods - /// that may mutate the inputs): - /// - /// [rfm]: struct.Runtime.html#method.snapshot - /// - /// ```rust,ignore - /// impl ParallelDatabase for MyDatabaseType { - /// fn snapshot(&self) -> Snapshot { - /// Snapshot::new( - /// MyDatabaseType { - /// runtime: self.runtime.snapshot(self), - /// other_field: self.other_field.clone(), - /// } - /// ) - /// } - /// } - /// ``` - fn snapshot(&self) -> Snapshot; -} - -/// Simple wrapper struct that takes ownership of a database `DB` and -/// only gives `&self` access to it. See [the `snapshot` method][fm] -/// for more details. -/// -/// [fm]: trait.ParallelDatabase.html#method.snapshot -#[derive(Debug)] -pub struct Snapshot -where - DB: ParallelDatabase, -{ - db: DB, -} - -impl Snapshot -where - DB: ParallelDatabase, -{ - /// Creates a `Snapshot` that wraps the given database handle - /// `db`. From this point forward, only shared references to `db` - /// will be possible. 
- pub fn new(db: DB) -> Self { - Snapshot { db } - } -} - -impl std::ops::Deref for Snapshot -where - DB: ParallelDatabase, -{ - type Target = DB; - - fn deref(&self) -> &DB { - &self.db - } -} - -/// An integer that uniquely identifies a particular query instance within the -/// database. Used to track dependencies between queries. Fully ordered and -/// equatable but those orderings are arbitrary, and meant to be used only for -/// inserting into maps and the like. -#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)] -pub struct DatabaseKeyIndex { - group_index: u16, - query_index: u16, - key_index: u32, -} - -impl DatabaseKeyIndex { - /// Returns the index of the query group containing this key. - #[inline] - pub fn group_index(self) -> u16 { - self.group_index - } - - /// Returns the index of the query within its query group. - #[inline] - pub fn query_index(self) -> u16 { - self.query_index - } - - /// Returns the index of this particular query key within the query. - #[inline] - pub fn key_index(self) -> u32 { - self.key_index - } - - /// Returns a type that gives a user-readable debug output. - /// Use like `println!("{:?}", index.debug(db))`. - pub fn debug(self, db: &D) -> impl std::fmt::Debug + '_ - where - D: ?Sized + plumbing::DatabaseOps, - { - DatabaseKeyIndexDebug { index: self, db } - } -} - -/// Helper type for `DatabaseKeyIndex::debug` -struct DatabaseKeyIndexDebug<'me, D: ?Sized> -where - D: plumbing::DatabaseOps, -{ - index: DatabaseKeyIndex, - db: &'me D, -} - -impl std::fmt::Debug for DatabaseKeyIndexDebug<'_, D> -where - D: plumbing::DatabaseOps, -{ - fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - self.db.fmt_index(self.index, fmt) - } -} - -/// Trait implements by all of the "special types" associated with -/// each of your queries. -/// -/// Base trait of `Query` that has a lifetime parameter to allow the `DynDb` to be non-'static. 
-pub trait QueryDb<'d>: Sized { - /// Dyn version of the associated trait for this query group. - type DynDb: ?Sized + Database + HasQueryGroup + 'd; - - /// Associate query group struct. - type Group: plumbing::QueryGroup; - - /// Generated struct that contains storage for all queries in a group. - type GroupStorage; -} - -/// Trait implements by all of the "special types" associated with -/// each of your queries. -pub trait Query: Debug + Default + Sized + for<'d> QueryDb<'d> { - /// Type that you you give as a parameter -- for queries with zero - /// or more than one input, this will be a tuple. - type Key: Clone + Debug + Hash + Eq; - - /// What value does the query return? - type Value: Clone + Debug; - - /// Internal struct storing the values for the query. - // type Storage: plumbing::QueryStorageOps; - type Storage; - - /// A unique index identifying this query within the group. - const QUERY_INDEX: u16; - - /// Name of the query method (e.g., `foo`) - const QUERY_NAME: &'static str; - - /// Exact storage for this query from the storage for its group. - fn query_storage<'a>( - group_storage: &'a >::GroupStorage, - ) -> &'a Arc; - - /// Exact storage for this query from the storage for its group. - fn query_storage_mut<'a>( - group_storage: &'a >::GroupStorage, - ) -> &'a Arc; -} - -/// Return value from [the `query` method] on `Database`. -/// Gives access to various less common operations on queries. -/// -/// [the `query` method]: trait.Database.html#method.query -pub struct QueryTable<'me, Q> -where - Q: Query, -{ - db: &'me >::DynDb, - storage: &'me Q::Storage, -} - -impl<'me, Q> QueryTable<'me, Q> -where - Q: Query, - Q::Storage: QueryStorageOps, -{ - /// Constructs a new `QueryTable`. - pub fn new(db: &'me >::DynDb, storage: &'me Q::Storage) -> Self { - Self { db, storage } - } - - /// Execute the query on a given input. Usually it's easier to - /// invoke the trait method directly. 
Note that for variadic - /// queries (those with no inputs, or those with more than one - /// input) the key will be a tuple. - pub fn get(&self, key: Q::Key) -> Q::Value { - self.storage.fetch(self.db, &key) - } - - /// Completely clears the storage for this query. - /// - /// This method breaks internal invariants of salsa, so any further queries - /// might return nonsense results. It is useful only in very specific - /// circumstances -- for example, when one wants to observe which values - /// dropped together with the table - pub fn purge(&self) - where - Q::Storage: plumbing::QueryStorageMassOps, - { - self.storage.purge(); - } -} - -/// Return value from [the `query_mut` method] on `Database`. -/// Gives access to the `set` method, notably, that is used to -/// set the value of an input query. -/// -/// [the `query_mut` method]: trait.Database.html#method.query_mut -pub struct QueryTableMut<'me, Q> -where - Q: Query + 'me, -{ - runtime: &'me mut Runtime, - storage: &'me Q::Storage, -} - -impl<'me, Q> QueryTableMut<'me, Q> -where - Q: Query, -{ - /// Constructs a new `QueryTableMut`. - pub fn new(runtime: &'me mut Runtime, storage: &'me Q::Storage) -> Self { - Self { runtime, storage } - } - - /// Assign a value to an "input query". Must be used outside of - /// an active query computation. - /// - /// If you are using `snapshot`, see the notes on blocking - /// and cancellation on [the `query_mut` method]. - /// - /// [the `query_mut` method]: trait.Database.html#method.query_mut - pub fn set(&mut self, key: Q::Key, value: Q::Value) - where - Q::Storage: plumbing::InputQueryStorageOps, - { - self.set_with_durability(key, value, Durability::LOW); - } - - /// Assign a value to an "input query", with the additional - /// promise that this value will **never change**. Must be used - /// outside of an active query computation. - /// - /// If you are using `snapshot`, see the notes on blocking - /// and cancellation on [the `query_mut` method]. 
- /// - /// [the `query_mut` method]: trait.Database.html#method.query_mut - pub fn set_with_durability(&mut self, key: Q::Key, value: Q::Value, durability: Durability) - where - Q::Storage: plumbing::InputQueryStorageOps, - { - self.storage.set(self.runtime, &key, value, durability); - } - - /// Removes a value from an "input query". Must be used outside of - /// an active query computation. - /// - /// If you are using `snapshot`, see the notes on blocking - /// and cancellation on [the `query_mut` method]. - /// - /// # Panics - /// Panics if the value was not previously set by `set` or - /// `set_with_durability`. - /// - /// [the `query_mut` method]: trait.Database.html#method.query_mut - pub fn remove(&mut self, key: Q::Key) -> Q::Value - where - Q::Storage: plumbing::InputQueryStorageOps, - { - self.storage.remove(self.runtime, &key) - } - - /// Sets the size of LRU cache of values for this query table. - /// - /// That is, at most `cap` values will be preset in the table at the same - /// time. This helps with keeping maximum memory usage under control, at the - /// cost of potential extra recalculations of evicted values. - /// - /// If `cap` is zero, all values are preserved, this is the default. - pub fn set_lru_capacity(&self, cap: usize) - where - Q::Storage: plumbing::LruQueryStorageOps, - { - self.storage.set_lru_capacity(cap); - } - - /// Marks the computed value as outdated. - /// - /// This causes salsa to re-execute the query function on the next access to - /// the query, even if all dependencies are up to date. - /// - /// This is most commonly used as part of the [on-demand input - /// pattern](https://salsa-rs.github.io/salsa/common_patterns/on_demand_inputs.html). - pub fn invalidate(&mut self, key: &Q::Key) - where - Q::Storage: plumbing::DerivedQueryStorageOps, - { - self.storage.invalidate(self.runtime, key) - } -} - -/// A panic payload indicating that execution of a salsa query was cancelled. 
-/// -/// This can occur for a few reasons: -/// * -/// * -/// * -#[derive(Debug)] -#[non_exhaustive] -pub enum Cancelled { - /// The query was operating on revision R, but there is a pending write to move to revision R+1. - #[non_exhaustive] - PendingWrite, - - /// The query was blocked on another thread, and that thread panicked. - #[non_exhaustive] - PropagatedPanic, -} - -impl Cancelled { - fn throw(self) -> ! { - // We use resume and not panic here to avoid running the panic - // hook (that is, to avoid collecting and printing backtrace). - std::panic::resume_unwind(Box::new(self)); - } - - /// Runs `f`, and catches any salsa cancellation. - pub fn catch(f: F) -> Result - where - F: FnOnce() -> T + UnwindSafe, - { - match panic::catch_unwind(f) { - Ok(t) => Ok(t), - Err(payload) => match payload.downcast() { - Ok(cancelled) => Err(*cancelled), - Err(payload) => panic::resume_unwind(payload), - }, - } - } -} - -impl std::fmt::Display for Cancelled { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - let why = match self { - Cancelled::PendingWrite => "pending write", - Cancelled::PropagatedPanic => "propagated panic", - }; - f.write_str("cancelled because of ")?; - f.write_str(why) - } -} - -impl std::error::Error for Cancelled {} - -/// Captures the participants of a cycle that occurred when executing a query. -/// -/// This type is meant to be used to help give meaningful error messages to the -/// user or to help salsa developers figure out why their program is resulting -/// in a computation cycle. -/// -/// It is used in a few ways: -/// -/// * During [cycle recovery](https://https://salsa-rs.github.io/salsa/cycles/fallback.html), -/// where it is given to the fallback function. -/// * As the panic value when an unexpected cycle (i.e., a cycle where one or more participants -/// lacks cycle recovery information) occurs. 
-/// -/// You can read more about cycle handling in -/// the [salsa book](https://https://salsa-rs.github.io/salsa/cycles.html). -#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] -pub struct Cycle { - participants: plumbing::CycleParticipants, -} - -impl Cycle { - pub(crate) fn new(participants: plumbing::CycleParticipants) -> Self { - Self { participants } - } - - /// True if two `Cycle` values represent the same cycle. - pub(crate) fn is(&self, cycle: &Cycle) -> bool { - Arc::ptr_eq(&self.participants, &cycle.participants) - } - - pub(crate) fn throw(self) -> ! { - log::debug!("throwing cycle {:?}", self); - std::panic::resume_unwind(Box::new(self)) - } - - pub(crate) fn catch(execute: impl FnOnce() -> T) -> Result { - match std::panic::catch_unwind(AssertUnwindSafe(execute)) { - Ok(v) => Ok(v), - Err(err) => match err.downcast::() { - Ok(cycle) => Err(*cycle), - Err(other) => std::panic::resume_unwind(other), - }, - } - } - - /// Iterate over the [`DatabaseKeyIndex`] for each query participating - /// in the cycle. The start point of this iteration within the cycle - /// is arbitrary but deterministic, but the ordering is otherwise determined - /// by the execution. - pub fn participant_keys(&self) -> impl Iterator + '_ { - self.participants.iter().copied() - } - - /// Returns a vector with the debug information for - /// all the participants in the cycle. - pub fn all_participants(&self, db: &DB) -> Vec { - self.participant_keys() - .map(|d| format!("{:?}", d.debug(db))) - .collect() - } - - /// Returns a vector with the debug information for - /// those participants in the cycle that lacked recovery - /// information. - pub fn unexpected_participants(&self, db: &DB) -> Vec { - self.participant_keys() - .filter(|&d| db.cycle_recovery_strategy(d) == CycleRecoveryStrategy::Panic) - .map(|d| format!("{:?}", d.debug(db))) - .collect() - } - - /// Returns a "debug" view onto this strict that can be used to print out information. 
- pub fn debug<'me, DB: ?Sized + Database>(&'me self, db: &'me DB) -> impl std::fmt::Debug + 'me { - struct UnexpectedCycleDebug<'me> { - c: &'me Cycle, - db: &'me dyn Database, - } - - impl<'me> std::fmt::Debug for UnexpectedCycleDebug<'me> { - fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - fmt.debug_struct("UnexpectedCycle") - .field("all_participants", &self.c.all_participants(self.db)) - .field( - "unexpected_participants", - &self.c.unexpected_participants(self.db), - ) - .finish() - } - } - - UnexpectedCycleDebug { - c: self, - db: db.ops_database(), - } - } -} - -// Re-export the procedural macros. -#[allow(unused_imports)] -#[macro_use] -extern crate salsa_macros; -use plumbing::HasQueryGroup; -pub use salsa_macros::*; diff --git a/src/plumbing.rs b/src/plumbing.rs deleted file mode 100644 index 16d003045..000000000 --- a/src/plumbing.rs +++ /dev/null @@ -1,242 +0,0 @@ -#![allow(missing_docs)] - -use crate::debug::TableEntry; -use crate::durability::Durability; -use crate::Cycle; -use crate::Database; -use crate::Query; -use crate::QueryTable; -use crate::QueryTableMut; -use std::borrow::Borrow; -use std::fmt::Debug; -use std::hash::Hash; -use std::sync::Arc; - -pub use crate::derived::DependencyStorage; -pub use crate::derived::MemoizedStorage; -pub use crate::input::InputStorage; -pub use crate::interned::InternedStorage; -pub use crate::interned::LookupInternedStorage; -pub use crate::{revision::Revision, DatabaseKeyIndex, QueryDb, Runtime}; - -/// Defines various associated types. An impl of this -/// should be generated for your query-context type automatically by -/// the `database_storage` macro, so you shouldn't need to mess -/// with this trait directly. -pub trait DatabaseStorageTypes: Database { - /// Defines the "storage type", where all the query data is kept. - /// This type is defined by the `database_storage` macro. 
- type DatabaseStorage: Default; -} - -/// Internal operations that the runtime uses to operate on the database. -pub trait DatabaseOps { - /// Upcast this type to a `dyn Database`. - fn ops_database(&self) -> &dyn Database; - - /// Gives access to the underlying salsa runtime. - fn ops_salsa_runtime(&self) -> &Runtime; - - /// Gives access to the underlying salsa runtime. - fn ops_salsa_runtime_mut(&mut self) -> &mut Runtime; - - /// Formats a database key index in a human readable fashion. - fn fmt_index( - &self, - index: DatabaseKeyIndex, - fmt: &mut std::fmt::Formatter<'_>, - ) -> std::fmt::Result; - - /// True if the computed value for `input` may have changed since `revision`. - fn maybe_changed_after(&self, input: DatabaseKeyIndex, revision: Revision) -> bool; - - /// Find the `CycleRecoveryStrategy` for a given input. - fn cycle_recovery_strategy(&self, input: DatabaseKeyIndex) -> CycleRecoveryStrategy; - - /// Executes the callback for each kind of query. - fn for_each_query(&self, op: &mut dyn FnMut(&dyn QueryStorageMassOps)); -} - -/// Internal operations performed on the query storage as a whole -/// (note that these ops do not need to know the identity of the -/// query, unlike `QueryStorageOps`). -pub trait QueryStorageMassOps { - fn purge(&self); -} - -pub trait DatabaseKey: Clone + Debug + Eq + Hash {} - -pub trait QueryFunction: Query { - /// See `CycleRecoveryStrategy` - const CYCLE_STRATEGY: CycleRecoveryStrategy; - - fn execute(db: &>::DynDb, key: Self::Key) -> Self::Value; - - fn cycle_fallback( - db: &>::DynDb, - cycle: &Cycle, - key: &Self::Key, - ) -> Self::Value { - let _ = (db, cycle, key); - panic!( - "query `{:?}` doesn't support cycle fallback", - Self::default() - ) - } -} - -/// Cycle recovery strategy: Is this query capable of recovering from -/// a cycle that results from executing the function? If so, how? -#[derive(Copy, Clone, Debug, PartialEq, Eq)] -pub enum CycleRecoveryStrategy { - /// Cannot recover from cycles: panic. 
- /// - /// This is the default. It is also what happens if a cycle - /// occurs and the queries involved have different recovery - /// strategies. - /// - /// In the case of a failure due to a cycle, the panic - /// value will be XXX (FIXME). - Panic, - - /// Recovers from cycles by storing a sentinel value. - /// - /// This value is computed by the `QueryFunction::cycle_fallback` - /// function. - Fallback, -} - -/// Create a query table, which has access to the storage for the query -/// and offers methods like `get`. -pub fn get_query_table<'me, Q>(db: &'me >::DynDb) -> QueryTable<'me, Q> -where - Q: Query + 'me, - Q::Storage: QueryStorageOps, -{ - let group_storage: &Q::GroupStorage = HasQueryGroup::group_storage(db); - let query_storage: &Q::Storage = Q::query_storage(group_storage); - QueryTable::new(db, query_storage) -} - -/// Create a mutable query table, which has access to the storage -/// for the query and offers methods like `set`. -pub fn get_query_table_mut<'me, Q>(db: &'me mut >::DynDb) -> QueryTableMut<'me, Q> -where - Q: Query, -{ - let (group_storage, runtime) = HasQueryGroup::group_storage_mut(db); - let query_storage = Q::query_storage_mut(group_storage); - QueryTableMut::new(runtime, &**query_storage) -} - -pub trait QueryGroup: Sized { - type GroupStorage; - - /// Dyn version of the associated database trait. - type DynDb: ?Sized + Database + HasQueryGroup; -} - -/// Trait implemented by a database for each group that it supports. -/// `S` and `K` are the types for *group storage* and *group key*, respectively. -pub trait HasQueryGroup: Database -where - G: QueryGroup, -{ - /// Access the group storage struct from the database. - fn group_storage(&self) -> &G::GroupStorage; - - /// Access the group storage struct from the database. - /// Also returns a ref to the `Runtime`, since otherwise - /// the database is borrowed and one cannot get access to it. 
- fn group_storage_mut(&mut self) -> (&G::GroupStorage, &mut Runtime); -} - -// ANCHOR:QueryStorageOps -pub trait QueryStorageOps -where - Self: QueryStorageMassOps, - Q: Query, -{ - // ANCHOR_END:QueryStorageOps - - /// See CycleRecoveryStrategy - const CYCLE_STRATEGY: CycleRecoveryStrategy; - - fn new(group_index: u16) -> Self; - - /// Format a database key index in a suitable way. - fn fmt_index( - &self, - db: &>::DynDb, - index: DatabaseKeyIndex, - fmt: &mut std::fmt::Formatter<'_>, - ) -> std::fmt::Result; - - // ANCHOR:maybe_changed_after - /// True if the value of `input`, which must be from this query, may have - /// changed after the given revision ended. - /// - /// This function should only be invoked with a revision less than the current - /// revision. - fn maybe_changed_after( - &self, - db: &>::DynDb, - input: DatabaseKeyIndex, - revision: Revision, - ) -> bool; - // ANCHOR_END:maybe_changed_after - - fn cycle_recovery_strategy(&self) -> CycleRecoveryStrategy { - Self::CYCLE_STRATEGY - } - - // ANCHOR:fetch - /// Execute the query, returning the result (often, the result - /// will be memoized). This is the "main method" for - /// queries. - /// - /// Returns `Err` in the event of a cycle, meaning that computing - /// the value for this `key` is recursively attempting to fetch - /// itself. - fn fetch(&self, db: &>::DynDb, key: &Q::Key) -> Q::Value; - // ANCHOR_END:fetch - - /// Returns the durability associated with a given key. - fn durability(&self, db: &>::DynDb, key: &Q::Key) -> Durability; - - /// Get the (current) set of the entries in the query storage - fn entries(&self, db: &>::DynDb) -> C - where - C: std::iter::FromIterator>; -} - -/// An optional trait that is implemented for "user mutable" storage: -/// that is, storage whose value is not derived from other storage but -/// is set independently. 
-pub trait InputQueryStorageOps -where - Q: Query, -{ - fn set(&self, runtime: &mut Runtime, key: &Q::Key, new_value: Q::Value, durability: Durability); - - fn remove(&self, runtime: &mut Runtime, key: &Q::Key) -> Q::Value; -} - -/// An optional trait that is implemented for "user mutable" storage: -/// that is, storage whose value is not derived from other storage but -/// is set independently. -pub trait LruQueryStorageOps { - fn set_lru_capacity(&self, new_capacity: usize); -} - -pub trait DerivedQueryStorageOps -where - Q: Query, -{ - fn invalidate(&self, runtime: &mut Runtime, key: &S) - where - S: Eq + Hash, - Q::Key: Borrow; -} - -pub type CycleParticipants = Arc>; diff --git a/src/revision.rs b/src/revision.rs deleted file mode 100644 index d1f6a3f9d..000000000 --- a/src/revision.rs +++ /dev/null @@ -1,70 +0,0 @@ -use std::num::NonZeroUsize; -use std::sync::atomic::{AtomicUsize, Ordering}; - -/// Value of the initial revision, as a u64. We don't use 0 -/// because we want to use a `NonZeroUsize`. -const START: usize = 1; - -/// A unique identifier for the current version of the database; each -/// time an input is changed, the revision number is incremented. -/// `Revision` is used internally to track which values may need to be -/// recomputed, but is not something you should have to interact with -/// directly as a user of salsa. 
-#[derive(Copy, Clone, PartialEq, Eq, Hash, PartialOrd, Ord)] -pub struct Revision { - generation: NonZeroUsize, -} - -impl Revision { - pub(crate) fn start() -> Self { - Self::from(START) - } - - pub(crate) fn from(g: usize) -> Self { - Self { - generation: NonZeroUsize::new(g).unwrap(), - } - } - - pub(crate) fn next(self) -> Revision { - Self::from(self.generation.get() + 1) - } - - fn as_usize(self) -> usize { - self.generation.get() - } -} - -impl std::fmt::Debug for Revision { - fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(fmt, "R{}", self.generation) - } -} - -#[derive(Debug)] -pub(crate) struct AtomicRevision { - data: AtomicUsize, -} - -impl AtomicRevision { - pub(crate) fn start() -> Self { - Self { - data: AtomicUsize::new(START), - } - } - - pub(crate) fn load(&self) -> Revision { - Revision::from(self.data.load(Ordering::SeqCst)) - } - - pub(crate) fn store(&self, r: Revision) { - self.data.store(r.as_usize(), Ordering::SeqCst); - } - - /// Increment by 1, returning previous value. 
- pub(crate) fn fetch_then_increment(&self) -> Revision { - let v = self.data.fetch_add(1, Ordering::SeqCst); - assert!(v != usize::max_value(), "revision overflow"); - Revision::from(v) - } -} diff --git a/src/runtime.rs b/src/runtime.rs deleted file mode 100644 index 73cc14884..000000000 --- a/src/runtime.rs +++ /dev/null @@ -1,690 +0,0 @@ -use crate::durability::Durability; -use crate::hash::*; -use crate::plumbing::CycleRecoveryStrategy; -use crate::revision::{AtomicRevision, Revision}; -use crate::{Cancelled, Cycle, Database, DatabaseKeyIndex, Event, EventKind}; -use log::debug; -use parking_lot::lock_api::{RawRwLock, RawRwLockRecursive}; -use parking_lot::{Mutex, RwLock}; -use std::hash::Hash; -use std::panic::panic_any; -use std::sync::atomic::{AtomicUsize, Ordering}; -use std::sync::Arc; - -mod dependency_graph; -use dependency_graph::DependencyGraph; - -pub(crate) mod local_state; -use local_state::LocalState; - -use self::local_state::{ActiveQueryGuard, QueryInputs, QueryRevisions}; - -/// The salsa runtime stores the storage for all queries as well as -/// tracking the query stack and dependencies between cycles. -/// -/// Each new runtime you create (e.g., via `Runtime::new` or -/// `Runtime::default`) will have an independent set of query storage -/// associated with it. Normally, therefore, you only do this once, at -/// the start of your application. -pub struct Runtime { - /// Our unique runtime id. - id: RuntimeId, - - /// If this is a "forked" runtime, then the `revision_guard` will - /// be `Some`; this guard holds a read-lock on the global query - /// lock. - revision_guard: Option, - - /// Local state that is specific to this runtime (thread). - local_state: LocalState, - - /// Shared state that is accessible via all runtimes. 
- shared_state: Arc, -} - -#[derive(Clone, Debug)] -pub(crate) enum WaitResult { - Completed, - Panicked, - Cycle(Cycle), -} - -impl Default for Runtime { - fn default() -> Self { - Runtime { - id: RuntimeId { counter: 0 }, - revision_guard: None, - shared_state: Default::default(), - local_state: Default::default(), - } - } -} - -impl std::fmt::Debug for Runtime { - fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - fmt.debug_struct("Runtime") - .field("id", &self.id()) - .field("forked", &self.revision_guard.is_some()) - .field("shared_state", &self.shared_state) - .finish() - } -} - -impl Runtime { - /// Create a new runtime; equivalent to `Self::default`. This is - /// used when creating a new database. - pub fn new() -> Self { - Self::default() - } - - /// See [`crate::storage::Storage::snapshot`]. - pub(crate) fn snapshot(&self) -> Self { - if self.local_state.query_in_progress() { - panic!("it is not legal to `snapshot` during a query (see salsa-rs/salsa#80)"); - } - - let revision_guard = RevisionGuard::new(&self.shared_state); - - let id = RuntimeId { - counter: self.shared_state.next_id.fetch_add(1, Ordering::SeqCst), - }; - - Runtime { - id, - revision_guard: Some(revision_guard), - shared_state: self.shared_state.clone(), - local_state: Default::default(), - } - } - - /// A "synthetic write" causes the system to act *as though* some - /// input of durability `durability` has changed. This is mostly - /// useful for profiling scenarios. - /// - /// **WARNING:** Just like an ordinary write, this method triggers - /// cancellation. If you invoke it while a snapshot exists, it - /// will block until that snapshot is dropped -- if that snapshot - /// is owned by the current thread, this could trigger deadlock. - pub fn synthetic_write(&mut self, durability: Durability) { - self.with_incremented_revision(|_next_revision| Some(durability)); - } - - /// The unique identifier attached to this `SalsaRuntime`. 
Each - /// snapshotted runtime has a distinct identifier. - #[inline] - pub fn id(&self) -> RuntimeId { - self.id - } - - /// Returns the database-key for the query that this thread is - /// actively executing (if any). - pub fn active_query(&self) -> Option { - self.local_state.active_query() - } - - /// Read current value of the revision counter. - #[inline] - pub(crate) fn current_revision(&self) -> Revision { - self.shared_state.revisions[0].load() - } - - /// The revision in which values with durability `d` may have last - /// changed. For D0, this is just the current revision. But for - /// higher levels of durability, this value may lag behind the - /// current revision. If we encounter a value of durability Di, - /// then, we can check this function to get a "bound" on when the - /// value may have changed, which allows us to skip walking its - /// dependencies. - #[inline] - pub(crate) fn last_changed_revision(&self, d: Durability) -> Revision { - self.shared_state.revisions[d.index()].load() - } - - /// Read current value of the revision counter. - #[inline] - pub(crate) fn pending_revision(&self) -> Revision { - self.shared_state.pending_revision.load() - } - - #[cold] - pub(crate) fn unwind_cancelled(&self) { - self.report_untracked_read(); - Cancelled::PendingWrite.throw(); - } - - /// Acquires the **global query write lock** (ensuring that no queries are - /// executing) and then increments the current revision counter; invokes - /// `op` with the global query write lock still held. - /// - /// While we wait to acquire the global query write lock, this method will - /// also increment `pending_revision_increments`, thus signalling to queries - /// that their results are "cancelled" and they should abort as expeditiously - /// as possible. - /// - /// The `op` closure should actually perform the writes needed. 
It is given - /// the new revision as an argument, and its return value indicates whether - /// any pre-existing value was modified: - /// - /// - returning `None` means that no pre-existing value was modified (this - /// could occur e.g. when setting some key on an input that was never set - /// before) - /// - returning `Some(d)` indicates that a pre-existing value was modified - /// and it had the durability `d`. This will update the records for when - /// values with each durability were modified. - /// - /// Note that, given our writer model, we can assume that only one thread is - /// attempting to increment the global revision at a time. - pub(crate) fn with_incremented_revision(&mut self, op: F) - where - F: FnOnce(Revision) -> Option, - { - log::debug!("increment_revision()"); - - if !self.permits_increment() { - panic!("increment_revision invoked during a query computation"); - } - - // Set the `pending_revision` field so that people - // know current revision is cancelled. - let current_revision = self.shared_state.pending_revision.fetch_then_increment(); - - // To modify the revision, we need the lock. - let shared_state = self.shared_state.clone(); - let _lock = shared_state.query_lock.write(); - - let old_revision = self.shared_state.revisions[0].fetch_then_increment(); - assert_eq!(current_revision, old_revision); - - let new_revision = current_revision.next(); - - debug!("increment_revision: incremented to {:?}", new_revision); - - if let Some(d) = op(new_revision) { - for rev in &self.shared_state.revisions[1..=d.index()] { - rev.store(new_revision); - } - } - } - - pub(crate) fn permits_increment(&self) -> bool { - self.revision_guard.is_none() && !self.local_state.query_in_progress() - } - - #[inline] - pub(crate) fn push_query(&self, database_key_index: DatabaseKeyIndex) -> ActiveQueryGuard<'_> { - self.local_state.push_query(database_key_index) - } - - /// Reports that the currently active query read the result from - /// another query. 
- /// - /// Also checks whether the "cycle participant" flag is set on - /// the current stack frame -- if so, panics with `CycleParticipant` - /// value, which should be caught by the code executing the query. - /// - /// # Parameters - /// - /// - `database_key`: the query whose result was read - /// - `changed_revision`: the last revision in which the result of that - /// query had changed - pub(crate) fn report_query_read_and_unwind_if_cycle_resulted( - &self, - input: DatabaseKeyIndex, - durability: Durability, - changed_at: Revision, - ) { - self.local_state - .report_query_read_and_unwind_if_cycle_resulted(input, durability, changed_at); - } - - /// Reports that the query depends on some state unknown to salsa. - /// - /// Queries which report untracked reads will be re-executed in the next - /// revision. - pub fn report_untracked_read(&self) { - self.local_state - .report_untracked_read(self.current_revision()); - } - - /// Acts as though the current query had read an input with the given durability; this will force the current query's durability to be at most `durability`. - /// - /// This is mostly useful to control the durability level for [on-demand inputs](https://salsa-rs.github.io/salsa/common_patterns/on_demand_inputs.html). - pub fn report_synthetic_read(&self, durability: Durability) { - let changed_at = self.last_changed_revision(durability); - self.local_state - .report_synthetic_read(durability, changed_at); - } - - /// Handles a cycle in the dependency graph that was detected when the - /// current thread tried to block on `database_key_index` which is being - /// executed by `to_id`. If this function returns, then `to_id` no longer - /// depends on the current thread, and so we should continue executing - /// as normal. Otherwise, the function will throw a `Cycle` which is expected - /// to be caught by some frame on our stack. This occurs either if there is - /// a frame on our stack with cycle recovery (possibly the top one!) 
or if there - /// is no cycle recovery at all. - fn unblock_cycle_and_maybe_throw( - &self, - db: &dyn Database, - dg: &mut DependencyGraph, - database_key_index: DatabaseKeyIndex, - to_id: RuntimeId, - ) { - debug!( - "unblock_cycle_and_maybe_throw(database_key={:?})", - database_key_index - ); - - let mut from_stack = self.local_state.take_query_stack(); - let from_id = self.id(); - - // Make a "dummy stack frame". As we iterate through the cycle, we will collect the - // inputs from each participant. Then, if we are participating in cycle recovery, we - // will propagate those results to all participants. - let mut cycle_query = ActiveQuery::new(database_key_index); - - // Identify the cycle participants: - let cycle = { - let mut v = vec![]; - dg.for_each_cycle_participant( - from_id, - &mut from_stack, - database_key_index, - to_id, - |aqs| { - aqs.iter_mut().for_each(|aq| { - cycle_query.add_from(aq); - v.push(aq.database_key_index); - }); - }, - ); - - // We want to give the participants in a deterministic order - // (at least for this execution, not necessarily across executions), - // no matter where it started on the stack. Find the minimum - // key and rotate it to the front. - let min = v.iter().min().unwrap(); - let index = v.iter().position(|p| p == min).unwrap(); - v.rotate_left(index); - - // No need to store extra memory. - v.shrink_to_fit(); - - Cycle::new(Arc::new(v)) - }; - debug!( - "cycle {:?}, cycle_query {:#?}", - cycle.debug(db), - cycle_query, - ); - - // We can remove the cycle participants from the list of dependencies; - // they are a strongly connected component (SCC) and we only care about - // dependencies to things outside the SCC that control whether it will - // form again. - cycle_query.remove_cycle_participants(&cycle); - - // Mark each cycle participant that has recovery set, along with - // any frames that come after them on the same thread. Those frames - // are going to be unwound so that fallback can occur. 
- dg.for_each_cycle_participant(from_id, &mut from_stack, database_key_index, to_id, |aqs| { - aqs.iter_mut() - .skip_while( - |aq| match db.cycle_recovery_strategy(aq.database_key_index) { - CycleRecoveryStrategy::Panic => true, - CycleRecoveryStrategy::Fallback => false, - }, - ) - .for_each(|aq| { - debug!("marking {:?} for fallback", aq.database_key_index.debug(db)); - aq.take_inputs_from(&cycle_query); - assert!(aq.cycle.is_none()); - aq.cycle = Some(cycle.clone()); - }); - }); - - // Unblock every thread that has cycle recovery with a `WaitResult::Cycle`. - // They will throw the cycle, which will be caught by the frame that has - // cycle recovery so that it can execute that recovery. - let (me_recovered, others_recovered) = - dg.maybe_unblock_runtimes_in_cycle(from_id, &from_stack, database_key_index, to_id); - - self.local_state.restore_query_stack(from_stack); - - if me_recovered { - // If the current thread has recovery, we want to throw - // so that it can begin. - cycle.throw() - } else if others_recovered { - // If other threads have recovery but we didn't: return and we will block on them. - } else { - // if nobody has recover, then we panic - panic_any(cycle); - } - } - - /// Block until `other_id` completes executing `database_key`; - /// panic or unwind in the case of a cycle. - /// - /// `query_mutex_guard` is the guard for the current query's state; - /// it will be dropped after we have successfully registered the - /// dependency. - /// - /// # Propagating panics - /// - /// If the thread `other_id` panics, then our thread is considered - /// cancelled, so this function will panic with a `Cancelled` value. - /// - /// # Cycle handling - /// - /// If the thread `other_id` already depends on the current thread, - /// and hence there is a cycle in the query graph, then this function - /// will unwind instead of returning normally. 
The method of unwinding - /// depends on the [`Self::mutual_cycle_recovery_strategy`] - /// of the cycle participants: - /// - /// * [`CycleRecoveryStrategy::Panic`]: panic with the [`Cycle`] as the value. - /// * [`CycleRecoveryStrategy::Fallback`]: initiate unwinding with [`CycleParticipant::unwind`]. - pub(crate) fn block_on_or_unwind( - &self, - db: &dyn Database, - database_key: DatabaseKeyIndex, - other_id: RuntimeId, - query_mutex_guard: QueryMutexGuard, - ) { - let mut dg = self.shared_state.dependency_graph.lock(); - - if dg.depends_on(other_id, self.id()) { - self.unblock_cycle_and_maybe_throw(db, &mut dg, database_key, other_id); - - // If the above fn returns, then (via cycle recovery) it has unblocked the - // cycle, so we can continue. - assert!(!dg.depends_on(other_id, self.id())); - } - - db.salsa_event(Event { - runtime_id: self.id(), - kind: EventKind::WillBlockOn { - other_runtime_id: other_id, - database_key, - }, - }); - - let stack = self.local_state.take_query_stack(); - - let (stack, result) = DependencyGraph::block_on( - dg, - self.id(), - database_key, - other_id, - stack, - query_mutex_guard, - ); - - self.local_state.restore_query_stack(stack); - - match result { - WaitResult::Completed => (), - - // If the other thread panicked, then we consider this thread - // cancelled. The assumption is that the panic will be detected - // by the other thread and responded to appropriately. - WaitResult::Panicked => Cancelled::PropagatedPanic.throw(), - - WaitResult::Cycle(c) => c.throw(), - } - } - - /// Invoked when this runtime completed computing `database_key` with - /// the given result `wait_result` (`wait_result` should be `None` if - /// computing `database_key` panicked and could not complete). - /// This function unblocks any dependent queries and allows them - /// to continue executing. 
- pub(crate) fn unblock_queries_blocked_on( - &self, - database_key: DatabaseKeyIndex, - wait_result: WaitResult, - ) { - self.shared_state - .dependency_graph - .lock() - .unblock_runtimes_blocked_on(database_key, wait_result); - } -} - -/// State that will be common to all threads (when we support multiple threads) -struct SharedState { - /// Stores the next id to use for a snapshotted runtime (starts at 1). - next_id: AtomicUsize, - - /// Whenever derived queries are executing, they acquire this lock - /// in read mode. Mutating inputs (and thus creating a new - /// revision) requires a write lock (thus guaranteeing that no - /// derived queries are in progress). Note that this is not needed - /// to prevent **race conditions** -- the revision counter itself - /// is stored in an `AtomicUsize` so it can be cheaply read - /// without acquiring the lock. Rather, the `query_lock` is used - /// to ensure a higher-level consistency property. - query_lock: RwLock<()>, - - /// This is typically equal to `revision` -- set to `revision+1` - /// when a new revision is pending (which implies that the current - /// revision is cancelled). - pending_revision: AtomicRevision, - - /// Stores the "last change" revision for values of each duration. - /// This vector is always of length at least 1 (for Durability 0) - /// but its total length depends on the number of durations. The - /// element at index 0 is special as it represents the "current - /// revision". In general, we have the invariant that revisions - /// in here are *declining* -- that is, `revisions[i] >= - /// revisions[i + 1]`, for all `i`. This is because when you - /// modify a value with durability D, that implies that values - /// with durability less than D may have changed too. - revisions: Vec, - - /// The dependency graph tracks which runtimes are blocked on one - /// another, waiting for queries to terminate. 
- dependency_graph: Mutex, -} - -impl SharedState { - fn with_durabilities(durabilities: usize) -> Self { - SharedState { - next_id: AtomicUsize::new(1), - query_lock: Default::default(), - revisions: (0..durabilities).map(|_| AtomicRevision::start()).collect(), - pending_revision: AtomicRevision::start(), - dependency_graph: Default::default(), - } - } -} - -impl std::panic::RefUnwindSafe for SharedState {} - -impl Default for SharedState { - fn default() -> Self { - Self::with_durabilities(Durability::LEN) - } -} - -impl std::fmt::Debug for SharedState { - fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - let query_lock = if self.query_lock.try_write().is_some() { - "" - } else if self.query_lock.try_read().is_some() { - "" - } else { - "" - }; - fmt.debug_struct("SharedState") - .field("query_lock", &query_lock) - .field("revisions", &self.revisions) - .field("pending_revision", &self.pending_revision) - .finish() - } -} - -#[derive(Debug)] -struct ActiveQuery { - /// What query is executing - database_key_index: DatabaseKeyIndex, - - /// Minimum durability of inputs observed so far. - durability: Durability, - - /// Maximum revision of all inputs observed. If we observe an - /// untracked read, this will be set to the most recent revision. - changed_at: Revision, - - /// Set of subqueries that were accessed thus far, or `None` if - /// there was an untracked the read. - dependencies: Option>, - - /// Stores the entire cycle, if one is found and this query is part of it. 
- cycle: Option, -} - -impl ActiveQuery { - fn new(database_key_index: DatabaseKeyIndex) -> Self { - ActiveQuery { - database_key_index, - durability: Durability::MAX, - changed_at: Revision::start(), - dependencies: Some(FxIndexSet::default()), - cycle: None, - } - } - - fn add_read(&mut self, input: DatabaseKeyIndex, durability: Durability, revision: Revision) { - if let Some(set) = &mut self.dependencies { - set.insert(input); - } - - self.durability = self.durability.min(durability); - self.changed_at = self.changed_at.max(revision); - } - - fn add_untracked_read(&mut self, changed_at: Revision) { - self.dependencies = None; - self.durability = Durability::LOW; - self.changed_at = changed_at; - } - - fn add_synthetic_read(&mut self, durability: Durability, revision: Revision) { - self.dependencies = None; - self.durability = self.durability.min(durability); - self.changed_at = self.changed_at.max(revision); - } - - pub(crate) fn revisions(&self) -> QueryRevisions { - let inputs = match &self.dependencies { - None => QueryInputs::Untracked, - - Some(dependencies) => { - if dependencies.is_empty() { - QueryInputs::NoInputs - } else { - QueryInputs::Tracked { - inputs: dependencies.iter().copied().collect(), - } - } - } - }; - - QueryRevisions { - changed_at: self.changed_at, - inputs, - durability: self.durability, - } - } - - /// Adds any dependencies from `other` into `self`. - /// Used during cycle recovery, see [`Runtime::create_cycle_error`]. - fn add_from(&mut self, other: &ActiveQuery) { - self.changed_at = self.changed_at.max(other.changed_at); - self.durability = self.durability.min(other.durability); - if let Some(other_dependencies) = &other.dependencies { - if let Some(my_dependencies) = &mut self.dependencies { - my_dependencies.extend(other_dependencies.iter().copied()); - } - } else { - self.dependencies = None; - } - } - - /// Removes the participants in `cycle` from my dependencies. 
- /// Used during cycle recovery, see [`Runtime::create_cycle_error`]. - fn remove_cycle_participants(&mut self, cycle: &Cycle) { - if let Some(my_dependencies) = &mut self.dependencies { - for p in cycle.participant_keys() { - my_dependencies.shift_remove(&p); - } - } - } - - /// Copy the changed-at, durability, and dependencies from `cycle_query`. - /// Used during cycle recovery, see [`Runtime::create_cycle_error`]. - pub(crate) fn take_inputs_from(&mut self, cycle_query: &ActiveQuery) { - self.changed_at = cycle_query.changed_at; - self.durability = cycle_query.durability; - self.dependencies.clone_from(&cycle_query.dependencies); - } -} - -/// A unique identifier for a particular runtime. Each time you create -/// a snapshot, a fresh `RuntimeId` is generated. Once a snapshot is -/// complete, its `RuntimeId` may potentially be re-used. -#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, PartialOrd, Ord)] -pub struct RuntimeId { - counter: usize, -} - -#[derive(Clone, Debug)] -pub(crate) struct StampedValue { - pub(crate) value: V, - pub(crate) durability: Durability, - pub(crate) changed_at: Revision, -} - -struct RevisionGuard { - shared_state: Arc, -} - -impl RevisionGuard { - fn new(shared_state: &Arc) -> Self { - // Subtle: we use a "recursive" lock here so that it is not an - // error to acquire a read-lock when one is already held (this - // happens when a query uses `snapshot` to spawn off parallel - // workers, for example). - // - // This has the side-effect that we are responsible to ensure - // that people contending for the write lock do not starve, - // but this is what we achieve via the cancellation mechanism. 
- // - // (In particular, since we only ever have one "mutating - // handle" to the database, the only contention for the global - // query lock occurs when there are "futures" evaluating - // queries in parallel, and those futures hold a read-lock - // already, so the starvation problem is more about them bring - // themselves to a close, versus preventing other people from - // *starting* work). - unsafe { - shared_state.query_lock.raw().lock_shared_recursive(); - } - - Self { - shared_state: shared_state.clone(), - } - } -} - -impl Drop for RevisionGuard { - fn drop(&mut self) { - // Release our read-lock without using RAII. As documented in - // `Snapshot::new` above, this requires the unsafe keyword. - unsafe { - self.shared_state.query_lock.raw().unlock_shared(); - } - } -} diff --git a/src/runtime/dependency_graph.rs b/src/runtime/dependency_graph.rs deleted file mode 100644 index b7fdf3efb..000000000 --- a/src/runtime/dependency_graph.rs +++ /dev/null @@ -1,277 +0,0 @@ -use std::sync::Arc; - -use crate::{DatabaseKeyIndex, RuntimeId}; -use parking_lot::{Condvar, MutexGuard}; -use rustc_hash::FxHashMap; -use smallvec::SmallVec; - -use super::{ActiveQuery, WaitResult}; - -type QueryStack = Vec; - -#[derive(Debug, Default)] -pub(super) struct DependencyGraph { - /// A `(K -> V)` pair in this map indicates that the the runtime - /// `K` is blocked on some query executing in the runtime `V`. - /// This encodes a graph that must be acyclic (or else deadlock - /// will result). - edges: FxHashMap, - - /// Encodes the `RuntimeId` that are blocked waiting for the result - /// of a given query. - query_dependents: FxHashMap>, - - /// When a key K completes which had dependent queries Qs blocked on it, - /// it stores its `WaitResult` here. As they wake up, each query Q in Qs will - /// come here to fetch their results. 
- wait_results: FxHashMap, -} - -#[derive(Debug)] -struct Edge { - blocked_on_id: RuntimeId, - blocked_on_key: DatabaseKeyIndex, - stack: QueryStack, - - /// Signalled whenever a query with dependents completes. - /// Allows those dependents to check if they are ready to unblock. - condvar: Arc, -} - -impl DependencyGraph { - /// True if `from_id` depends on `to_id`. - /// - /// (i.e., there is a path from `from_id` to `to_id` in the graph.) - pub(super) fn depends_on(&mut self, from_id: RuntimeId, to_id: RuntimeId) -> bool { - let mut p = from_id; - while let Some(q) = self.edges.get(&p).map(|edge| edge.blocked_on_id) { - if q == to_id { - return true; - } - - p = q; - } - p == to_id - } - - /// Invokes `closure` with a `&mut ActiveQuery` for each query that participates in the cycle. - /// The cycle runs as follows: - /// - /// 1. The runtime `from_id`, which has the stack `from_stack`, would like to invoke `database_key`... - /// 2. ...but `database_key` is already being executed by `to_id`... - /// 3. ...and `to_id` is transitively dependent on something which is present on `from_stack`. - pub(super) fn for_each_cycle_participant( - &mut self, - from_id: RuntimeId, - from_stack: &mut QueryStack, - database_key: DatabaseKeyIndex, - to_id: RuntimeId, - mut closure: impl FnMut(&mut [ActiveQuery]), - ) { - debug_assert!(self.depends_on(to_id, from_id)); - - // To understand this algorithm, consider this [drawing](https://is.gd/TGLI9v): - // - // database_key = QB2 - // from_id = A - // to_id = B - // from_stack = [QA1, QA2, QA3] - // - // self.edges[B] = { C, QC2, [QB1..QB3] } - // self.edges[C] = { A, QA2, [QC1..QC3] } - // - // The cyclic - // edge we have - // failed to add. 
- // : - // A : B C - // : - // QA1 v QB1 QC1 - // ┌► QA2 ┌──► QB2 ┌─► QC2 - // │ QA3 ───┘ QB3 ──┘ QC3 ───┐ - // │ │ - // └───────────────────────────────┘ - // - // Final output: [QB2, QB3, QC2, QC3, QA2, QA3] - - let mut id = to_id; - let mut key = database_key; - while id != from_id { - // Looking at the diagram above, the idea is to - // take the edge from `to_id` starting at `key` - // (inclusive) and down to the end. We can then - // load up the next thread (i.e., we start at B/QB2, - // and then load up the dependency on C/QC2). - let edge = self.edges.get_mut(&id).unwrap(); - let prefix = edge - .stack - .iter_mut() - .take_while(|p| p.database_key_index != key) - .count(); - closure(&mut edge.stack[prefix..]); - id = edge.blocked_on_id; - key = edge.blocked_on_key; - } - - // Finally, we copy in the results from `from_stack`. - let prefix = from_stack - .iter_mut() - .take_while(|p| p.database_key_index != key) - .count(); - closure(&mut from_stack[prefix..]); - } - - /// Unblock each blocked runtime (excluding the current one) if some - /// query executing in that runtime is participating in cycle fallback. - /// - /// Returns a boolean (Current, Others) where: - /// * Current is true if the current runtime has cycle participants - /// with fallback; - /// * Others is true if other runtimes were unblocked. - pub(super) fn maybe_unblock_runtimes_in_cycle( - &mut self, - from_id: RuntimeId, - from_stack: &QueryStack, - database_key: DatabaseKeyIndex, - to_id: RuntimeId, - ) -> (bool, bool) { - // See diagram in `for_each_cycle_participant`. - let mut id = to_id; - let mut key = database_key; - let mut others_unblocked = false; - while id != from_id { - let edge = self.edges.get(&id).unwrap(); - let prefix = edge - .stack - .iter() - .take_while(|p| p.database_key_index != key) - .count(); - let next_id = edge.blocked_on_id; - let next_key = edge.blocked_on_key; - - if let Some(cycle) = edge.stack[prefix..] 
- .iter() - .rev() - .find_map(|aq| aq.cycle.clone()) - { - // Remove `id` from the list of runtimes blocked on `next_key`: - self.query_dependents - .get_mut(&next_key) - .unwrap() - .retain(|r| *r != id); - - // Unblock runtime so that it can resume execution once lock is released: - self.unblock_runtime(id, WaitResult::Cycle(cycle)); - - others_unblocked = true; - } - - id = next_id; - key = next_key; - } - - let prefix = from_stack - .iter() - .take_while(|p| p.database_key_index != key) - .count(); - let this_unblocked = from_stack[prefix..].iter().any(|aq| aq.cycle.is_some()); - - (this_unblocked, others_unblocked) - } - - /// Modifies the graph so that `from_id` is blocked - /// on `database_key`, which is being computed by - /// `to_id`. - /// - /// For this to be reasonable, the lock on the - /// results table for `database_key` must be held. - /// This ensures that computing `database_key` doesn't - /// complete before `block_on` executes. - /// - /// Preconditions: - /// * No path from `to_id` to `from_id` - /// (i.e., `me.depends_on(to_id, from_id)` is false) - /// * `held_mutex` is a read lock (or stronger) on `database_key` - pub(super) fn block_on( - mut me: MutexGuard<'_, Self>, - from_id: RuntimeId, - database_key: DatabaseKeyIndex, - to_id: RuntimeId, - from_stack: QueryStack, - query_mutex_guard: QueryMutexGuard, - ) -> (QueryStack, WaitResult) { - let condvar = me.add_edge(from_id, database_key, to_id, from_stack); - - // Release the mutex that prevents `database_key` - // from completing, now that the edge has been added. - drop(query_mutex_guard); - - loop { - if let Some(stack_and_result) = me.wait_results.remove(&from_id) { - debug_assert!(!me.edges.contains_key(&from_id)); - return stack_and_result; - } - condvar.wait(&mut me); - } - } - - /// Helper for `block_on`: performs actual graph modification - /// to add a dependency edge from `from_id` to `to_id`, which is - /// computing `database_key`. 
- fn add_edge( - &mut self, - from_id: RuntimeId, - database_key: DatabaseKeyIndex, - to_id: RuntimeId, - from_stack: QueryStack, - ) -> Arc { - assert_ne!(from_id, to_id); - debug_assert!(!self.edges.contains_key(&from_id)); - debug_assert!(!self.depends_on(to_id, from_id)); - - let condvar = Arc::new(Condvar::new()); - self.edges.insert( - from_id, - Edge { - blocked_on_id: to_id, - blocked_on_key: database_key, - stack: from_stack, - condvar: condvar.clone(), - }, - ); - self.query_dependents - .entry(database_key) - .or_default() - .push(from_id); - condvar - } - - /// Invoked when runtime `to_id` completes executing - /// `database_key`. - pub(super) fn unblock_runtimes_blocked_on( - &mut self, - database_key: DatabaseKeyIndex, - wait_result: WaitResult, - ) { - let dependents = self - .query_dependents - .remove(&database_key) - .unwrap_or_default(); - - for from_id in dependents { - self.unblock_runtime(from_id, wait_result.clone()); - } - } - - /// Unblock the runtime with the given id with the given wait-result. - /// This will cause it resume execution (though it will have to grab - /// the lock on this data structure first, to recover the wait result). - fn unblock_runtime(&mut self, id: RuntimeId, wait_result: WaitResult) { - let edge = self.edges.remove(&id).expect("not blocked"); - self.wait_results.insert(id, (edge.stack, wait_result)); - - // Now that we have inserted the `wait_results`, - // notify the thread. - edge.condvar.notify_one(); - } -} diff --git a/src/runtime/local_state.rs b/src/runtime/local_state.rs deleted file mode 100644 index bf357d4f3..000000000 --- a/src/runtime/local_state.rs +++ /dev/null @@ -1,244 +0,0 @@ -use log::debug; - -use crate::durability::Durability; -use crate::runtime::ActiveQuery; -use crate::runtime::Revision; -use crate::Cycle; -use crate::DatabaseKeyIndex; -use std::cell::RefCell; -use std::sync::Arc; - -use super::StampedValue; - -/// State that is specific to a single execution thread. 
-/// -/// Internally, this type uses ref-cells. -/// -/// **Note also that all mutations to the database handle (and hence -/// to the local-state) must be undone during unwinding.** -pub(super) struct LocalState { - /// Vector of active queries. - /// - /// This is normally `Some`, but it is set to `None` - /// while the query is blocked waiting for a result. - /// - /// Unwinding note: pushes onto this vector must be popped -- even - /// during unwinding. - query_stack: RefCell>>, -} - -/// Summarizes "all the inputs that a query used" -#[derive(Debug, Clone)] -pub(crate) struct QueryRevisions { - /// The most revision in which some input changed. - pub(crate) changed_at: Revision, - - /// Minimum durability of the inputs to this query. - pub(crate) durability: Durability, - - /// The inputs that went into our query, if we are tracking them. - pub(crate) inputs: QueryInputs, -} - -impl QueryRevisions { - pub(crate) fn stamped_value(&self, value: V) -> StampedValue { - StampedValue { - value, - durability: self.durability, - changed_at: self.changed_at, - } - } -} - -/// Every input. -#[derive(Debug, Clone)] -pub(crate) enum QueryInputs { - /// Non-empty set of inputs, fully known - Tracked { inputs: Arc<[DatabaseKeyIndex]> }, - - /// Empty set of inputs, fully known. 
- NoInputs, - - /// Unknown quantity of inputs - Untracked, -} - -impl Default for LocalState { - fn default() -> Self { - LocalState { - query_stack: RefCell::new(Some(Vec::new())), - } - } -} - -impl LocalState { - #[inline] - pub(super) fn push_query(&self, database_key_index: DatabaseKeyIndex) -> ActiveQueryGuard<'_> { - let mut query_stack = self.query_stack.borrow_mut(); - let query_stack = query_stack.as_mut().expect("local stack taken"); - query_stack.push(ActiveQuery::new(database_key_index)); - ActiveQueryGuard { - local_state: self, - database_key_index, - push_len: query_stack.len(), - } - } - - fn with_query_stack(&self, c: impl FnOnce(&mut Vec) -> R) -> R { - c(self - .query_stack - .borrow_mut() - .as_mut() - .expect("query stack taken")) - } - - pub(super) fn query_in_progress(&self) -> bool { - self.with_query_stack(|stack| !stack.is_empty()) - } - - pub(super) fn active_query(&self) -> Option { - self.with_query_stack(|stack| { - stack - .last() - .map(|active_query| active_query.database_key_index) - }) - } - - pub(super) fn report_query_read_and_unwind_if_cycle_resulted( - &self, - input: DatabaseKeyIndex, - durability: Durability, - changed_at: Revision, - ) { - debug!( - "report_query_read_and_unwind_if_cycle_resulted(input={:?}, durability={:?}, changed_at={:?})", - input, durability, changed_at - ); - self.with_query_stack(|stack| { - if let Some(top_query) = stack.last_mut() { - top_query.add_read(input, durability, changed_at); - - // We are a cycle participant: - // - // C0 --> ... --> Ci --> Ci+1 -> ... -> Cn --> C0 - // ^ ^ - // : | - // This edge -----+ | - // | - // | - // N0 - // - // In this case, the value we have just read from `Ci+1` - // is actually the cycle fallback value and not especially - // interesting. We unwind now with `CycleParticipant` to avoid - // executing the rest of our query function. This unwinding - // will be caught and our own fallback value will be used. 
- // - // Note that `Ci+1` may` have *other* callers who are not - // participants in the cycle (e.g., N0 in the graph above). - // They will not have the `cycle` marker set in their - // stack frames, so they will just read the fallback value - // from `Ci+1` and continue on their merry way. - if let Some(cycle) = &top_query.cycle { - cycle.clone().throw() - } - } - }) - } - - pub(super) fn report_untracked_read(&self, current_revision: Revision) { - self.with_query_stack(|stack| { - if let Some(top_query) = stack.last_mut() { - top_query.add_untracked_read(current_revision); - } - }) - } - - /// Update the top query on the stack to act as though it read a value - /// of durability `durability` which changed in `revision`. - pub(super) fn report_synthetic_read(&self, durability: Durability, revision: Revision) { - self.with_query_stack(|stack| { - if let Some(top_query) = stack.last_mut() { - top_query.add_synthetic_read(durability, revision); - } - }) - } - - /// Takes the query stack and returns it. This is used when - /// the current thread is blocking. The stack must be restored - /// with [`Self::restore_query_stack`] when the thread unblocks. - pub(super) fn take_query_stack(&self) -> Vec { - assert!( - self.query_stack.borrow().is_some(), - "query stack already taken" - ); - self.query_stack.take().unwrap() - } - - /// Restores a query stack taken with [`Self::take_query_stack`] once - /// the thread unblocks. - pub(super) fn restore_query_stack(&self, stack: Vec) { - assert!(self.query_stack.borrow().is_none(), "query stack not taken"); - self.query_stack.replace(Some(stack)); - } -} - -impl std::panic::RefUnwindSafe for LocalState {} - -/// When a query is pushed onto the `active_query` stack, this guard -/// is returned to represent its slot. The guard can be used to pop -/// the query from the stack -- in the case of unwinding, the guard's -/// destructor will also remove the query. 
-pub(crate) struct ActiveQueryGuard<'me> { - local_state: &'me LocalState, - push_len: usize, - pub(crate) database_key_index: DatabaseKeyIndex, -} - -impl ActiveQueryGuard<'_> { - fn pop_helper(&self) -> ActiveQuery { - self.local_state.with_query_stack(|stack| { - // Sanity check: pushes and pops should be balanced. - assert_eq!(stack.len(), self.push_len); - debug_assert_eq!( - stack.last().unwrap().database_key_index, - self.database_key_index - ); - stack.pop().unwrap() - }) - } - - /// Invoked when the query has successfully completed execution. - pub(super) fn complete(self) -> ActiveQuery { - let query = self.pop_helper(); - std::mem::forget(self); - query - } - - /// Pops an active query from the stack. Returns the [`QueryRevisions`] - /// which summarizes the other queries that were accessed during this - /// query's execution. - #[inline] - pub(crate) fn pop(self) -> QueryRevisions { - // Extract accumulated inputs. - let popped_query = self.complete(); - - // If this frame were a cycle participant, it would have unwound. - assert!(popped_query.cycle.is_none()); - - popped_query.revisions() - } - - /// If the active query is registered as a cycle participant, remove and - /// return that cycle. - pub(crate) fn take_cycle(&self) -> Option { - self.local_state - .with_query_stack(|stack| stack.last_mut()?.cycle.take()) - } -} - -impl Drop for ActiveQueryGuard<'_> { - fn drop(&mut self) { - self.pop_helper(); - } -} diff --git a/src/storage.rs b/src/storage.rs deleted file mode 100644 index da64a03ee..000000000 --- a/src/storage.rs +++ /dev/null @@ -1,59 +0,0 @@ -use crate::{plumbing::DatabaseStorageTypes, Runtime}; -use std::sync::Arc; - -/// Stores the cached results and dependency information for all the queries -/// defined on your salsa database. Also embeds a [`Runtime`] which is used to -/// manage query execution. Every database must include a `storage: -/// Storage` field. 
-pub struct Storage { - query_store: Arc, - runtime: Runtime, -} - -impl Default for Storage { - fn default() -> Self { - Self { - query_store: Default::default(), - runtime: Default::default(), - } - } -} - -impl Storage { - /// Gives access to the underlying salsa runtime. - pub fn salsa_runtime(&self) -> &Runtime { - &self.runtime - } - - /// Gives access to the underlying salsa runtime. - pub fn salsa_runtime_mut(&mut self) -> &mut Runtime { - &mut self.runtime - } - - /// Access the query storage tables. Not meant to be used directly by end - /// users. - pub fn query_store(&self) -> &DB::DatabaseStorage { - &self.query_store - } - - /// Access the query storage tables. Not meant to be used directly by end - /// users. - pub fn query_store_mut(&mut self) -> (&DB::DatabaseStorage, &mut Runtime) { - (&self.query_store, &mut self.runtime) - } - - /// Returns a "snapshotted" storage, suitable for use in a forked database. - /// This snapshot hold a read-lock on the global state, which means that any - /// attempt to `set` an input will block until the forked runtime is - /// dropped. See `ParallelDatabase::snapshot` for more information. - /// - /// **Warning.** This second handle is intended to be used from a separate - /// thread. Using two database handles from the **same thread** can lead to - /// deadlock. 
- pub fn snapshot(&self) -> Self { - Storage { - query_store: self.query_store.clone(), - runtime: self.runtime.snapshot(), - } - } -} diff --git a/tests/cycles.rs b/tests/cycles.rs deleted file mode 100644 index 45af432e9..000000000 --- a/tests/cycles.rs +++ /dev/null @@ -1,492 +0,0 @@ -use std::panic::UnwindSafe; - -use salsa::{Durability, ParallelDatabase, Snapshot}; -use test_log::test; - -// Axes: -// -// Threading -// * Intra-thread -// * Cross-thread -- part of cycle is on one thread, part on another -// -// Recovery strategies: -// * Panic -// * Fallback -// * Mixed -- multiple strategies within cycle participants -// -// Across revisions: -// * N/A -- only one revision -// * Present in new revision, not old -// * Present in old revision, not new -// * Present in both revisions -// -// Dependencies -// * Tracked -// * Untracked -- cycle participant(s) contain untracked reads -// -// Layers -// * Direct -- cycle participant is directly invoked from test -// * Indirect -- invoked a query that invokes the cycle -// -// -// | Thread | Recovery | Old, New | Dep style | Layers | Test Name | -// | ------ | -------- | -------- | --------- | ------ | --------- | -// | Intra | Panic | N/A | Tracked | direct | cycle_memoized | -// | Intra | Panic | N/A | Untracked | direct | cycle_volatile | -// | Intra | Fallback | N/A | Tracked | direct | cycle_cycle | -// | Intra | Fallback | N/A | Tracked | indirect | inner_cycle | -// | Intra | Fallback | Both | Tracked | direct | cycle_revalidate | -// | Intra | Fallback | New | Tracked | direct | cycle_appears | -// | Intra | Fallback | Old | Tracked | direct | cycle_disappears | -// | Intra | Fallback | Old | Tracked | direct | cycle_disappears_durability | -// | Intra | Mixed | N/A | Tracked | direct | cycle_mixed_1 | -// | Intra | Mixed | N/A | Tracked | direct | cycle_mixed_2 | -// | Cross | Fallback | N/A | Tracked | both | parallel/cycles.rs: recover_parallel_cycle | -// | Cross | Panic | N/A | Tracked | both | 
parallel/cycles.rs: panic_parallel_cycle | - -#[derive(PartialEq, Eq, Hash, Clone, Debug)] -struct Error { - cycle: Vec, -} - -#[derive(Default)] -#[salsa::database(GroupStruct)] -struct DatabaseImpl { - storage: salsa::Storage, -} - -impl salsa::Database for DatabaseImpl {} - -impl ParallelDatabase for DatabaseImpl { - fn snapshot(&self) -> Snapshot { - Snapshot::new(DatabaseImpl { - storage: self.storage.snapshot(), - }) - } -} - -/// The queries A, B, and C in `Database` can be configured -/// to invoke one another in arbitrary ways using this -/// enum. -#[derive(Debug, Copy, Clone, PartialEq, Eq)] -enum CycleQuery { - None, - A, - B, - C, - AthenC, -} - -#[salsa::query_group(GroupStruct)] -trait Database: salsa::Database { - // `a` and `b` depend on each other and form a cycle - fn memoized_a(&self) -> (); - fn memoized_b(&self) -> (); - fn volatile_a(&self) -> (); - fn volatile_b(&self) -> (); - - #[salsa::input] - fn a_invokes(&self) -> CycleQuery; - - #[salsa::input] - fn b_invokes(&self) -> CycleQuery; - - #[salsa::input] - fn c_invokes(&self) -> CycleQuery; - - #[salsa::cycle(recover_a)] - fn cycle_a(&self) -> Result<(), Error>; - - #[salsa::cycle(recover_b)] - fn cycle_b(&self) -> Result<(), Error>; - - fn cycle_c(&self) -> Result<(), Error>; -} - -fn recover_a(db: &dyn Database, cycle: &salsa::Cycle) -> Result<(), Error> { - Err(Error { - cycle: cycle.all_participants(db), - }) -} - -fn recover_b(db: &dyn Database, cycle: &salsa::Cycle) -> Result<(), Error> { - Err(Error { - cycle: cycle.all_participants(db), - }) -} - -fn memoized_a(db: &dyn Database) { - db.memoized_b() -} - -fn memoized_b(db: &dyn Database) { - db.memoized_a() -} - -fn volatile_a(db: &dyn Database) { - db.salsa_runtime().report_untracked_read(); - db.volatile_b() -} - -fn volatile_b(db: &dyn Database) { - db.salsa_runtime().report_untracked_read(); - db.volatile_a() -} - -impl CycleQuery { - fn invoke(self, db: &dyn Database) -> Result<(), Error> { - match self { - CycleQuery::A => 
db.cycle_a(), - CycleQuery::B => db.cycle_b(), - CycleQuery::C => db.cycle_c(), - CycleQuery::AthenC => { - let _ = db.cycle_a(); - db.cycle_c() - } - CycleQuery::None => Ok(()), - } - } -} - -fn cycle_a(db: &dyn Database) -> Result<(), Error> { - dbg!("cycle_a"); - db.a_invokes().invoke(db) -} - -fn cycle_b(db: &dyn Database) -> Result<(), Error> { - dbg!("cycle_b"); - db.b_invokes().invoke(db) -} - -fn cycle_c(db: &dyn Database) -> Result<(), Error> { - dbg!("cycle_c"); - db.c_invokes().invoke(db) -} - -#[track_caller] -fn extract_cycle(f: impl FnOnce() + UnwindSafe) -> salsa::Cycle { - let v = std::panic::catch_unwind(f); - if let Err(d) = &v { - if let Some(cycle) = d.downcast_ref::() { - return cycle.clone(); - } - } - panic!("unexpected value: {:?}", v) -} - -#[test] -fn cycle_memoized() { - let db = DatabaseImpl::default(); - let cycle = extract_cycle(|| db.memoized_a()); - insta::assert_debug_snapshot!(cycle.unexpected_participants(&db), @r###" - [ - "memoized_a(())", - "memoized_b(())", - ] - "###); -} - -#[test] -fn cycle_volatile() { - let db = DatabaseImpl::default(); - let cycle = extract_cycle(|| db.volatile_a()); - insta::assert_debug_snapshot!(cycle.unexpected_participants(&db), @r###" - [ - "volatile_a(())", - "volatile_b(())", - ] - "###); -} - -#[test] -fn cycle_cycle() { - let mut query = DatabaseImpl::default(); - - // A --> B - // ^ | - // +-----+ - - query.set_a_invokes(CycleQuery::B); - query.set_b_invokes(CycleQuery::A); - - assert!(query.cycle_a().is_err()); -} - -#[test] -fn inner_cycle() { - let mut query = DatabaseImpl::default(); - - // A --> B <-- C - // ^ | - // +-----+ - - query.set_a_invokes(CycleQuery::B); - query.set_b_invokes(CycleQuery::A); - query.set_c_invokes(CycleQuery::B); - - let err = query.cycle_c(); - assert!(err.is_err()); - let cycle = err.unwrap_err().cycle; - insta::assert_debug_snapshot!(cycle, @r###" - [ - "cycle_a(())", - "cycle_b(())", - ] - "###); -} - -#[test] -fn cycle_revalidate() { - let mut db = 
DatabaseImpl::default(); - - // A --> B - // ^ | - // +-----+ - db.set_a_invokes(CycleQuery::B); - db.set_b_invokes(CycleQuery::A); - - assert!(db.cycle_a().is_err()); - db.set_b_invokes(CycleQuery::A); // same value as default - assert!(db.cycle_a().is_err()); -} - -#[test] -fn cycle_revalidate_unchanged_twice() { - let mut db = DatabaseImpl::default(); - - // A --> B - // ^ | - // +-----+ - db.set_a_invokes(CycleQuery::B); - db.set_b_invokes(CycleQuery::A); - - assert!(db.cycle_a().is_err()); - db.set_c_invokes(CycleQuery::A); // force new revisi5on - - // on this run - insta::assert_debug_snapshot!(db.cycle_a(), @r###" - Err( - Error { - cycle: [ - "cycle_a(())", - "cycle_b(())", - ], - }, - ) - "###); -} - -#[test] -fn cycle_appears() { - let mut db = DatabaseImpl::default(); - - // A --> B - db.set_a_invokes(CycleQuery::B); - db.set_b_invokes(CycleQuery::None); - assert!(db.cycle_a().is_ok()); - - // A --> B - // ^ | - // +-----+ - db.set_b_invokes(CycleQuery::A); - log::debug!("Set Cycle Leaf"); - assert!(db.cycle_a().is_err()); -} - -#[test] -fn cycle_disappears() { - let mut db = DatabaseImpl::default(); - - // A --> B - // ^ | - // +-----+ - db.set_a_invokes(CycleQuery::B); - db.set_b_invokes(CycleQuery::A); - assert!(db.cycle_a().is_err()); - - // A --> B - db.set_b_invokes(CycleQuery::None); - assert!(db.cycle_a().is_ok()); -} - -/// A variant on `cycle_disappears` in which the values of -/// `a_invokes` and `b_invokes` are set with durability values. -/// If we are not careful, this could cause us to overlook -/// the fact that the cycle will no longer occur. -#[test] -fn cycle_disappears_durability() { - let mut db = DatabaseImpl::default(); - db.set_a_invokes_with_durability(CycleQuery::B, Durability::LOW); - db.set_b_invokes_with_durability(CycleQuery::A, Durability::HIGH); - - let res = db.cycle_a(); - assert!(res.is_err()); - - // At this point, `a` read `LOW` input, and `b` read `HIGH` input. 
However, - // because `b` participates in the same cycle as `a`, its final durability - // should be `LOW`. - // - // Check that setting a `LOW` input causes us to re-execute `b` query, and - // observe that the cycle goes away. - db.set_a_invokes_with_durability(CycleQuery::None, Durability::LOW); - - let res = db.cycle_b(); - assert!(res.is_ok()); -} - -#[test] -fn cycle_mixed_1() { - let mut db = DatabaseImpl::default(); - // A --> B <-- C - // | ^ - // +-----+ - db.set_a_invokes(CycleQuery::B); - db.set_b_invokes(CycleQuery::C); - db.set_c_invokes(CycleQuery::B); - - let u = db.cycle_c(); - insta::assert_debug_snapshot!(u, @r###" - Err( - Error { - cycle: [ - "cycle_b(())", - "cycle_c(())", - ], - }, - ) - "###); -} - -#[test] -fn cycle_mixed_2() { - let mut db = DatabaseImpl::default(); - - // Configuration: - // - // A --> B --> C - // ^ | - // +-----------+ - db.set_a_invokes(CycleQuery::B); - db.set_b_invokes(CycleQuery::C); - db.set_c_invokes(CycleQuery::A); - - let u = db.cycle_a(); - insta::assert_debug_snapshot!(u, @r###" - Err( - Error { - cycle: [ - "cycle_a(())", - "cycle_b(())", - "cycle_c(())", - ], - }, - ) - "###); -} - -#[test] -fn cycle_deterministic_order() { - // No matter whether we start from A or B, we get the same set of participants: - let db = || { - let mut db = DatabaseImpl::default(); - // A --> B - // ^ | - // +-----+ - db.set_a_invokes(CycleQuery::B); - db.set_b_invokes(CycleQuery::A); - db - }; - let a = db().cycle_a(); - let b = db().cycle_b(); - insta::assert_debug_snapshot!((a, b), @r###" - ( - Err( - Error { - cycle: [ - "cycle_a(())", - "cycle_b(())", - ], - }, - ), - Err( - Error { - cycle: [ - "cycle_a(())", - "cycle_b(())", - ], - }, - ), - ) - "###); -} - -#[test] -fn cycle_multiple() { - // No matter whether we start from A or B, we get the same set of participants: - let mut db = DatabaseImpl::default(); - - // Configuration: - // - // A --> B <-- C - // ^ | ^ - // +-----+ | - // | | - // +-----+ - // - // Here, 
conceptually, B encounters a cycle with A and then - // recovers. - db.set_a_invokes(CycleQuery::B); - db.set_b_invokes(CycleQuery::AthenC); - db.set_c_invokes(CycleQuery::B); - - let c = db.cycle_c(); - let b = db.cycle_b(); - let a = db.cycle_a(); - insta::assert_debug_snapshot!((a, b, c), @r###" - ( - Err( - Error { - cycle: [ - "cycle_a(())", - "cycle_b(())", - ], - }, - ), - Err( - Error { - cycle: [ - "cycle_a(())", - "cycle_b(())", - ], - }, - ), - Err( - Error { - cycle: [ - "cycle_a(())", - "cycle_b(())", - ], - }, - ), - ) - "###); -} - -#[test] -fn cycle_recovery_set_but_not_participating() { - let mut db = DatabaseImpl::default(); - - // A --> C -+ - // ^ | - // +--+ - db.set_a_invokes(CycleQuery::C); - db.set_c_invokes(CycleQuery::C); - - // Here we expect C to panic and A not to recover: - let r = extract_cycle(|| drop(db.cycle_a())); - insta::assert_debug_snapshot!(r.all_participants(&db), @r###" - [ - "cycle_c(())", - ] - "###); -} diff --git a/tests/dyn_trait.rs b/tests/dyn_trait.rs deleted file mode 100644 index 09ebc5c4c..000000000 --- a/tests/dyn_trait.rs +++ /dev/null @@ -1,28 +0,0 @@ -//! Test that you can implement a query using a `dyn Trait` setup. 
- -#[salsa::database(DynTraitStorage)] -#[derive(Default)] -struct DynTraitDatabase { - storage: salsa::Storage, -} - -impl salsa::Database for DynTraitDatabase {} - -#[salsa::query_group(DynTraitStorage)] -trait DynTrait { - #[salsa::input] - fn input(&self, x: u32) -> u32; - - fn output(&self, x: u32) -> u32; -} - -fn output(db: &dyn DynTrait, x: u32) -> u32 { - db.input(x) * 2 -} - -#[test] -fn dyn_trait() { - let mut query = DynTraitDatabase::default(); - query.set_input(22, 23); - assert_eq!(query.output(22), 46); -} diff --git a/tests/incremental/constants.rs b/tests/incremental/constants.rs deleted file mode 100644 index 30f42b136..000000000 --- a/tests/incremental/constants.rs +++ /dev/null @@ -1,148 +0,0 @@ -use crate::implementation::{TestContext, TestContextImpl}; -use salsa::debug::DebugQueryTable; -use salsa::Durability; - -#[salsa::query_group(Constants)] -pub(crate) trait ConstantsDatabase: TestContext { - #[salsa::input] - fn input(&self, key: char) -> usize; - - fn add(&self, key1: char, key2: char) -> usize; - - fn add3(&self, key1: char, key2: char, key3: char) -> usize; -} - -fn add(db: &dyn ConstantsDatabase, key1: char, key2: char) -> usize { - db.log().add(format!("add({}, {})", key1, key2)); - db.input(key1) + db.input(key2) -} - -fn add3(db: &dyn ConstantsDatabase, key1: char, key2: char, key3: char) -> usize { - db.log().add(format!("add3({}, {}, {})", key1, key2, key3)); - db.add(key1, key2) + db.input(key3) -} - -// Test we can assign a constant and things will be correctly -// recomputed afterwards. 
-#[test] -fn invalidate_constant() { - let db = &mut TestContextImpl::default(); - db.set_input_with_durability('a', 44, Durability::HIGH); - db.set_input_with_durability('b', 22, Durability::HIGH); - assert_eq!(db.add('a', 'b'), 66); - - db.set_input_with_durability('a', 66, Durability::HIGH); - assert_eq!(db.add('a', 'b'), 88); -} - -#[test] -fn invalidate_constant_1() { - let db = &mut TestContextImpl::default(); - - // Not constant: - db.set_input('a', 44); - assert_eq!(db.add('a', 'a'), 88); - - // Becomes constant: - db.set_input_with_durability('a', 44, Durability::HIGH); - assert_eq!(db.add('a', 'a'), 88); - - // Invalidates: - db.set_input_with_durability('a', 33, Durability::HIGH); - assert_eq!(db.add('a', 'a'), 66); -} - -// Test cases where we assign same value to 'a' after declaring it a -// constant. -#[test] -fn set_after_constant_same_value() { - let db = &mut TestContextImpl::default(); - db.set_input_with_durability('a', 44, Durability::HIGH); - db.set_input_with_durability('a', 44, Durability::HIGH); - db.set_input('a', 44); -} - -#[test] -fn not_constant() { - let mut db = TestContextImpl::default(); - - db.set_input('a', 22); - db.set_input('b', 44); - assert_eq!(db.add('a', 'b'), 66); - assert_eq!(Durability::LOW, AddQuery.in_db(&db).durability(('a', 'b'))); -} - -#[test] -fn durability() { - let mut db = TestContextImpl::default(); - - db.set_input_with_durability('a', 22, Durability::HIGH); - db.set_input_with_durability('b', 44, Durability::HIGH); - assert_eq!(db.add('a', 'b'), 66); - assert_eq!(Durability::HIGH, AddQuery.in_db(&db).durability(('a', 'b'))); -} - -#[test] -fn mixed_constant() { - let mut db = TestContextImpl::default(); - - db.set_input_with_durability('a', 22, Durability::HIGH); - db.set_input('b', 44); - assert_eq!(db.add('a', 'b'), 66); - assert_eq!(Durability::LOW, AddQuery.in_db(&db).durability(('a', 'b'))); -} - -#[test] -fn becomes_constant_with_change() { - let mut db = TestContextImpl::default(); - - 
db.set_input('a', 22); - db.set_input('b', 44); - assert_eq!(db.add('a', 'b'), 66); - assert_eq!(Durability::LOW, AddQuery.in_db(&db).durability(('a', 'b'))); - - db.set_input_with_durability('a', 23, Durability::HIGH); - assert_eq!(db.add('a', 'b'), 67); - assert_eq!(Durability::LOW, AddQuery.in_db(&db).durability(('a', 'b'))); - - db.set_input_with_durability('b', 45, Durability::HIGH); - assert_eq!(db.add('a', 'b'), 68); - assert_eq!(Durability::HIGH, AddQuery.in_db(&db).durability(('a', 'b'))); - - db.set_input_with_durability('b', 45, Durability::MEDIUM); - assert_eq!(db.add('a', 'b'), 68); - assert_eq!( - Durability::MEDIUM, - AddQuery.in_db(&db).durability(('a', 'b')) - ); -} - -// Test a subtle case in which an input changes from constant to -// non-constant, but its value doesn't change. If we're not careful, -// this can cause us to incorrectly consider derived values as still -// being constant. -#[test] -fn constant_to_non_constant() { - let mut db = TestContextImpl::default(); - - db.set_input_with_durability('a', 11, Durability::HIGH); - db.set_input_with_durability('b', 22, Durability::HIGH); - db.set_input_with_durability('c', 33, Durability::HIGH); - - // Here, `add3` invokes `add`, which yields 33. Both calls are - // constant. - assert_eq!(db.add3('a', 'b', 'c'), 66); - - db.set_input('a', 11); - - // Here, `add3` invokes `add`, which *still* yields 33, but which - // is no longer constant. Since value didn't change, we might - // preserve `add3` unchanged, not noticing that it is no longer - // constant. - assert_eq!(db.add3('a', 'b', 'c'), 66); - - // In that case, we would not get the correct result here, when - // 'a' changes *again*. 
- db.set_input('a', 22); - assert_eq!(db.add3('a', 'b', 'c'), 77); -} diff --git a/tests/incremental/counter.rs b/tests/incremental/counter.rs deleted file mode 100644 index c04857e24..000000000 --- a/tests/incremental/counter.rs +++ /dev/null @@ -1,14 +0,0 @@ -use std::cell::Cell; - -#[derive(Default)] -pub(crate) struct Counter { - value: Cell, -} - -impl Counter { - pub(crate) fn increment(&self) -> usize { - let v = self.value.get(); - self.value.set(v + 1); - v - } -} diff --git a/tests/incremental/implementation.rs b/tests/incremental/implementation.rs deleted file mode 100644 index a9c0a4a01..000000000 --- a/tests/incremental/implementation.rs +++ /dev/null @@ -1,58 +0,0 @@ -use crate::constants; -use crate::counter::Counter; -use crate::log::Log; -use crate::memoized_dep_inputs; -use crate::memoized_inputs; -use crate::memoized_volatile; - -pub(crate) trait TestContext: salsa::Database { - fn clock(&self) -> &Counter; - fn log(&self) -> &Log; -} - -#[salsa::database( - constants::Constants, - memoized_dep_inputs::MemoizedDepInputs, - memoized_inputs::MemoizedInputs, - memoized_volatile::MemoizedVolatile -)] -#[derive(Default)] -pub(crate) struct TestContextImpl { - storage: salsa::Storage, - clock: Counter, - log: Log, -} - -impl TestContextImpl { - #[track_caller] - pub(crate) fn assert_log(&self, expected_log: &[&str]) { - let expected_text = &format!("{:#?}", expected_log); - let actual_text = &format!("{:#?}", self.log().take()); - - if expected_text == actual_text { - return; - } - - for diff in diff::lines(expected_text, actual_text) { - match diff { - diff::Result::Left(l) => println!("-{}", l), - diff::Result::Both(l, _) => println!(" {}", l), - diff::Result::Right(r) => println!("+{}", r), - } - } - - panic!("incorrect log results"); - } -} - -impl TestContext for TestContextImpl { - fn clock(&self) -> &Counter { - &self.clock - } - - fn log(&self) -> &Log { - &self.log - } -} - -impl salsa::Database for TestContextImpl {} diff --git 
a/tests/incremental/log.rs b/tests/incremental/log.rs deleted file mode 100644 index 1ee57fe66..000000000 --- a/tests/incremental/log.rs +++ /dev/null @@ -1,16 +0,0 @@ -use std::cell::RefCell; - -#[derive(Default)] -pub(crate) struct Log { - data: RefCell>, -} - -impl Log { - pub(crate) fn add(&self, text: impl Into) { - self.data.borrow_mut().push(text.into()); - } - - pub(crate) fn take(&self) -> Vec { - self.data.take() - } -} diff --git a/tests/incremental/main.rs b/tests/incremental/main.rs deleted file mode 100644 index bcd13c75f..000000000 --- a/tests/incremental/main.rs +++ /dev/null @@ -1,9 +0,0 @@ -mod constants; -mod counter; -mod implementation; -mod log; -mod memoized_dep_inputs; -mod memoized_inputs; -mod memoized_volatile; - -fn main() {} diff --git a/tests/incremental/memoized_dep_inputs.rs b/tests/incremental/memoized_dep_inputs.rs deleted file mode 100644 index d76cbfb1f..000000000 --- a/tests/incremental/memoized_dep_inputs.rs +++ /dev/null @@ -1,61 +0,0 @@ -use crate::implementation::{TestContext, TestContextImpl}; - -#[salsa::query_group(MemoizedDepInputs)] -pub(crate) trait MemoizedDepInputsContext: TestContext { - fn dep_memoized2(&self) -> usize; - fn dep_memoized1(&self) -> usize; - #[salsa::dependencies] - fn dep_derived1(&self) -> usize; - #[salsa::input] - fn dep_input1(&self) -> usize; - #[salsa::input] - fn dep_input2(&self) -> usize; -} - -fn dep_memoized2(db: &dyn MemoizedDepInputsContext) -> usize { - db.log().add("Memoized2 invoked"); - db.dep_memoized1() -} - -fn dep_memoized1(db: &dyn MemoizedDepInputsContext) -> usize { - db.log().add("Memoized1 invoked"); - db.dep_derived1() * 2 -} - -fn dep_derived1(db: &dyn MemoizedDepInputsContext) -> usize { - db.log().add("Derived1 invoked"); - db.dep_input1() / 2 -} - -#[test] -fn revalidate() { - let db = &mut TestContextImpl::default(); - - db.set_dep_input1(0); - - // Initial run starts from Memoized2: - let v = db.dep_memoized2(); - assert_eq!(v, 0); - db.assert_log(&["Memoized2 
invoked", "Memoized1 invoked", "Derived1 invoked"]); - - // After that, we first try to validate Memoized1 but wind up - // running Memoized2. Note that we don't try to validate - // Derived1, so it is invoked by Memoized1. - db.set_dep_input1(44); - let v = db.dep_memoized2(); - assert_eq!(v, 44); - db.assert_log(&["Memoized1 invoked", "Derived1 invoked", "Memoized2 invoked"]); - - // Here validation of Memoized1 succeeds so Memoized2 never runs. - let value = db.remove_dep_input1() + 1; - db.set_dep_input1(value); - let v = db.dep_memoized2(); - assert_eq!(v, 44); - db.assert_log(&["Memoized1 invoked", "Derived1 invoked"]); - - // Here, a change to input2 doesn't affect us, so nothing runs. - db.set_dep_input2(45); - let v = db.dep_memoized2(); - assert_eq!(v, 44); - db.assert_log(&[]); -} diff --git a/tests/incremental/memoized_inputs.rs b/tests/incremental/memoized_inputs.rs deleted file mode 100644 index ae5bf7637..000000000 --- a/tests/incremental/memoized_inputs.rs +++ /dev/null @@ -1,86 +0,0 @@ -use crate::implementation::{TestContext, TestContextImpl}; - -#[salsa::query_group(MemoizedInputs)] -pub(crate) trait MemoizedInputsContext: TestContext { - fn max(&self) -> usize; - #[salsa::input] - fn input1(&self) -> usize; - #[salsa::input] - fn input2(&self) -> usize; -} - -fn max(db: &dyn MemoizedInputsContext) -> usize { - db.log().add("Max invoked"); - std::cmp::max(db.input1(), db.input2()) -} - -#[test] -fn revalidate() { - let db = &mut TestContextImpl::default(); - - db.set_input1(0); - db.set_input2(0); - - let v = db.max(); - assert_eq!(v, 0); - db.assert_log(&["Max invoked"]); - - let v = db.max(); - assert_eq!(v, 0); - db.assert_log(&[]); - - db.set_input1(44); - db.assert_log(&[]); - - let v = db.max(); - assert_eq!(v, 44); - db.assert_log(&["Max invoked"]); - - let v = db.max(); - assert_eq!(v, 44); - db.assert_log(&[]); - - db.set_input1(44); - db.assert_log(&[]); - db.set_input2(66); - db.assert_log(&[]); - db.set_input1(64); - 
db.assert_log(&[]); - - let value = db.remove_input1() + 1; - db.set_input1(value); - db.assert_log(&[]); - let value = db.remove_input2() + 1; - db.set_input2(value); - db.assert_log(&[]); - let value = db.remove_input1() + 1; - db.set_input1(value); - db.assert_log(&[]); - - let v = db.max(); - assert_eq!(v, 67); - db.assert_log(&["Max invoked"]); - - let v = db.max(); - assert_eq!(v, 67); - db.assert_log(&[]); -} - -/// Test that invoking `set` on an input with the same value still -/// triggers a new revision. -#[test] -fn set_after_no_change() { - let db = &mut TestContextImpl::default(); - - db.set_input2(0); - - db.set_input1(44); - let v = db.max(); - assert_eq!(v, 44); - db.assert_log(&["Max invoked"]); - - db.set_input1(44); - let v = db.max(); - assert_eq!(v, 44); - db.assert_log(&["Max invoked"]); -} diff --git a/tests/incremental/memoized_volatile.rs b/tests/incremental/memoized_volatile.rs deleted file mode 100644 index e046db0b9..000000000 --- a/tests/incremental/memoized_volatile.rs +++ /dev/null @@ -1,78 +0,0 @@ -use crate::implementation::{TestContext, TestContextImpl}; -use salsa::{Database, Durability}; -use test_log::test; - -#[salsa::query_group(MemoizedVolatile)] -pub(crate) trait MemoizedVolatileContext: TestContext { - // Queries for testing a "volatile" value wrapped by - // memoization. 
- fn memoized2(&self) -> usize; - fn memoized1(&self) -> usize; - fn volatile(&self) -> usize; -} - -fn memoized2(db: &dyn MemoizedVolatileContext) -> usize { - db.log().add("Memoized2 invoked"); - db.memoized1() -} - -fn memoized1(db: &dyn MemoizedVolatileContext) -> usize { - db.log().add("Memoized1 invoked"); - let v = db.volatile(); - v / 2 -} - -fn volatile(db: &dyn MemoizedVolatileContext) -> usize { - db.log().add("Volatile invoked"); - db.salsa_runtime().report_untracked_read(); - db.clock().increment() -} - -#[test] -fn volatile_x2() { - let query = TestContextImpl::default(); - - // Invoking volatile twice doesn't execute twice, because volatile - // queries are memoized by default. - query.volatile(); - query.volatile(); - query.assert_log(&["Volatile invoked"]); -} - -/// Test that: -/// -/// - On the first run of R0, we recompute everything. -/// - On the second run of R1, we recompute nothing. -/// - On the first run of R1, we recompute Memoized1 but not Memoized2 (since Memoized1 result -/// did not change). -/// - On the second run of R1, we recompute nothing. -/// - On the first run of R2, we recompute everything (since Memoized1 result *did* change). -#[test] -fn revalidate() { - let mut query = TestContextImpl::default(); - - query.memoized2(); - query.assert_log(&["Memoized2 invoked", "Memoized1 invoked", "Volatile invoked"]); - - query.memoized2(); - query.assert_log(&[]); - - // Second generation: volatile will change (to 1) but memoized1 - // will not (still 0, as 1/2 = 0) - query.salsa_runtime_mut().synthetic_write(Durability::LOW); - query.memoized2(); - query.assert_log(&["Volatile invoked", "Memoized1 invoked"]); - query.memoized2(); - query.assert_log(&[]); - - // Third generation: volatile will change (to 2) and memoized1 - // will too (to 1). Therefore, after validating that Memoized1 - // changed, we now invoke Memoized2. 
- query.salsa_runtime_mut().synthetic_write(Durability::LOW); - - query.memoized2(); - query.assert_log(&["Volatile invoked", "Memoized1 invoked", "Memoized2 invoked"]); - - query.memoized2(); - query.assert_log(&[]); -} diff --git a/tests/interned.rs b/tests/interned.rs deleted file mode 100644 index bf8683114..000000000 --- a/tests/interned.rs +++ /dev/null @@ -1,98 +0,0 @@ -//! Test that you can implement a query using a `dyn Trait` setup. - -use salsa::InternId; - -#[salsa::database(InternStorage)] -#[derive(Default)] -struct Database { - storage: salsa::Storage, -} - -impl salsa::Database for Database {} - -impl salsa::ParallelDatabase for Database { - fn snapshot(&self) -> salsa::Snapshot { - salsa::Snapshot::new(Database { - storage: self.storage.snapshot(), - }) - } -} - -#[salsa::query_group(InternStorage)] -trait Intern { - #[salsa::interned] - fn intern1(&self, x: String) -> InternId; - - #[salsa::interned] - fn intern2(&self, x: String, y: String) -> InternId; - - #[salsa::interned] - fn intern_key(&self, x: String) -> InternKey; -} - -#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)] -pub struct InternKey(InternId); - -impl salsa::InternKey for InternKey { - fn from_intern_id(v: InternId) -> Self { - InternKey(v) - } - - fn as_intern_id(&self) -> InternId { - self.0 - } -} - -#[test] -fn test_intern1() { - let db = Database::default(); - let foo0 = db.intern1("foo".to_string()); - let bar0 = db.intern1("bar".to_string()); - let foo1 = db.intern1("foo".to_string()); - let bar1 = db.intern1("bar".to_string()); - - assert_eq!(foo0, foo1); - assert_eq!(bar0, bar1); - assert_ne!(foo0, bar0); - - assert_eq!("foo".to_string(), db.lookup_intern1(foo0)); - assert_eq!("bar".to_string(), db.lookup_intern1(bar0)); -} - -#[test] -fn test_intern2() { - let db = Database::default(); - let foo0 = db.intern2("x".to_string(), "foo".to_string()); - let bar0 = db.intern2("x".to_string(), "bar".to_string()); - let foo1 = db.intern2("x".to_string(), "foo".to_string()); - 
let bar1 = db.intern2("x".to_string(), "bar".to_string()); - - assert_eq!(foo0, foo1); - assert_eq!(bar0, bar1); - assert_ne!(foo0, bar0); - - assert_eq!( - ("x".to_string(), "foo".to_string()), - db.lookup_intern2(foo0) - ); - assert_eq!( - ("x".to_string(), "bar".to_string()), - db.lookup_intern2(bar0) - ); -} - -#[test] -fn test_intern_key() { - let db = Database::default(); - let foo0 = db.intern_key("foo".to_string()); - let bar0 = db.intern_key("bar".to_string()); - let foo1 = db.intern_key("foo".to_string()); - let bar1 = db.intern_key("bar".to_string()); - - assert_eq!(foo0, foo1); - assert_eq!(bar0, bar1); - assert_ne!(foo0, bar0); - - assert_eq!("foo".to_string(), db.lookup_intern_key(foo0)); - assert_eq!("bar".to_string(), db.lookup_intern_key(bar0)); -} diff --git a/tests/lru.rs b/tests/lru.rs deleted file mode 100644 index 66f3498c0..000000000 --- a/tests/lru.rs +++ /dev/null @@ -1,163 +0,0 @@ -//! Test setting LRU actually limits the number of things in the database; -use std::{ - cell::RefCell, - sync::{ - atomic::{AtomicUsize, Ordering}, - Arc, - }, -}; - -use salsa::{Database as _, Durability}; - -trait LruPeek { - fn log(&self, event: String); -} - -#[derive(Debug, PartialEq, Eq)] -struct HotPotato(u32); - -thread_local! 
{ - static N_POTATOES: AtomicUsize = const { AtomicUsize::new(0) } -} - -impl HotPotato { - fn new(id: u32) -> HotPotato { - N_POTATOES.with(|n| n.fetch_add(1, Ordering::SeqCst)); - HotPotato(id) - } -} - -impl Drop for HotPotato { - fn drop(&mut self) { - N_POTATOES.with(|n| n.fetch_sub(1, Ordering::SeqCst)); - } -} - -#[salsa::query_group(QueryGroupStorage)] -trait QueryGroup: salsa::Database + LruPeek { - fn get2(&self, x: u32) -> u32; - fn get(&self, x: u32) -> Arc; - fn get_volatile(&self, x: u32) -> usize; -} - -/// Create a hotpotato (this will increment the counter above) -fn get(db: &dyn QueryGroup, x: u32) -> Arc { - db.log(format!("get({x})")); - Arc::new(HotPotato::new(x)) -} - -/// Forward to the `get` query -fn get2(db: &dyn QueryGroup, x: u32) -> u32 { - db.log(format!("get2({x})")); - db.get(x).0 -} - -// Like `get`, but with a volatile input, which means it can't -// be LRU'd. -fn get_volatile(db: &dyn QueryGroup, _x: u32) -> usize { - static COUNTER: AtomicUsize = AtomicUsize::new(0); - db.salsa_runtime().report_untracked_read(); - COUNTER.fetch_add(1, Ordering::SeqCst) -} - -#[salsa::database(QueryGroupStorage)] -#[derive(Default)] -struct Database { - storage: salsa::Storage, - logs: RefCell>, -} - -impl salsa::Database for Database {} - -impl LruPeek for Database { - fn log(&self, event: String) { - eprintln!("{event}"); - self.logs.borrow_mut().push(event); - } -} - -fn load_n_potatoes() -> usize { - N_POTATOES.with(|n| n.load(Ordering::SeqCst)) -} - -#[test] -fn lru_works() { - let mut db = Database::default(); - GetQuery.in_db_mut(&mut db).set_lru_capacity(32); - assert_eq!(load_n_potatoes(), 0); - - for i in 0..128u32 { - let p = db.get(i); - assert_eq!(p.0, i) - } - assert_eq!(load_n_potatoes(), 32); - - for i in 0..128u32 { - let p = db.get(i); - assert_eq!(p.0, i) - } - assert_eq!(load_n_potatoes(), 32); - - GetQuery.in_db_mut(&mut db).set_lru_capacity(32); - assert_eq!(load_n_potatoes(), 32); - - GetQuery.in_db_mut(&mut 
db).set_lru_capacity(64); - assert_eq!(load_n_potatoes(), 32); - for i in 0..128u32 { - let p = db.get(i); - assert_eq!(p.0, i) - } - assert_eq!(load_n_potatoes(), 64); - - // Special case: setting capacity to zero disables LRU - GetQuery.in_db_mut(&mut db).set_lru_capacity(0); - assert_eq!(load_n_potatoes(), 64); - for i in 0..128u32 { - let p = db.get(i); - assert_eq!(p.0, i) - } - assert_eq!(load_n_potatoes(), 128); - - drop(db); - assert_eq!(load_n_potatoes(), 0); -} - -#[test] -fn lru_doesnt_break_volatile_queries() { - let mut db = Database::default(); - GetVolatileQuery.in_db_mut(&mut db).set_lru_capacity(32); - // Here, we check that we execute each volatile query at most once, despite - // LRU. That does mean that we have more values in DB than the LRU capacity, - // but it's much better than inconsistent results from volatile queries! - for i in (0..3).flat_map(|_| 0..128usize) { - let x = db.get_volatile(i as u32); - assert_eq!(x, i) - } -} - -#[test] -fn lru_keeps_dependency_info() { - let mut db = Database::default(); - let capacity = 4; - GetQuery.in_db_mut(&mut db).set_lru_capacity(capacity); - - // Invoke `get2` 128 times. This will (in turn) invoke - // `get`, which will trigger LRU after 32 executions. - for i in 0..(capacity + 1) { - let p = db.get2(i as u32); - assert_eq!(p, i as u32); - } - - db.salsa_runtime_mut().synthetic_write(Durability::HIGH); - - // We want to test that calls to `get2` are still considered - // clean. Check that no new executions occur as we go here. 
- let events = db.logs.borrow().len(); - assert_eq!(events, (capacity + 1) * 2); - - // calling `get2(0)` has to check that `get(0)` is still valid; - // even though we've evicted it (LRU), we find that it is still good - let p = db.get2(0); - assert_eq!(p, 0); - assert_eq!(db.logs.borrow().len(), events); -} diff --git a/tests/macros.rs b/tests/macros.rs deleted file mode 100644 index 3d818e53c..000000000 --- a/tests/macros.rs +++ /dev/null @@ -1,11 +0,0 @@ -#[salsa::query_group(MyStruct)] -trait MyDatabase: salsa::Database { - #[salsa::invoke(another_module::another_name)] - fn my_query(&self, key: ()) -> (); -} - -mod another_module { - pub(crate) fn another_name(_: &dyn crate::MyDatabase, (): ()) {} -} - -fn main() {} diff --git a/tests/no_send_sync.rs b/tests/no_send_sync.rs deleted file mode 100644 index 2648f2b7a..000000000 --- a/tests/no_send_sync.rs +++ /dev/null @@ -1,33 +0,0 @@ -extern crate salsa; - -use std::rc::Rc; - -#[salsa::query_group(NoSendSyncStorage)] -trait NoSendSyncDatabase: salsa::Database { - fn no_send_sync_value(&self, key: bool) -> Rc; - fn no_send_sync_key(&self, key: Rc) -> bool; -} - -fn no_send_sync_value(_db: &dyn NoSendSyncDatabase, key: bool) -> Rc { - Rc::new(key) -} - -fn no_send_sync_key(_db: &dyn NoSendSyncDatabase, key: Rc) -> bool { - *key -} - -#[salsa::database(NoSendSyncStorage)] -#[derive(Default)] -struct DatabaseImpl { - storage: salsa::Storage, -} - -impl salsa::Database for DatabaseImpl {} - -#[test] -fn no_send_sync() { - let db = DatabaseImpl::default(); - - assert_eq!(db.no_send_sync_value(true), Rc::new(true)); - assert!(!db.no_send_sync_key(Rc::new(false))); -} diff --git a/tests/on_demand_inputs.rs b/tests/on_demand_inputs.rs deleted file mode 100644 index 6e2b68360..000000000 --- a/tests/on_demand_inputs.rs +++ /dev/null @@ -1,156 +0,0 @@ -//! Test that "on-demand" input pattern works. -//! -//! On-demand inputs are inputs computed lazily on the fly. They are simulated -//! 
via a b query with zero inputs, which uses `add_synthetic_read` to -//! tweak durability and `invalidate` to clear the input. - -use std::{cell::RefCell, collections::HashMap, rc::Rc}; - -use salsa::{Database as _, Durability, EventKind}; - -#[salsa::query_group(QueryGroupStorage)] -trait QueryGroup: salsa::Database + AsRef> { - fn a(&self, x: u32) -> u32; - fn b(&self, x: u32) -> u32; - fn c(&self, x: u32) -> u32; -} - -fn a(db: &dyn QueryGroup, x: u32) -> u32 { - let durability = if x % 2 == 0 { - Durability::LOW - } else { - Durability::HIGH - }; - db.salsa_runtime().report_synthetic_read(durability); - let external_state: &HashMap = db.as_ref(); - external_state[&x] -} - -fn b(db: &dyn QueryGroup, x: u32) -> u32 { - db.a(x) -} - -fn c(db: &dyn QueryGroup, x: u32) -> u32 { - db.b(x) -} - -#[salsa::database(QueryGroupStorage)] -#[derive(Default)] -struct Database { - storage: salsa::Storage, - external_state: HashMap, - #[allow(clippy::type_complexity)] - on_event: Option>, -} - -impl salsa::Database for Database { - fn salsa_event(&self, event: salsa::Event) { - dbg!(event.debug(self)); - - if let Some(cb) = &self.on_event { - cb(self, event) - } - } -} - -impl AsRef> for Database { - fn as_ref(&self) -> &HashMap { - &self.external_state - } -} - -#[test] -fn on_demand_input_works() { - let mut db = Database::default(); - - db.external_state.insert(1, 10); - assert_eq!(db.b(1), 10); - assert_eq!(db.a(1), 10); - - // We changed external state, but haven't signaled about this yet, - // so we expect to see the old answer - db.external_state.insert(1, 92); - assert_eq!(db.b(1), 10); - assert_eq!(db.a(1), 10); - - AQuery.in_db_mut(&mut db).invalidate(&1); - assert_eq!(db.b(1), 92); - assert_eq!(db.a(1), 92); - - // Downstream queries should also be rerun if we call `a` first. 
- db.external_state.insert(1, 50); - AQuery.in_db_mut(&mut db).invalidate(&1); - assert_eq!(db.a(1), 50); - assert_eq!(db.b(1), 50); -} - -#[test] -fn on_demand_input_durability() { - let mut db = Database::default(); - - let events = Rc::new(RefCell::new(vec![])); - db.on_event = Some(Box::new({ - let events = events.clone(); - move |db, event| { - if let EventKind::WillCheckCancellation = event.kind { - // these events are not interesting - } else { - events.borrow_mut().push(format!("{:?}", event.debug(db))) - } - } - })); - - events.replace(vec![]); - db.external_state.insert(1, 10); - db.external_state.insert(2, 20); - assert_eq!(db.b(1), 10); - assert_eq!(db.b(2), 20); - insta::assert_debug_snapshot!(events, @r###" - RefCell { - value: [ - "Event { runtime_id: RuntimeId { counter: 0 }, kind: WillExecute { database_key: b(1) } }", - "Event { runtime_id: RuntimeId { counter: 0 }, kind: WillExecute { database_key: a(1) } }", - "Event { runtime_id: RuntimeId { counter: 0 }, kind: WillExecute { database_key: b(2) } }", - "Event { runtime_id: RuntimeId { counter: 0 }, kind: WillExecute { database_key: a(2) } }", - ], - } - "###); - - eprintln!("------------------"); - db.salsa_runtime_mut().synthetic_write(Durability::LOW); - events.replace(vec![]); - assert_eq!(db.c(1), 10); - assert_eq!(db.c(2), 20); - // Re-execute `a(2)` because that has low durability, but not `a(1)` - insta::assert_debug_snapshot!(events, @r###" - RefCell { - value: [ - "Event { runtime_id: RuntimeId { counter: 0 }, kind: WillExecute { database_key: c(1) } }", - "Event { runtime_id: RuntimeId { counter: 0 }, kind: DidValidateMemoizedValue { database_key: b(1) } }", - "Event { runtime_id: RuntimeId { counter: 0 }, kind: WillExecute { database_key: c(2) } }", - "Event { runtime_id: RuntimeId { counter: 0 }, kind: WillExecute { database_key: a(2) } }", - "Event { runtime_id: RuntimeId { counter: 0 }, kind: DidValidateMemoizedValue { database_key: b(2) } }", - ], - } - "###); - - 
eprintln!("------------------"); - db.salsa_runtime_mut().synthetic_write(Durability::HIGH); - events.replace(vec![]); - assert_eq!(db.c(1), 10); - assert_eq!(db.c(2), 20); - // Re-execute both `a(1)` and `a(2)`, but we don't re-execute any `b` queries as the - // result didn't actually change. - insta::assert_debug_snapshot!(events, @r###" - RefCell { - value: [ - "Event { runtime_id: RuntimeId { counter: 0 }, kind: WillExecute { database_key: a(1) } }", - "Event { runtime_id: RuntimeId { counter: 0 }, kind: DidValidateMemoizedValue { database_key: b(1) } }", - "Event { runtime_id: RuntimeId { counter: 0 }, kind: DidValidateMemoizedValue { database_key: c(1) } }", - "Event { runtime_id: RuntimeId { counter: 0 }, kind: WillExecute { database_key: a(2) } }", - "Event { runtime_id: RuntimeId { counter: 0 }, kind: DidValidateMemoizedValue { database_key: b(2) } }", - "Event { runtime_id: RuntimeId { counter: 0 }, kind: DidValidateMemoizedValue { database_key: c(2) } }", - ], - } - "###); -} diff --git a/tests/panic_safely.rs b/tests/panic_safely.rs deleted file mode 100644 index 551e6c7ae..000000000 --- a/tests/panic_safely.rs +++ /dev/null @@ -1,95 +0,0 @@ -use salsa::{Database, ParallelDatabase, Snapshot}; -use std::panic::{self, AssertUnwindSafe}; -use std::sync::atomic::{AtomicU32, Ordering::SeqCst}; - -#[salsa::query_group(PanicSafelyStruct)] -trait PanicSafelyDatabase: salsa::Database { - #[salsa::input] - fn one(&self) -> usize; - - fn panic_safely(&self) -> (); - - fn outer(&self) -> (); -} - -fn panic_safely(db: &dyn PanicSafelyDatabase) { - assert_eq!(db.one(), 1); -} - -static OUTER_CALLS: AtomicU32 = AtomicU32::new(0); - -fn outer(db: &dyn PanicSafelyDatabase) { - OUTER_CALLS.fetch_add(1, SeqCst); - db.panic_safely(); -} - -#[salsa::database(PanicSafelyStruct)] -#[derive(Default)] -struct DatabaseStruct { - storage: salsa::Storage, -} - -impl salsa::Database for DatabaseStruct {} - -impl salsa::ParallelDatabase for DatabaseStruct { - fn snapshot(&self) -> 
Snapshot { - Snapshot::new(DatabaseStruct { - storage: self.storage.snapshot(), - }) - } -} - -#[test] -fn should_panic_safely() { - let mut db = DatabaseStruct::default(); - db.set_one(0); - - // Invoke `db.panic_safely() without having set `db.one`. `db.one` will - // return 0 and we should catch the panic. - let result = panic::catch_unwind(AssertUnwindSafe({ - let db = db.snapshot(); - move || db.panic_safely() - })); - assert!(result.is_err()); - - // Set `db.one` to 1 and assert ok - db.set_one(1); - let result = panic::catch_unwind(AssertUnwindSafe(|| db.panic_safely())); - assert!(result.is_ok()); - - // Check, that memoized outer is not invalidated by a panic - { - assert_eq!(OUTER_CALLS.load(SeqCst), 0); - db.outer(); - assert_eq!(OUTER_CALLS.load(SeqCst), 1); - - db.set_one(0); - let result = panic::catch_unwind(AssertUnwindSafe(|| db.outer())); - assert!(result.is_err()); - assert_eq!(OUTER_CALLS.load(SeqCst), 1); - - db.set_one(1); - db.outer(); - assert_eq!(OUTER_CALLS.load(SeqCst), 1); - } -} - -#[test] -fn storages_are_unwind_safe() { - fn check_unwind_safe() {} - check_unwind_safe::<&DatabaseStruct>(); -} - -#[test] -fn panics_clear_query_stack() { - let db = DatabaseStruct::default(); - - // Invoke `db.panic_if_not_one() without having set `db.input`. `db.input` - // will default to 0 and we should catch the panic. - let result = panic::catch_unwind(AssertUnwindSafe(|| db.panic_safely())); - assert!(result.is_err()); - - // The database has been poisoned and any attempt to increment the - // revision should panic. - assert_eq!(db.salsa_runtime().active_query(), None); -} diff --git a/tests/parallel/cancellation.rs b/tests/parallel/cancellation.rs deleted file mode 100644 index 9a92e5cc1..000000000 --- a/tests/parallel/cancellation.rs +++ /dev/null @@ -1,132 +0,0 @@ -use crate::setup::{CancellationFlag, Knobs, ParDatabase, ParDatabaseImpl, WithValue}; -use salsa::{Cancelled, ParallelDatabase}; - -macro_rules! 
assert_cancelled { - ($thread:expr) => { - match $thread.join() { - Ok(value) => panic!("expected cancellation, got {:?}", value), - Err(payload) => match payload.downcast::() { - Ok(_) => {} - Err(payload) => ::std::panic::resume_unwind(payload), - }, - } - }; -} - -/// Add test where a call to `sum` is cancelled by a simultaneous -/// write. Check that we recompute the result in next revision, even -/// though none of the inputs have changed. -#[test] -fn in_par_get_set_cancellation_immediate() { - let mut db = ParDatabaseImpl::default(); - - db.set_input('a', 100); - db.set_input('b', 10); - db.set_input('c', 1); - db.set_input('d', 0); - - let thread1 = std::thread::spawn({ - let db = db.snapshot(); - move || { - // This will not return until it sees cancellation is - // signaled. - db.knobs().sum_signal_on_entry.with_value(1, || { - db.knobs() - .sum_wait_for_cancellation - .with_value(CancellationFlag::Panic, || db.sum("abc")) - }) - } - }); - - // Wait until we have entered `sum` in the other thread. - db.wait_for(1); - - // Try to set the input. This will signal cancellation. - db.set_input('d', 1000); - - // This should re-compute the value (even though no input has changed). - let thread2 = std::thread::spawn({ - let db = db.snapshot(); - move || db.sum("abc") - }); - - assert_eq!(db.sum("d"), 1000); - assert_cancelled!(thread1); - assert_eq!(thread2.join().unwrap(), 111); -} - -/// Here, we check that `sum`'s cancellation is propagated -/// to `sum2` properly. -#[test] -fn in_par_get_set_cancellation_transitive() { - let mut db = ParDatabaseImpl::default(); - - db.set_input('a', 100); - db.set_input('b', 10); - db.set_input('c', 1); - db.set_input('d', 0); - - let thread1 = std::thread::spawn({ - let db = db.snapshot(); - move || { - // This will not return until it sees cancellation is - // signaled. 
- db.knobs().sum_signal_on_entry.with_value(1, || { - db.knobs() - .sum_wait_for_cancellation - .with_value(CancellationFlag::Panic, || db.sum2("abc")) - }) - } - }); - - // Wait until we have entered `sum` in the other thread. - db.wait_for(1); - - // Try to set the input. This will signal cancellation. - db.set_input('d', 1000); - - // This should re-compute the value (even though no input has changed). - let thread2 = std::thread::spawn({ - let db = db.snapshot(); - move || db.sum2("abc") - }); - - assert_eq!(db.sum2("d"), 1000); - assert_cancelled!(thread1); - assert_eq!(thread2.join().unwrap(), 111); -} - -/// https://github.com/salsa-rs/salsa/issues/66 -#[test] -fn no_back_dating_in_cancellation() { - let mut db = ParDatabaseImpl::default(); - - db.set_input('a', 1); - let thread1 = std::thread::spawn({ - let db = db.snapshot(); - move || { - // Here we compute a long-chain of queries, - // but the last one gets cancelled. - db.knobs().sum_signal_on_entry.with_value(1, || { - db.knobs() - .sum_wait_for_cancellation - .with_value(CancellationFlag::Panic, || db.sum3("a")) - }) - } - }); - - db.wait_for(1); - - // Set unrelated input to bump revision - db.set_input('b', 2); - - // Here we should recompuet the whole chain again, clearing the cancellation - // state. If we get `usize::max()` here, it is a bug! - assert_eq!(db.sum3("a"), 1); - - assert_cancelled!(thread1); - - db.set_input('a', 3); - db.set_input('a', 4); - assert_eq!(db.sum3("ab"), 6); -} diff --git a/tests/parallel/frozen.rs b/tests/parallel/frozen.rs deleted file mode 100644 index 41c0b8e9f..000000000 --- a/tests/parallel/frozen.rs +++ /dev/null @@ -1,58 +0,0 @@ -use crate::setup::{ParDatabase, ParDatabaseImpl}; -use crate::signal::Signal; -use salsa::{Database, ParallelDatabase}; -use std::{ - panic::{catch_unwind, AssertUnwindSafe}, - sync::Arc, -}; - -/// Add test where a call to `sum` is cancelled by a simultaneous -/// write. 
Check that we recompute the result in next revision, even -/// though none of the inputs have changed. -#[test] -fn in_par_get_set_cancellation() { - let mut db = ParDatabaseImpl::default(); - - db.set_input('a', 1); - - let signal = Arc::new(Signal::default()); - - let thread1 = std::thread::spawn({ - let db = db.snapshot(); - let signal = signal.clone(); - move || { - // Check that cancellation flag is not yet set, because - // `set` cannot have been called yet. - catch_unwind(AssertUnwindSafe(|| db.unwind_if_cancelled())).unwrap(); - - // Signal other thread to proceed. - signal.signal(1); - - // Wait for other thread to signal cancellation - catch_unwind(AssertUnwindSafe(|| loop { - db.unwind_if_cancelled(); - std::thread::yield_now(); - })) - .unwrap_err(); - } - }); - - let thread2 = std::thread::spawn({ - move || { - // Wait until thread 1 has asserted that they are not cancelled - // before we invoke `set.` - signal.wait_for(1); - - // This will block until thread1 drops the revision lock. - let value = db.remove_input('a') + 1; - db.set_input('a', value); - - db.input('a') - } - }); - - thread1.join().unwrap(); - - let c = thread2.join().unwrap(); - assert_eq!(c, 2); -} diff --git a/tests/parallel/independent.rs b/tests/parallel/independent.rs deleted file mode 100644 index bd6ba3bf9..000000000 --- a/tests/parallel/independent.rs +++ /dev/null @@ -1,29 +0,0 @@ -use crate::setup::{ParDatabase, ParDatabaseImpl}; -use salsa::ParallelDatabase; - -/// Test two `sum` queries (on distinct keys) executing in different -/// threads. Really just a test that `snapshot` etc compiles. 
-#[test] -fn in_par_two_independent_queries() { - let mut db = ParDatabaseImpl::default(); - - db.set_input('a', 100); - db.set_input('b', 10); - db.set_input('c', 1); - db.set_input('d', 200); - db.set_input('e', 20); - db.set_input('f', 2); - - let thread1 = std::thread::spawn({ - let db = db.snapshot(); - move || db.sum("abc") - }); - - let thread2 = std::thread::spawn({ - let db = db.snapshot(); - move || db.sum("def") - }); - - assert_eq!(thread1.join().unwrap(), 111); - assert_eq!(thread2.join().unwrap(), 222); -} diff --git a/tests/parallel/main.rs b/tests/parallel/main.rs deleted file mode 100644 index 31c0da183..000000000 --- a/tests/parallel/main.rs +++ /dev/null @@ -1,13 +0,0 @@ -mod setup; - -mod cancellation; -mod frozen; -mod independent; -mod parallel_cycle_all_recover; -mod parallel_cycle_mid_recover; -mod parallel_cycle_none_recover; -mod parallel_cycle_one_recovers; -mod race; -mod signal; -mod stress; -mod true_parallel; diff --git a/tests/parallel/parallel_cycle_all_recover.rs b/tests/parallel/parallel_cycle_all_recover.rs deleted file mode 100644 index aea965c02..000000000 --- a/tests/parallel/parallel_cycle_all_recover.rs +++ /dev/null @@ -1,110 +0,0 @@ -//! Test for cycle recover spread across two threads. -//! See `../cycles.rs` for a complete listing of cycle tests, -//! both intra and cross thread. - -use crate::setup::{Knobs, ParDatabaseImpl}; -use salsa::ParallelDatabase; -use test_log::test; - -// Recover cycle test: -// -// The pattern is as follows. 
-// -// Thread A Thread B -// -------- -------- -// a1 b1 -// | wait for stage 1 (blocks) -// signal stage 1 | -// wait for stage 2 (blocks) (unblocked) -// | signal stage 2 -// (unblocked) wait for stage 3 (blocks) -// a2 | -// b1 (blocks -> stage 3) | -// | (unblocked) -// | b2 -// | a1 (cycle detected, recovers) -// | b2 completes, recovers -// | b1 completes, recovers -// a2 sees cycle, recovers -// a1 completes, recovers - -#[test] -fn parallel_cycle_all_recover() { - let db = ParDatabaseImpl::default(); - db.knobs().signal_on_will_block.set(3); - - let thread_a = std::thread::spawn({ - let db = db.snapshot(); - move || db.a1(1) - }); - - let thread_b = std::thread::spawn({ - let db = db.snapshot(); - move || db.b1(1) - }); - - assert_eq!(thread_a.join().unwrap(), 11); - assert_eq!(thread_b.join().unwrap(), 21); -} - -#[salsa::query_group(ParallelCycleAllRecover)] -pub(crate) trait TestDatabase: Knobs { - #[salsa::cycle(recover_a1)] - fn a1(&self, key: i32) -> i32; - - #[salsa::cycle(recover_a2)] - fn a2(&self, key: i32) -> i32; - - #[salsa::cycle(recover_b1)] - fn b1(&self, key: i32) -> i32; - - #[salsa::cycle(recover_b2)] - fn b2(&self, key: i32) -> i32; -} - -fn recover_a1(_db: &dyn TestDatabase, _cycle: &salsa::Cycle, key: &i32) -> i32 { - log::debug!("recover_a1"); - key * 10 + 1 -} - -fn recover_a2(_db: &dyn TestDatabase, _cycle: &salsa::Cycle, key: &i32) -> i32 { - log::debug!("recover_a2"); - key * 10 + 2 -} - -fn recover_b1(_db: &dyn TestDatabase, _cycle: &salsa::Cycle, key: &i32) -> i32 { - log::debug!("recover_b1"); - key * 20 + 1 -} - -fn recover_b2(_db: &dyn TestDatabase, _cycle: &salsa::Cycle, key: &i32) -> i32 { - log::debug!("recover_b2"); - key * 20 + 2 -} - -fn a1(db: &dyn TestDatabase, key: i32) -> i32 { - // Wait to create the cycle until both threads have entered - db.signal(1); - db.wait_for(2); - - db.a2(key) -} - -fn a2(db: &dyn TestDatabase, key: i32) -> i32 { - db.b1(key) -} - -fn b1(db: &dyn TestDatabase, key: i32) -> i32 { - // Wait 
to create the cycle until both threads have entered - db.wait_for(1); - db.signal(2); - - // Wait for thread A to block on this thread - db.wait_for(3); - - db.b2(key) -} - -fn b2(db: &dyn TestDatabase, key: i32) -> i32 { - db.a1(key) -} diff --git a/tests/parallel/parallel_cycle_mid_recover.rs b/tests/parallel/parallel_cycle_mid_recover.rs deleted file mode 100644 index 698a38b24..000000000 --- a/tests/parallel/parallel_cycle_mid_recover.rs +++ /dev/null @@ -1,110 +0,0 @@ -//! Test for cycle recover spread across two threads. -//! See `../cycles.rs` for a complete listing of cycle tests, -//! both intra and cross thread. - -use crate::setup::{Knobs, ParDatabaseImpl}; -use salsa::ParallelDatabase; -use test_log::test; - -// Recover cycle test: -// -// The pattern is as follows. -// -// Thread A Thread B -// -------- -------- -// a1 b1 -// | wait for stage 1 (blocks) -// signal stage 1 | -// wait for stage 2 (blocks) (unblocked) -// | | -// | b2 -// | b3 -// | a1 (blocks -> stage 2) -// (unblocked) | -// a2 (cycle detected) | -// b3 recovers -// b2 resumes -// b1 panics because bug - -#[test] -fn parallel_cycle_mid_recovers() { - let db = ParDatabaseImpl::default(); - db.knobs().signal_on_will_block.set(2); - - let thread_a = std::thread::spawn({ - let db = db.snapshot(); - move || db.a1(1) - }); - - let thread_b = std::thread::spawn({ - let db = db.snapshot(); - move || db.b1(1) - }); - - // We expect that the recovery function yields - // `1 * 20 + 2`, which is returned (and forwarded) - // to b1, and from there to a2 and a1. 
- assert_eq!(thread_a.join().unwrap(), 22); - assert_eq!(thread_b.join().unwrap(), 22); -} - -#[salsa::query_group(ParallelCycleMidRecovers)] -pub(crate) trait TestDatabase: Knobs { - fn a1(&self, key: i32) -> i32; - - fn a2(&self, key: i32) -> i32; - - #[salsa::cycle(recover_b1)] - fn b1(&self, key: i32) -> i32; - - fn b2(&self, key: i32) -> i32; - - #[salsa::cycle(recover_b3)] - fn b3(&self, key: i32) -> i32; -} - -fn recover_b1(_db: &dyn TestDatabase, _cycle: &salsa::Cycle, key: &i32) -> i32 { - log::debug!("recover_b1"); - key * 20 + 2 -} - -fn recover_b3(_db: &dyn TestDatabase, _cycle: &salsa::Cycle, key: &i32) -> i32 { - log::debug!("recover_b1"); - key * 200 + 2 -} - -fn a1(db: &dyn TestDatabase, key: i32) -> i32 { - // tell thread b we have started - db.signal(1); - - // wait for thread b to block on a1 - db.wait_for(2); - - db.a2(key) -} - -fn a2(db: &dyn TestDatabase, key: i32) -> i32 { - // create the cycle - db.b1(key) -} - -fn b1(db: &dyn TestDatabase, key: i32) -> i32 { - // wait for thread a to have started - db.wait_for(1); - - db.b2(key); - - 0 -} - -fn b2(db: &dyn TestDatabase, key: i32) -> i32 { - // will encounter a cycle but recover - db.b3(key); - db.b1(key); // hasn't recovered yet - 0 -} - -fn b3(db: &dyn TestDatabase, key: i32) -> i32 { - // will block on thread a, signaling stage 2 - db.a1(key) -} diff --git a/tests/parallel/parallel_cycle_none_recover.rs b/tests/parallel/parallel_cycle_none_recover.rs deleted file mode 100644 index b413599a4..000000000 --- a/tests/parallel/parallel_cycle_none_recover.rs +++ /dev/null @@ -1,71 +0,0 @@ -//! Test a cycle where no queries recover that occurs across threads. -//! See the `../cycles.rs` for a complete listing of cycle tests, -//! both intra and cross thread. 
- -use crate::setup::{Knobs, ParDatabaseImpl}; -use salsa::ParallelDatabase; -use test_log::test; - -#[test] -fn parallel_cycle_none_recover() { - let db = ParDatabaseImpl::default(); - db.knobs().signal_on_will_block.set(3); - - let thread_a = std::thread::spawn({ - let db = db.snapshot(); - move || db.a(-1) - }); - - let thread_b = std::thread::spawn({ - let db = db.snapshot(); - move || db.b(-1) - }); - - // We expect B to panic because it detects a cycle (it is the one that calls A, ultimately). - // Right now, it panics with a string. - let err_b = thread_b.join().unwrap_err(); - if let Some(c) = err_b.downcast_ref::() { - insta::assert_debug_snapshot!(c.unexpected_participants(&db), @r###" - [ - "a(-1)", - "b(-1)", - ] - "###); - } else { - panic!("b failed in an unexpected way: {:?}", err_b); - } - - // We expect A to propagate a panic, which causes us to use the sentinel - // type `Canceled`. - assert!(thread_a - .join() - .unwrap_err() - .downcast_ref::() - .is_some()); -} - -#[salsa::query_group(ParallelCycleNoneRecover)] -pub(crate) trait TestDatabase: Knobs { - fn a(&self, key: i32) -> i32; - fn b(&self, key: i32) -> i32; -} - -fn a(db: &dyn TestDatabase, key: i32) -> i32 { - // Wait to create the cycle until both threads have entered - db.signal(1); - db.wait_for(2); - - db.b(key) -} - -fn b(db: &dyn TestDatabase, key: i32) -> i32 { - // Wait to create the cycle until both threads have entered - db.wait_for(1); - db.signal(2); - - // Wait for thread A to block on this thread - db.wait_for(3); - - // Now try to execute A - db.a(key) -} diff --git a/tests/parallel/parallel_cycle_one_recovers.rs b/tests/parallel/parallel_cycle_one_recovers.rs deleted file mode 100644 index 2980ec270..000000000 --- a/tests/parallel/parallel_cycle_one_recovers.rs +++ /dev/null @@ -1,95 +0,0 @@ -//! Test for cycle recover spread across two threads. -//! See `../cycles.rs` for a complete listing of cycle tests, -//! both intra and cross thread. 
- -use crate::setup::{Knobs, ParDatabaseImpl}; -use salsa::ParallelDatabase; -use test_log::test; - -// Recover cycle test: -// -// The pattern is as follows. -// -// Thread A Thread B -// -------- -------- -// a1 b1 -// | wait for stage 1 (blocks) -// signal stage 1 | -// wait for stage 2 (blocks) (unblocked) -// | signal stage 2 -// (unblocked) wait for stage 3 (blocks) -// a2 | -// b1 (blocks -> stage 3) | -// | (unblocked) -// | b2 -// | a1 (cycle detected) -// a2 recovery fn executes | -// a1 completes normally | -// b2 completes, recovers -// b1 completes, recovers - -#[test] -fn parallel_cycle_one_recovers() { - let db = ParDatabaseImpl::default(); - db.knobs().signal_on_will_block.set(3); - - let thread_a = std::thread::spawn({ - let db = db.snapshot(); - move || db.a1(1) - }); - - let thread_b = std::thread::spawn({ - let db = db.snapshot(); - move || db.b1(1) - }); - - // We expect that the recovery function yields - // `1 * 20 + 2`, which is returned (and forwarded) - // to b1, and from there to a2 and a1. 
- assert_eq!(thread_a.join().unwrap(), 22); - assert_eq!(thread_b.join().unwrap(), 22); -} - -#[salsa::query_group(ParallelCycleOneRecovers)] -pub(crate) trait TestDatabase: Knobs { - fn a1(&self, key: i32) -> i32; - - #[salsa::cycle(recover)] - fn a2(&self, key: i32) -> i32; - - fn b1(&self, key: i32) -> i32; - - fn b2(&self, key: i32) -> i32; -} - -fn recover(_db: &dyn TestDatabase, _cycle: &salsa::Cycle, key: &i32) -> i32 { - log::debug!("recover"); - key * 20 + 2 -} - -fn a1(db: &dyn TestDatabase, key: i32) -> i32 { - // Wait to create the cycle until both threads have entered - db.signal(1); - db.wait_for(2); - - db.a2(key) -} - -fn a2(db: &dyn TestDatabase, key: i32) -> i32 { - db.b1(key) -} - -fn b1(db: &dyn TestDatabase, key: i32) -> i32 { - // Wait to create the cycle until both threads have entered - db.wait_for(1); - db.signal(2); - - // Wait for thread A to block on this thread - db.wait_for(3); - - db.b2(key) -} - -fn b2(db: &dyn TestDatabase, key: i32) -> i32 { - db.a1(key) -} diff --git a/tests/parallel/race.rs b/tests/parallel/race.rs deleted file mode 100644 index 0d89f70fc..000000000 --- a/tests/parallel/race.rs +++ /dev/null @@ -1,38 +0,0 @@ -use std::panic::AssertUnwindSafe; - -use crate::setup::{ParDatabase, ParDatabaseImpl}; -use salsa::{Cancelled, ParallelDatabase}; - -/// Test where a read and a set are racing with one another. -/// Should be atomic. -#[test] -fn in_par_get_set_race() { - let mut db = ParDatabaseImpl::default(); - - db.set_input('a', 100); - db.set_input('b', 10); - db.set_input('c', 1); - - let thread1 = std::thread::spawn({ - let db = db.snapshot(); - move || Cancelled::catch(AssertUnwindSafe(|| db.sum("abc"))) - }); - - let thread2 = std::thread::spawn(move || { - let value = db.remove_input('a') * 10; - db.set_input('a', value); - db.sum("a") - }); - - // If the 1st thread runs first, you get 111, otherwise you get - // 1011; if they run concurrently and the 1st thread observes the - // cancellation, it'll unwind. 
- let result1 = thread1.join().unwrap(); - if let Ok(value1) = result1 { - assert!(value1 == 111 || value1 == 1011, "illegal result {}", value1); - } - - // thread2 can not observe a cancellation because it performs a - // database write before running any other queries. - assert_eq!(thread2.join().unwrap(), 1000); -} diff --git a/tests/parallel/setup.rs b/tests/parallel/setup.rs deleted file mode 100644 index 9acc34c8b..000000000 --- a/tests/parallel/setup.rs +++ /dev/null @@ -1,197 +0,0 @@ -use crate::signal::Signal; -use salsa::Database; -use salsa::ParallelDatabase; -use salsa::Snapshot; -use std::sync::Arc; -use std::{ - cell::Cell, - panic::{catch_unwind, resume_unwind, AssertUnwindSafe}, -}; - -#[salsa::query_group(Par)] -pub(crate) trait ParDatabase: Knobs { - #[salsa::input] - fn input(&self, key: char) -> usize; - - fn sum(&self, key: &'static str) -> usize; - - /// Invokes `sum` - fn sum2(&self, key: &'static str) -> usize; - - /// Invokes `sum` but doesn't really care about the result. - fn sum2_drop_sum(&self, key: &'static str) -> usize; - - /// Invokes `sum2` - fn sum3(&self, key: &'static str) -> usize; - - /// Invokes `sum2_drop_sum` - fn sum3_drop_sum(&self, key: &'static str) -> usize; -} - -/// Various "knobs" and utilities used by tests to force -/// a certain behavior. 
-pub(crate) trait Knobs { - fn knobs(&self) -> &KnobsStruct; - - fn signal(&self, stage: usize); - - fn wait_for(&self, stage: usize); -} - -pub(crate) trait WithValue { - fn with_value(&self, value: T, closure: impl FnOnce() -> R) -> R; -} - -impl WithValue for Cell { - fn with_value(&self, value: T, closure: impl FnOnce() -> R) -> R { - let old_value = self.replace(value); - - let result = catch_unwind(AssertUnwindSafe(closure)); - - self.set(old_value); - - match result { - Ok(r) => r, - Err(payload) => resume_unwind(payload), - } - } -} - -#[derive(Clone, Copy, PartialEq, Eq, Default)] -pub(crate) enum CancellationFlag { - #[default] - Down, - Panic, -} - -/// Various "knobs" that can be used to customize how the queries -/// behave on one specific thread. Note that this state is -/// intentionally thread-local (apart from `signal`). -#[derive(Clone, Default)] -pub(crate) struct KnobsStruct { - /// A kind of flexible barrier used to coordinate execution across - /// threads to ensure we reach various weird states. - pub(crate) signal: Arc, - - /// When this database is about to block, send a signal. - pub(crate) signal_on_will_block: Cell, - - /// Invocations of `sum` will signal this stage on entry. - pub(crate) sum_signal_on_entry: Cell, - - /// Invocations of `sum` will wait for this stage on entry. - pub(crate) sum_wait_for_on_entry: Cell, - - /// If true, invocations of `sum` will panic before they exit. - pub(crate) sum_should_panic: Cell, - - /// If true, invocations of `sum` will wait for cancellation before - /// they exit. - pub(crate) sum_wait_for_cancellation: Cell, - - /// Invocations of `sum` will wait for this stage prior to exiting. - pub(crate) sum_wait_for_on_exit: Cell, - - /// Invocations of `sum` will signal this stage prior to exiting. 
- pub(crate) sum_signal_on_exit: Cell, - - /// Invocations of `sum3_drop_sum` will panic unconditionally - pub(crate) sum3_drop_sum_should_panic: Cell, -} - -fn sum(db: &dyn ParDatabase, key: &'static str) -> usize { - let mut sum = 0; - - db.signal(db.knobs().sum_signal_on_entry.get()); - - db.wait_for(db.knobs().sum_wait_for_on_entry.get()); - - if db.knobs().sum_should_panic.get() { - panic!("query set to panic before exit") - } - - for ch in key.chars() { - sum += db.input(ch); - } - - match db.knobs().sum_wait_for_cancellation.get() { - CancellationFlag::Down => (), - CancellationFlag::Panic => { - log::debug!("waiting for cancellation"); - loop { - db.unwind_if_cancelled(); - std::thread::yield_now(); - } - } - } - - db.wait_for(db.knobs().sum_wait_for_on_exit.get()); - - db.signal(db.knobs().sum_signal_on_exit.get()); - - sum -} - -fn sum2(db: &dyn ParDatabase, key: &'static str) -> usize { - db.sum(key) -} - -fn sum2_drop_sum(db: &dyn ParDatabase, key: &'static str) -> usize { - let _ = db.sum(key); - 22 -} - -fn sum3(db: &dyn ParDatabase, key: &'static str) -> usize { - db.sum2(key) -} - -fn sum3_drop_sum(db: &dyn ParDatabase, key: &'static str) -> usize { - if db.knobs().sum3_drop_sum_should_panic.get() { - panic!("sum3_drop_sum executed") - } - db.sum2_drop_sum(key) -} - -#[salsa::database( - Par, - crate::parallel_cycle_all_recover::ParallelCycleAllRecover, - crate::parallel_cycle_none_recover::ParallelCycleNoneRecover, - crate::parallel_cycle_mid_recover::ParallelCycleMidRecovers, - crate::parallel_cycle_one_recovers::ParallelCycleOneRecovers -)] -#[derive(Default)] -pub(crate) struct ParDatabaseImpl { - storage: salsa::Storage, - knobs: KnobsStruct, -} - -impl Database for ParDatabaseImpl { - fn salsa_event(&self, event: salsa::Event) { - if let salsa::EventKind::WillBlockOn { .. 
} = event.kind { - self.signal(self.knobs().signal_on_will_block.get()); - } - } -} - -impl ParallelDatabase for ParDatabaseImpl { - fn snapshot(&self) -> Snapshot { - Snapshot::new(ParDatabaseImpl { - storage: self.storage.snapshot(), - knobs: self.knobs.clone(), - }) - } -} - -impl Knobs for ParDatabaseImpl { - fn knobs(&self) -> &KnobsStruct { - &self.knobs - } - - fn signal(&self, stage: usize) { - self.knobs.signal.signal(stage); - } - - fn wait_for(&self, stage: usize) { - self.knobs.signal.wait_for(stage); - } -} diff --git a/tests/parallel/signal.rs b/tests/parallel/signal.rs deleted file mode 100644 index 4072a30af..000000000 --- a/tests/parallel/signal.rs +++ /dev/null @@ -1,40 +0,0 @@ -use parking_lot::{Condvar, Mutex}; - -#[derive(Default)] -pub(crate) struct Signal { - value: Mutex, - cond_var: Condvar, -} - -impl Signal { - pub(crate) fn signal(&self, stage: usize) { - log::debug!("signal({})", stage); - - // This check avoids acquiring the lock for things that will - // clearly be a no-op. Not *necessary* but helps to ensure we - // are more likely to encounter weird race conditions; - // otherwise calls to `sum` will tend to be unnecessarily - // synchronous. - if stage > 0 { - let mut v = self.value.lock(); - if stage > *v { - *v = stage; - self.cond_var.notify_all(); - } - } - } - - /// Waits until the given condition is true; the fn is invoked - /// with the current stage. - pub(crate) fn wait_for(&self, stage: usize) { - log::debug!("wait_for({})", stage); - - // As above, avoid lock if clearly a no-op. 
- if stage > 0 { - let mut v = self.value.lock(); - while *v < stage { - self.cond_var.wait(&mut v); - } - } - } -} diff --git a/tests/parallel/stress.rs b/tests/parallel/stress.rs deleted file mode 100644 index ac8c3092f..000000000 --- a/tests/parallel/stress.rs +++ /dev/null @@ -1,184 +0,0 @@ -use rand::seq::SliceRandom; -use rand::Rng; - -use salsa::ParallelDatabase; -use salsa::Snapshot; -use salsa::{Cancelled, Database}; - -// Number of operations a reader performs -const N_MUTATOR_OPS: usize = 100; -const N_READER_OPS: usize = 100; - -#[salsa::query_group(Stress)] -trait StressDatabase: salsa::Database { - #[salsa::input] - fn a(&self, key: usize) -> usize; - - fn b(&self, key: usize) -> usize; - - fn c(&self, key: usize) -> usize; -} - -fn b(db: &dyn StressDatabase, key: usize) -> usize { - db.unwind_if_cancelled(); - db.a(key) -} - -fn c(db: &dyn StressDatabase, key: usize) -> usize { - db.b(key) -} - -#[salsa::database(Stress)] -#[derive(Default)] -struct StressDatabaseImpl { - storage: salsa::Storage, -} - -impl salsa::Database for StressDatabaseImpl {} - -impl salsa::ParallelDatabase for StressDatabaseImpl { - fn snapshot(&self) -> Snapshot { - Snapshot::new(StressDatabaseImpl { - storage: self.storage.snapshot(), - }) - } -} - -#[derive(Clone, Copy, Debug)] -enum Query { - A, - B, - C, -} - -enum MutatorOp { - WriteOp(WriteOp), - LaunchReader { - ops: Vec, - check_cancellation: bool, - }, -} - -#[derive(Debug)] -enum WriteOp { - AddA(usize, isize), - SetA(usize, usize), -} - -#[derive(Debug)] -enum ReadOp { - Get(Query, usize), -} - -impl rand::distributions::Distribution for rand::distributions::Standard { - fn sample(&self, rng: &mut R) -> Query { - *[Query::A, Query::B, Query::C].choose(rng).unwrap() - } -} - -impl rand::distributions::Distribution for rand::distributions::Standard { - fn sample(&self, rng: &mut R) -> MutatorOp { - if rng.gen_bool(0.5) { - MutatorOp::WriteOp(rng.gen()) - } else { - MutatorOp::LaunchReader { - ops: 
(0..N_READER_OPS).map(|_| rng.gen()).collect(), - check_cancellation: rng.gen(), - } - } - } -} - -impl rand::distributions::Distribution for rand::distributions::Standard { - fn sample(&self, rng: &mut R) -> WriteOp { - let key = rng.gen::() % 10; - let value = rng.gen::() % 10; - if rng.gen_bool(0.5) { - WriteOp::AddA(key, value as isize - 5) - } else { - WriteOp::SetA(key, value) - } - } -} - -impl rand::distributions::Distribution for rand::distributions::Standard { - fn sample(&self, rng: &mut R) -> ReadOp { - let query = rng.gen::(); - let key = rng.gen::() % 10; - ReadOp::Get(query, key) - } -} - -fn db_reader_thread(db: &StressDatabaseImpl, ops: Vec, check_cancellation: bool) { - for op in ops { - if check_cancellation { - db.unwind_if_cancelled(); - } - op.execute(db); - } -} - -impl WriteOp { - fn execute(self, db: &mut StressDatabaseImpl) { - match self { - WriteOp::AddA(key, value_delta) => { - let value = db.remove_a(key); - let value = (value as isize + value_delta) as usize; - db.set_a(key, value); - } - WriteOp::SetA(key, value) => { - db.set_a(key, value); - } - } - } -} - -impl ReadOp { - fn execute(self, db: &StressDatabaseImpl) { - match self { - ReadOp::Get(query, key) => match query { - Query::A => { - db.a(key); - } - Query::B => { - let _ = db.b(key); - } - Query::C => { - let _ = db.c(key); - } - }, - } - } -} - -#[test] -fn stress_test() { - let mut db = StressDatabaseImpl::default(); - for i in 0..10 { - db.set_a(i, i); - } - - let mut rng = rand::thread_rng(); - - // generate the ops that the mutator thread will perform - let write_ops: Vec = (0..N_MUTATOR_OPS).map(|_| rng.gen()).collect(); - - // execute the "main thread", which sometimes snapshots off other threads - let mut all_threads = vec![]; - for op in write_ops { - match op { - MutatorOp::WriteOp(w) => w.execute(&mut db), - MutatorOp::LaunchReader { - ops, - check_cancellation, - } => all_threads.push(std::thread::spawn({ - let db = db.snapshot(); - move || Cancelled::catch(|| 
db_reader_thread(&db, ops, check_cancellation)) - })), - } - } - - for thread in all_threads { - thread.join().unwrap().ok(); - } -} diff --git a/tests/parallel/true_parallel.rs b/tests/parallel/true_parallel.rs deleted file mode 100644 index 03432dca9..000000000 --- a/tests/parallel/true_parallel.rs +++ /dev/null @@ -1,126 +0,0 @@ -use crate::setup::{Knobs, ParDatabase, ParDatabaseImpl, WithValue}; -use salsa::ParallelDatabase; -use std::panic::{self, AssertUnwindSafe}; - -/// Test where two threads are executing sum. We show that they can -/// both be executing sum in parallel by having thread1 wait for -/// thread2 to send a signal before it leaves (similarly, thread2 -/// waits for thread1 to send a signal before it enters). -#[test] -fn true_parallel_different_keys() { - let mut db = ParDatabaseImpl::default(); - - db.set_input('a', 100); - db.set_input('b', 10); - db.set_input('c', 1); - - // Thread 1 will signal stage 1 when it enters and wait for stage 2. - let thread1 = std::thread::spawn({ - let db = db.snapshot(); - move || { - let v = db.knobs().sum_signal_on_entry.with_value(1, || { - db.knobs() - .sum_wait_for_on_exit - .with_value(2, || db.sum("a")) - }); - v - } - }); - - // Thread 2 will wait_for stage 1 when it enters and signal stage 2 - // when it leaves. - let thread2 = std::thread::spawn({ - let db = db.snapshot(); - move || { - let v = db.knobs().sum_wait_for_on_entry.with_value(1, || { - db.knobs().sum_signal_on_exit.with_value(2, || db.sum("b")) - }); - v - } - }); - - assert_eq!(thread1.join().unwrap(), 100); - assert_eq!(thread2.join().unwrap(), 10); -} - -/// Add a test that tries to trigger a conflict, where we fetch -/// `sum("abc")` from two threads simultaneously, and of them -/// therefore has to block. 
-#[test] -fn true_parallel_same_keys() { - let mut db = ParDatabaseImpl::default(); - - db.set_input('a', 100); - db.set_input('b', 10); - db.set_input('c', 1); - - // Thread 1 will wait_for a barrier in the start of `sum` - let thread1 = std::thread::spawn({ - let db = db.snapshot(); - move || { - let v = db.knobs().sum_signal_on_entry.with_value(1, || { - db.knobs() - .sum_wait_for_on_entry - .with_value(2, || db.sum("abc")) - }); - v - } - }); - - // Thread 2 will wait until Thread 1 has entered sum and then -- - // once it has set itself to block -- signal Thread 1 to - // continue. This way, we test out the mechanism of one thread - // blocking on another. - let thread2 = std::thread::spawn({ - let db = db.snapshot(); - move || { - db.knobs().signal.wait_for(1); - db.knobs().signal_on_will_block.set(2); - db.sum("abc") - } - }); - - assert_eq!(thread1.join().unwrap(), 111); - assert_eq!(thread2.join().unwrap(), 111); -} - -/// Add a test that tries to trigger a conflict, where we fetch `sum("a")` -/// from two threads simultaneously. After `thread2` begins blocking, -/// we force `thread1` to panic and should see that propagate to `thread2`. -#[test] -fn true_parallel_propagate_panic() { - let mut db = ParDatabaseImpl::default(); - - db.set_input('a', 1); - - // `thread1` will wait_for a barrier in the start of `sum`. Once it can - // continue, it will panic. - let thread1 = std::thread::spawn({ - let db = db.snapshot(); - move || { - let v = db.knobs().sum_signal_on_entry.with_value(1, || { - db.knobs().sum_wait_for_on_entry.with_value(2, || { - db.knobs().sum_should_panic.with_value(true, || db.sum("a")) - }) - }); - v - } - }); - - // `thread2` will wait until `thread1` has entered sum and then -- once it - // has set itself to block -- signal `thread1` to continue. 
- let thread2 = std::thread::spawn({ - let db = db.snapshot(); - move || { - db.knobs().signal.wait_for(1); - db.knobs().signal_on_will_block.set(2); - db.sum("a") - } - }); - - let result1 = panic::catch_unwind(AssertUnwindSafe(|| thread1.join().unwrap())); - let result2 = panic::catch_unwind(AssertUnwindSafe(|| thread2.join().unwrap())); - - assert!(result1.is_err()); - assert!(result2.is_err()); -} diff --git a/tests/remove_input.rs b/tests/remove_input.rs deleted file mode 100644 index da77592ae..000000000 --- a/tests/remove_input.rs +++ /dev/null @@ -1,48 +0,0 @@ -//! Test that transparent (uncached) queries work - -#[salsa::query_group(QueryGroupStorage)] -trait QueryGroup { - #[salsa::input] - fn input(&self, c: char) -> u32; - - fn increment(&self, c: char) -> u32; -} - -fn increment(db: &dyn QueryGroup, c: char) -> u32 { - db.input(c) + 1 -} - -#[salsa::database(QueryGroupStorage)] -#[derive(Default)] -struct Database { - storage: salsa::Storage, -} - -impl salsa::Database for Database {} - -#[test] -fn remove_input_from_cached_query() { - let mut db = Database::default(); - - db.set_input('a', 22); - db.set_input('b', 44); - assert_eq!(db.increment('a'), 23); - assert_eq!(db.increment('b'), 45); - - db.remove_input('a'); - assert_eq!(db.increment('b'), 45); -} - -#[test] -fn remove_and_restore_input_from_cached_query() { - let mut db = Database::default(); - - db.set_input('a', 22); - db.set_input('b', 44); - assert_eq!(db.increment('a'), 23); - assert_eq!(db.increment('b'), 45); - - db.remove_input('a'); - db.set_input('a', 66); - assert_eq!(db.increment('a'), 67); -} diff --git a/tests/storage_varieties/implementation.rs b/tests/storage_varieties/implementation.rs deleted file mode 100644 index 2843660f1..000000000 --- a/tests/storage_varieties/implementation.rs +++ /dev/null @@ -1,19 +0,0 @@ -use crate::queries; -use std::cell::Cell; - -#[salsa::database(queries::GroupStruct)] -#[derive(Default)] -pub(crate) struct DatabaseImpl { - storage: 
salsa::Storage, - counter: Cell, -} - -impl queries::Counter for DatabaseImpl { - fn increment(&self) -> usize { - let v = self.counter.get(); - self.counter.set(v + 1); - v - } -} - -impl salsa::Database for DatabaseImpl {} diff --git a/tests/storage_varieties/main.rs b/tests/storage_varieties/main.rs deleted file mode 100644 index e92c61740..000000000 --- a/tests/storage_varieties/main.rs +++ /dev/null @@ -1,5 +0,0 @@ -mod implementation; -mod queries; -mod tests; - -fn main() {} diff --git a/tests/storage_varieties/queries.rs b/tests/storage_varieties/queries.rs deleted file mode 100644 index 0847fadef..000000000 --- a/tests/storage_varieties/queries.rs +++ /dev/null @@ -1,22 +0,0 @@ -pub(crate) trait Counter: salsa::Database { - fn increment(&self) -> usize; -} - -#[salsa::query_group(GroupStruct)] -pub(crate) trait Database: Counter { - fn memoized(&self) -> usize; - fn volatile(&self) -> usize; -} - -/// Because this query is memoized, we only increment the counter -/// the first time it is invoked. -fn memoized(db: &dyn Database) -> usize { - db.volatile() -} - -/// Because this query is volatile, each time it is invoked, -/// we will increment the counter. 
-fn volatile(db: &dyn Database) -> usize { - db.salsa_runtime().report_untracked_read(); - db.increment() -} diff --git a/tests/storage_varieties/tests.rs b/tests/storage_varieties/tests.rs deleted file mode 100644 index f75c7c142..000000000 --- a/tests/storage_varieties/tests.rs +++ /dev/null @@ -1,49 +0,0 @@ -#![cfg(test)] - -use crate::implementation::DatabaseImpl; -use crate::queries::Database; -use salsa::Database as _Database; -use salsa::Durability; - -#[test] -fn memoized_twice() { - let db = DatabaseImpl::default(); - let v1 = db.memoized(); - let v2 = db.memoized(); - assert_eq!(v1, v2); -} - -#[test] -fn volatile_twice() { - let mut db = DatabaseImpl::default(); - let v1 = db.volatile(); - let v2 = db.volatile(); // volatiles are cached, so 2nd read returns the same - assert_eq!(v1, v2); - - db.salsa_runtime_mut().synthetic_write(Durability::LOW); // clears volatile caches - - let v3 = db.volatile(); // will re-increment the counter - let v4 = db.volatile(); // second call will be cached - assert_eq!(v1 + 1, v3); - assert_eq!(v3, v4); -} - -#[test] -fn intermingled() { - let mut db = DatabaseImpl::default(); - let v1 = db.volatile(); - let v2 = db.memoized(); - let v3 = db.volatile(); // cached - let v4 = db.memoized(); // cached - - assert_eq!(v1, v2); - assert_eq!(v1, v3); - assert_eq!(v2, v4); - - db.salsa_runtime_mut().synthetic_write(Durability::LOW); // clears volatile caches - - let v5 = db.memoized(); // re-executes volatile, caches new result - let v6 = db.memoized(); // re-use cached result - assert_eq!(v4 + 1, v5); - assert_eq!(v5, v6); -} diff --git a/tests/transparent.rs b/tests/transparent.rs deleted file mode 100644 index fce5c9c3c..000000000 --- a/tests/transparent.rs +++ /dev/null @@ -1,40 +0,0 @@ -//! 
Test that transparent (uncached) queries work - -#[salsa::query_group(QueryGroupStorage)] -trait QueryGroup { - #[salsa::input] - fn input(&self, x: u32) -> u32; - #[salsa::transparent] - fn wrap(&self, x: u32) -> u32; - fn get(&self, x: u32) -> u32; -} - -fn wrap(db: &dyn QueryGroup, x: u32) -> u32 { - db.input(x) -} - -fn get(db: &dyn QueryGroup, x: u32) -> u32 { - db.wrap(x) -} - -#[salsa::database(QueryGroupStorage)] -#[derive(Default)] -struct Database { - storage: salsa::Storage, -} - -impl salsa::Database for Database {} - -#[test] -fn transparent_queries_work() { - let mut db = Database::default(); - - db.set_input(1, 10); - assert_eq!(db.get(1), 10); - assert_eq!(db.get(1), 10); - - let value = db.remove_input(1) + 82; - db.set_input(1, value); - assert_eq!(db.get(1), 92); - assert_eq!(db.get(1), 92); -} diff --git a/tests/variadic.rs b/tests/variadic.rs deleted file mode 100644 index cb857844e..000000000 --- a/tests/variadic.rs +++ /dev/null @@ -1,51 +0,0 @@ -#[salsa::query_group(HelloWorld)] -trait HelloWorldDatabase: salsa::Database { - #[salsa::input] - fn input(&self, a: u32, b: u32) -> u32; - - fn none(&self) -> u32; - - fn one(&self, k: u32) -> u32; - - fn two(&self, a: u32, b: u32) -> u32; - - fn trailing(&self, a: u32, b: u32) -> u32; -} - -fn none(_db: &dyn HelloWorldDatabase) -> u32 { - 22 -} - -fn one(_db: &dyn HelloWorldDatabase, k: u32) -> u32 { - k * 2 -} - -fn two(_db: &dyn HelloWorldDatabase, a: u32, b: u32) -> u32 { - a * b -} - -fn trailing(_db: &dyn HelloWorldDatabase, a: u32, b: u32) -> u32 { - a - b -} - -#[salsa::database(HelloWorld)] -#[derive(Default)] -struct DatabaseStruct { - storage: salsa::Storage, -} - -impl salsa::Database for DatabaseStruct {} - -#[test] -fn execute() { - let mut db = DatabaseStruct::default(); - - // test what happens with inputs: - db.set_input(1, 2, 3); - assert_eq!(db.input(1, 2), 3); - - assert_eq!(db.none(), 22); - assert_eq!(db.one(11), 22); - assert_eq!(db.two(11, 2), 22); - 
assert_eq!(db.trailing(24, 2), 22); -} From c7851112a579348eb76f27ef22cb9232280358b5 Mon Sep 17 00:00:00 2001 From: Matthijs Brobbel Date: Tue, 18 Jun 2024 09:40:21 +0200 Subject: [PATCH 2/7] Rename `salsa-2022` to `salsa` --- Cargo.toml | 32 +++++++++++++++++++ components/salsa-2022/Cargo.toml | 19 ----------- .../Cargo.toml | 8 +++-- .../src/accumulator.rs | 0 .../src/configuration.rs | 0 .../src/db.rs | 0 .../src/db_lifetime.rs | 0 .../src/debug.rs | 0 .../src/debug_with_db.rs | 0 .../src/input.rs | 0 .../src/interned.rs | 0 .../src/jar.rs | 0 .../src/lib.rs | 0 .../src/options.rs | 0 .../src/salsa_struct.rs | 0 .../src/tracked.rs | 0 .../src/tracked_fn.rs | 0 .../src/tracked_struct.rs | 0 .../src/update.rs | 0 .../src/xform.rs | 0 {examples-2022 => examples}/calc/Cargo.toml | 0 .../calc/src/compile.rs | 0 {examples-2022 => examples}/calc/src/db.rs | 0 {examples-2022 => examples}/calc/src/ir.rs | 0 {examples-2022 => examples}/calc/src/main.rs | 0 .../calc/src/parser.rs | 0 .../calc/src/type_check.rs | 0 .../lazy-input/Cargo.toml | 0 .../lazy-input/inputs/a | 0 .../lazy-input/inputs/aa | 0 .../lazy-input/inputs/b | 0 .../lazy-input/inputs/start | 0 .../lazy-input/src/main.rs | 0 salsa-2022-tests/Cargo.toml | 15 --------- .../salsa-2022/src => src}/accumulator.rs | 0 {components/salsa-2022/src => src}/alloc.rs | 0 .../salsa-2022/src => src}/cancelled.rs | 0 {components/salsa-2022/src => src}/cycle.rs | 0 .../salsa-2022/src => src}/database.rs | 0 {components/salsa-2022/src => src}/debug.rs | 0 .../salsa-2022/src => src}/durability.rs | 0 {components/salsa-2022/src => src}/event.rs | 0 .../salsa-2022/src => src}/function.rs | 0 .../src => src}/function/accumulated.rs | 0 .../src => src}/function/backdate.rs | 0 .../salsa-2022/src => src}/function/delete.rs | 0 .../src => src}/function/diff_outputs.rs | 0 .../src => src}/function/execute.rs | 0 .../salsa-2022/src => src}/function/fetch.rs | 0 .../salsa-2022/src => src}/function/inputs.rs | 0 .../salsa-2022/src => 
src}/function/lru.rs | 0 .../function/maybe_changed_after.rs | 0 .../salsa-2022/src => src}/function/memo.rs | 0 .../src => src}/function/specify.rs | 0 .../salsa-2022/src => src}/function/store.rs | 0 .../salsa-2022/src => src}/function/sync.rs | 0 {components/salsa-2022/src => src}/hash.rs | 0 {components/salsa-2022/src => src}/id.rs | 0 .../salsa-2022/src => src}/ingredient.rs | 0 .../salsa-2022/src => src}/ingredient_list.rs | 0 {components/salsa-2022/src => src}/input.rs | 0 .../salsa-2022/src => src}/input_field.rs | 0 .../salsa-2022/src => src}/interned.rs | 0 {components/salsa-2022/src => src}/jar.rs | 0 {components/salsa-2022/src => src}/key.rs | 0 {components/salsa-2022/src => src}/lib.rs | 16 +++++----- .../salsa-2022/src => src}/plumbing.rs | 0 .../salsa-2022/src => src}/revision.rs | 0 {components/salsa-2022/src => src}/routes.rs | 0 {components/salsa-2022/src => src}/runtime.rs | 0 .../src => src}/runtime/active_query.rs | 0 .../src => src}/runtime/dependency_graph.rs | 0 .../src => src}/runtime/local_state.rs | 0 .../src => src}/runtime/shared_state.rs | 0 .../salsa-2022/src => src}/salsa_struct.rs | 0 {components/salsa-2022/src => src}/setter.rs | 0 {components/salsa-2022/src => src}/storage.rs | 0 .../salsa-2022/src => src}/tracked_struct.rs | 0 .../src => src}/tracked_struct/struct_map.rs | 0 .../tracked_struct/tracked_field.rs | 0 {components/salsa-2022/src => src}/update.rs | 0 .../accumulate-from-tracked-fn.rs | 3 +- .../accumulate-reuse-workaround.rs | 3 +- .../tests => tests}/accumulate-reuse.rs | 3 +- .../tests => tests}/accumulate.rs | 3 +- .../src/lib.rs => tests/common/mod.rs | 5 ++- .../accumulator_fields_incompatibles.rs | 0 .../accumulator_fields_incompatibles.stderr | 0 .../compile-fail/accumulator_incompatibles.rs | 0 .../accumulator_incompatibles.stderr | 0 .../compile-fail/get-set-on-private-field.rs | 0 .../get-set-on-private-field.stderr | 0 .../input_struct_id_fields_no_setters.rs | 0 .../input_struct_id_fields_no_setters.stderr 
| 0 .../input_struct_incompatibles.rs | 0 .../input_struct_incompatibles.stderr | 0 .../interned_struct_incompatibles.rs | 0 .../interned_struct_incompatibles.stderr | 0 .../compile-fail/jars_incompatibles.rs | 0 .../compile-fail/jars_incompatibles.stderr | 0 .../lru_can_not_be_used_with_specify.rs | 0 .../lru_can_not_be_used_with_specify.stderr | 0 ...of-tracked-structs-from-older-revisions.rs | 0 ...racked-structs-from-older-revisions.stderr | 0 .../salsa_fields_incompatibles.rs | 0 .../salsa_fields_incompatibles.stderr | 0 .../compile-fail/singleton_only_for_input.rs | 0 .../singleton_only_for_input.stderr | 6 ++++ .../compile-fail/span-input-setter.rs | 0 .../compile-fail/span-input-setter.stderr | 0 .../compile-fail/span-tracked-getter.rs | 0 .../compile-fail/span-tracked-getter.stderr | 0 ...es-not-work-if-the-key-is-a-salsa-input.rs | 0 ...ot-work-if-the-key-is-a-salsa-input.stderr | 2 +- ...not-work-if-the-key-is-a-salsa-interned.rs | 0 ...work-if-the-key-is-a-salsa-interned.stderr | 2 +- .../compile-fail/tracked_fn_incompatibles.rs | 0 .../tracked_fn_incompatibles.stderr | 0 .../tracked_impl_incompatibles.rs | 0 .../tracked_impl_incompatibles.stderr | 0 .../tracked_method_incompatibles.rs | 0 .../tracked_method_incompatibles.stderr | 0 .../tracked_method_on_untracked_impl.rs | 0 .../tracked_method_on_untracked_impl.stderr | 0 .../tracked_struct_incompatibles.rs | 0 .../tracked_struct_incompatibles.stderr | 0 .../tests => tests}/compile_fail.rs | 0 .../tests => tests}/create-empty-database.rs | 0 .../create-large-jar-database.rs | 0 {salsa-2022-tests/tests => tests}/cycles.rs | 0 {salsa-2022-tests/tests => tests}/debug.rs | 0 .../tests => tests}/deletion-cascade.rs | 3 +- {salsa-2022-tests/tests => tests}/deletion.rs | 3 +- .../elided-lifetime-in-tracked-fn.rs | 3 +- ...truct_changes_but_fn_depends_on_field_y.rs | 3 +- ...input_changes_but_fn_depends_on_field_y.rs | 3 +- .../tests => tests}/hello_world.rs | 3 +- .../tests => tests}/input_with_ids.rs | 0 
.../interned-struct-with-lifetime.rs | 3 +- .../tests => tests}/is_send_sync.rs | 0 {salsa-2022-tests/tests => tests}/lru.rs | 3 +- .../tests => tests}/mutate_in_place.rs | 3 +- .../tests => tests}/override_new_get_set.rs | 0 ...ng-tracked-struct-outside-of-tracked-fn.rs | 0 .../tests => tests}/parallel/main.rs | 0 .../parallel/parallel_cycle_all_recover.rs | 0 .../parallel/parallel_cycle_mid_recover.rs | 0 .../parallel/parallel_cycle_none_recover.rs | 0 .../parallel/parallel_cycle_one_recover.rs | 0 .../tests => tests}/parallel/setup.rs | 0 .../tests => tests}/parallel/signal.rs | 0 .../preverify-struct-with-leaked-data.rs | 3 +- .../tests => tests}/singleton.rs | 3 +- ...the-key-is-created-in-the-current-query.rs | 0 .../specify_tracked_fn_in_rev_1_but_not_2.rs | 3 +- .../tracked-struct-id-field-bad-eq.rs | 0 .../tracked-struct-id-field-bad-hash.rs | 0 .../tracked-struct-unchanged-in-new-rev.rs | 0 .../tracked-struct-value-field-bad-eq.rs | 3 +- .../tracked-struct-value-field-not-eq.rs | 0 .../tests => tests}/tracked_fn_constant.rs | 0 .../tests => tests}/tracked_fn_on_input.rs | 0 .../tests => tests}/tracked_fn_on_tracked.rs | 0 .../tracked_fn_on_tracked_specify.rs | 0 .../tracked_fn_read_own_entity.rs | 3 +- .../tracked_fn_read_own_specify.rs | 3 +- .../tests => tests}/tracked_method.rs | 0 .../tests => tests}/tracked_struct_db1_lt.rs | 3 +- .../tests => tests}/tracked_with_intern.rs | 0 .../tests => tests}/tracked_with_struct_db.rs | 0 .../tests => tests}/warnings/main.rs | 0 .../warnings/needless_borrow.rs | 0 .../warnings/needless_lifetimes.rs | 0 .../warnings/unused_variable_db.rs | 0 174 files changed, 98 insertions(+), 67 deletions(-) create mode 100644 Cargo.toml delete mode 100644 components/salsa-2022/Cargo.toml rename components/{salsa-2022-macros => salsa-macros}/Cargo.toml (51%) rename components/{salsa-2022-macros => salsa-macros}/src/accumulator.rs (100%) rename components/{salsa-2022-macros => salsa-macros}/src/configuration.rs (100%) rename 
components/{salsa-2022-macros => salsa-macros}/src/db.rs (100%) rename components/{salsa-2022-macros => salsa-macros}/src/db_lifetime.rs (100%) rename components/{salsa-2022-macros => salsa-macros}/src/debug.rs (100%) rename components/{salsa-2022-macros => salsa-macros}/src/debug_with_db.rs (100%) rename components/{salsa-2022-macros => salsa-macros}/src/input.rs (100%) rename components/{salsa-2022-macros => salsa-macros}/src/interned.rs (100%) rename components/{salsa-2022-macros => salsa-macros}/src/jar.rs (100%) rename components/{salsa-2022-macros => salsa-macros}/src/lib.rs (100%) rename components/{salsa-2022-macros => salsa-macros}/src/options.rs (100%) rename components/{salsa-2022-macros => salsa-macros}/src/salsa_struct.rs (100%) rename components/{salsa-2022-macros => salsa-macros}/src/tracked.rs (100%) rename components/{salsa-2022-macros => salsa-macros}/src/tracked_fn.rs (100%) rename components/{salsa-2022-macros => salsa-macros}/src/tracked_struct.rs (100%) rename components/{salsa-2022-macros => salsa-macros}/src/update.rs (100%) rename components/{salsa-2022-macros => salsa-macros}/src/xform.rs (100%) rename {examples-2022 => examples}/calc/Cargo.toml (100%) rename {examples-2022 => examples}/calc/src/compile.rs (100%) rename {examples-2022 => examples}/calc/src/db.rs (100%) rename {examples-2022 => examples}/calc/src/ir.rs (100%) rename {examples-2022 => examples}/calc/src/main.rs (100%) rename {examples-2022 => examples}/calc/src/parser.rs (100%) rename {examples-2022 => examples}/calc/src/type_check.rs (100%) rename {examples-2022 => examples}/lazy-input/Cargo.toml (100%) rename {examples-2022 => examples}/lazy-input/inputs/a (100%) rename {examples-2022 => examples}/lazy-input/inputs/aa (100%) rename {examples-2022 => examples}/lazy-input/inputs/b (100%) rename {examples-2022 => examples}/lazy-input/inputs/start (100%) rename {examples-2022 => examples}/lazy-input/src/main.rs (100%) delete mode 100644 salsa-2022-tests/Cargo.toml rename 
{components/salsa-2022/src => src}/accumulator.rs (100%) rename {components/salsa-2022/src => src}/alloc.rs (100%) rename {components/salsa-2022/src => src}/cancelled.rs (100%) rename {components/salsa-2022/src => src}/cycle.rs (100%) rename {components/salsa-2022/src => src}/database.rs (100%) rename {components/salsa-2022/src => src}/debug.rs (100%) rename {components/salsa-2022/src => src}/durability.rs (100%) rename {components/salsa-2022/src => src}/event.rs (100%) rename {components/salsa-2022/src => src}/function.rs (100%) rename {components/salsa-2022/src => src}/function/accumulated.rs (100%) rename {components/salsa-2022/src => src}/function/backdate.rs (100%) rename {components/salsa-2022/src => src}/function/delete.rs (100%) rename {components/salsa-2022/src => src}/function/diff_outputs.rs (100%) rename {components/salsa-2022/src => src}/function/execute.rs (100%) rename {components/salsa-2022/src => src}/function/fetch.rs (100%) rename {components/salsa-2022/src => src}/function/inputs.rs (100%) rename {components/salsa-2022/src => src}/function/lru.rs (100%) rename {components/salsa-2022/src => src}/function/maybe_changed_after.rs (100%) rename {components/salsa-2022/src => src}/function/memo.rs (100%) rename {components/salsa-2022/src => src}/function/specify.rs (100%) rename {components/salsa-2022/src => src}/function/store.rs (100%) rename {components/salsa-2022/src => src}/function/sync.rs (100%) rename {components/salsa-2022/src => src}/hash.rs (100%) rename {components/salsa-2022/src => src}/id.rs (100%) rename {components/salsa-2022/src => src}/ingredient.rs (100%) rename {components/salsa-2022/src => src}/ingredient_list.rs (100%) rename {components/salsa-2022/src => src}/input.rs (100%) rename {components/salsa-2022/src => src}/input_field.rs (100%) rename {components/salsa-2022/src => src}/interned.rs (100%) rename {components/salsa-2022/src => src}/jar.rs (100%) rename {components/salsa-2022/src => src}/key.rs (100%) rename 
{components/salsa-2022/src => src}/lib.rs (78%) rename {components/salsa-2022/src => src}/plumbing.rs (100%) rename {components/salsa-2022/src => src}/revision.rs (100%) rename {components/salsa-2022/src => src}/routes.rs (100%) rename {components/salsa-2022/src => src}/runtime.rs (100%) rename {components/salsa-2022/src => src}/runtime/active_query.rs (100%) rename {components/salsa-2022/src => src}/runtime/dependency_graph.rs (100%) rename {components/salsa-2022/src => src}/runtime/local_state.rs (100%) rename {components/salsa-2022/src => src}/runtime/shared_state.rs (100%) rename {components/salsa-2022/src => src}/salsa_struct.rs (100%) rename {components/salsa-2022/src => src}/setter.rs (100%) rename {components/salsa-2022/src => src}/storage.rs (100%) rename {components/salsa-2022/src => src}/tracked_struct.rs (100%) rename {components/salsa-2022/src => src}/tracked_struct/struct_map.rs (100%) rename {components/salsa-2022/src => src}/tracked_struct/tracked_field.rs (100%) rename {components/salsa-2022/src => src}/update.rs (100%) rename {salsa-2022-tests/tests => tests}/accumulate-from-tracked-fn.rs (97%) rename {salsa-2022-tests/tests => tests}/accumulate-reuse-workaround.rs (97%) rename {salsa-2022-tests/tests => tests}/accumulate-reuse.rs (97%) rename {salsa-2022-tests/tests => tests}/accumulate.rs (98%) rename salsa-2022-tests/src/lib.rs => tests/common/mod.rs (93%) rename {salsa-2022-tests/tests => tests}/compile-fail/accumulator_fields_incompatibles.rs (100%) rename {salsa-2022-tests/tests => tests}/compile-fail/accumulator_fields_incompatibles.stderr (100%) rename {salsa-2022-tests/tests => tests}/compile-fail/accumulator_incompatibles.rs (100%) rename {salsa-2022-tests/tests => tests}/compile-fail/accumulator_incompatibles.stderr (100%) rename {salsa-2022-tests/tests => tests}/compile-fail/get-set-on-private-field.rs (100%) rename {salsa-2022-tests/tests => tests}/compile-fail/get-set-on-private-field.stderr (100%) rename {salsa-2022-tests/tests => 
tests}/compile-fail/input_struct_id_fields_no_setters.rs (100%) rename {salsa-2022-tests/tests => tests}/compile-fail/input_struct_id_fields_no_setters.stderr (100%) rename {salsa-2022-tests/tests => tests}/compile-fail/input_struct_incompatibles.rs (100%) rename {salsa-2022-tests/tests => tests}/compile-fail/input_struct_incompatibles.stderr (100%) rename {salsa-2022-tests/tests => tests}/compile-fail/interned_struct_incompatibles.rs (100%) rename {salsa-2022-tests/tests => tests}/compile-fail/interned_struct_incompatibles.stderr (100%) rename {salsa-2022-tests/tests => tests}/compile-fail/jars_incompatibles.rs (100%) rename {salsa-2022-tests/tests => tests}/compile-fail/jars_incompatibles.stderr (100%) rename {salsa-2022-tests/tests => tests}/compile-fail/lru_can_not_be_used_with_specify.rs (100%) rename {salsa-2022-tests/tests => tests}/compile-fail/lru_can_not_be_used_with_specify.stderr (100%) rename {salsa-2022-tests/tests => tests}/compile-fail/panic-when-reading-fields-of-tracked-structs-from-older-revisions.rs (100%) rename {salsa-2022-tests/tests => tests}/compile-fail/panic-when-reading-fields-of-tracked-structs-from-older-revisions.stderr (100%) rename {salsa-2022-tests/tests => tests}/compile-fail/salsa_fields_incompatibles.rs (100%) rename {salsa-2022-tests/tests => tests}/compile-fail/salsa_fields_incompatibles.stderr (100%) rename {salsa-2022-tests/tests => tests}/compile-fail/singleton_only_for_input.rs (100%) rename {salsa-2022-tests/tests => tests}/compile-fail/singleton_only_for_input.stderr (86%) rename {salsa-2022-tests/tests => tests}/compile-fail/span-input-setter.rs (100%) rename {salsa-2022-tests/tests => tests}/compile-fail/span-input-setter.stderr (100%) rename {salsa-2022-tests/tests => tests}/compile-fail/span-tracked-getter.rs (100%) rename {salsa-2022-tests/tests => tests}/compile-fail/span-tracked-getter.stderr (100%) rename {salsa-2022-tests/tests => tests}/compile-fail/specify-does-not-work-if-the-key-is-a-salsa-input.rs (100%) 
rename {salsa-2022-tests/tests => tests}/compile-fail/specify-does-not-work-if-the-key-is-a-salsa-input.stderr (94%) rename {salsa-2022-tests/tests => tests}/compile-fail/specify-does-not-work-if-the-key-is-a-salsa-interned.rs (100%) rename {salsa-2022-tests/tests => tests}/compile-fail/specify-does-not-work-if-the-key-is-a-salsa-interned.stderr (94%) rename {salsa-2022-tests/tests => tests}/compile-fail/tracked_fn_incompatibles.rs (100%) rename {salsa-2022-tests/tests => tests}/compile-fail/tracked_fn_incompatibles.stderr (100%) rename {salsa-2022-tests/tests => tests}/compile-fail/tracked_impl_incompatibles.rs (100%) rename {salsa-2022-tests/tests => tests}/compile-fail/tracked_impl_incompatibles.stderr (100%) rename {salsa-2022-tests/tests => tests}/compile-fail/tracked_method_incompatibles.rs (100%) rename {salsa-2022-tests/tests => tests}/compile-fail/tracked_method_incompatibles.stderr (100%) rename {salsa-2022-tests/tests => tests}/compile-fail/tracked_method_on_untracked_impl.rs (100%) rename {salsa-2022-tests/tests => tests}/compile-fail/tracked_method_on_untracked_impl.stderr (100%) rename {salsa-2022-tests/tests => tests}/compile-fail/tracked_struct_incompatibles.rs (100%) rename {salsa-2022-tests/tests => tests}/compile-fail/tracked_struct_incompatibles.stderr (100%) rename {salsa-2022-tests/tests => tests}/compile_fail.rs (100%) rename {salsa-2022-tests/tests => tests}/create-empty-database.rs (100%) rename {salsa-2022-tests/tests => tests}/create-large-jar-database.rs (100%) rename {salsa-2022-tests/tests => tests}/cycles.rs (100%) rename {salsa-2022-tests/tests => tests}/debug.rs (100%) rename {salsa-2022-tests/tests => tests}/deletion-cascade.rs (98%) rename {salsa-2022-tests/tests => tests}/deletion.rs (98%) rename {salsa-2022-tests/tests => tests}/elided-lifetime-in-tracked-fn.rs (97%) rename {salsa-2022-tests/tests => tests}/expect_reuse_field_x_of_a_tracked_struct_changes_but_fn_depends_on_field_y.rs (98%) rename {salsa-2022-tests/tests => 
tests}/expect_reuse_field_x_of_an_input_changes_but_fn_depends_on_field_y.rs (97%) rename {salsa-2022-tests/tests => tests}/hello_world.rs (98%) rename {salsa-2022-tests/tests => tests}/input_with_ids.rs (100%) rename {salsa-2022-tests/tests => tests}/interned-struct-with-lifetime.rs (96%) rename {salsa-2022-tests/tests => tests}/is_send_sync.rs (100%) rename {salsa-2022-tests/tests => tests}/lru.rs (99%) rename {salsa-2022-tests/tests => tests}/mutate_in_place.rs (96%) rename {salsa-2022-tests/tests => tests}/override_new_get_set.rs (100%) rename {salsa-2022-tests/tests => tests}/panic-when-creating-tracked-struct-outside-of-tracked-fn.rs (100%) rename {salsa-2022-tests/tests => tests}/parallel/main.rs (100%) rename {salsa-2022-tests/tests => tests}/parallel/parallel_cycle_all_recover.rs (100%) rename {salsa-2022-tests/tests => tests}/parallel/parallel_cycle_mid_recover.rs (100%) rename {salsa-2022-tests/tests => tests}/parallel/parallel_cycle_none_recover.rs (100%) rename {salsa-2022-tests/tests => tests}/parallel/parallel_cycle_one_recover.rs (100%) rename {salsa-2022-tests/tests => tests}/parallel/setup.rs (100%) rename {salsa-2022-tests/tests => tests}/parallel/signal.rs (100%) rename {salsa-2022-tests/tests => tests}/preverify-struct-with-leaked-data.rs (98%) rename {salsa-2022-tests/tests => tests}/singleton.rs (96%) rename {salsa-2022-tests/tests => tests}/specify-only-works-if-the-key-is-created-in-the-current-query.rs (100%) rename {salsa-2022-tests/tests => tests}/specify_tracked_fn_in_rev_1_but_not_2.rs (99%) rename {salsa-2022-tests/tests => tests}/tracked-struct-id-field-bad-eq.rs (100%) rename {salsa-2022-tests/tests => tests}/tracked-struct-id-field-bad-hash.rs (100%) rename {salsa-2022-tests/tests => tests}/tracked-struct-unchanged-in-new-rev.rs (100%) rename {salsa-2022-tests/tests => tests}/tracked-struct-value-field-bad-eq.rs (98%) rename {salsa-2022-tests/tests => tests}/tracked-struct-value-field-not-eq.rs (100%) rename {salsa-2022-tests/tests 
=> tests}/tracked_fn_constant.rs (100%) rename {salsa-2022-tests/tests => tests}/tracked_fn_on_input.rs (100%) rename {salsa-2022-tests/tests => tests}/tracked_fn_on_tracked.rs (100%) rename {salsa-2022-tests/tests => tests}/tracked_fn_on_tracked_specify.rs (100%) rename {salsa-2022-tests/tests => tests}/tracked_fn_read_own_entity.rs (98%) rename {salsa-2022-tests/tests => tests}/tracked_fn_read_own_specify.rs (97%) rename {salsa-2022-tests/tests => tests}/tracked_method.rs (100%) rename {salsa-2022-tests/tests => tests}/tracked_struct_db1_lt.rs (94%) rename {salsa-2022-tests/tests => tests}/tracked_with_intern.rs (100%) rename {salsa-2022-tests/tests => tests}/tracked_with_struct_db.rs (100%) rename {salsa-2022-tests/tests => tests}/warnings/main.rs (100%) rename {salsa-2022-tests/tests => tests}/warnings/needless_borrow.rs (100%) rename {salsa-2022-tests/tests => tests}/warnings/needless_lifetimes.rs (100%) rename {salsa-2022-tests/tests => tests}/warnings/unused_variable_db.rs (100%) diff --git a/Cargo.toml b/Cargo.toml new file mode 100644 index 000000000..ee39217e8 --- /dev/null +++ b/Cargo.toml @@ -0,0 +1,32 @@ +[package] +name = "salsa" +version = "0.18.0" +authors = ["Salsa developers"] +edition = "2021" +license = "Apache-2.0 OR MIT" +repository = "https://github.com/salsa-rs/salsa" +description = "A generic framework for on-demand, incrementalized computation (experimental)" + +[dependencies] +crossbeam = "0.8.1" +dashmap = "5.3.4" +rustc-hash = "1.1.0" +indexmap = "2" +hashlink = "0.8.0" +arc-swap = "1.6.0" +crossbeam-utils = { version = "0.8", default-features = false } +log = "0.4.5" +parking_lot = "0.12.1" +smallvec = "1.0.0" +salsa-macros = { path = "components/salsa-macros" } + +[dev-dependencies] +expect-test = "1.4.0" +parking_lot = "0.12.1" +test-log = "0.2.11" +env_logger = "*" +trybuild = "1.0" +rustversion = "1.0" + +[workspace] +members = ["components/salsa-macros"] diff --git a/components/salsa-2022/Cargo.toml 
b/components/salsa-2022/Cargo.toml deleted file mode 100644 index f179a6bda..000000000 --- a/components/salsa-2022/Cargo.toml +++ /dev/null @@ -1,19 +0,0 @@ -[package] -name = "salsa-2022" -version = "0.1.0" -edition = "2021" - -# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html - -[dependencies] -crossbeam = "0.8.1" -dashmap = "5.3.4" -rustc-hash = "1.1.0" -indexmap = "2" -hashlink = "0.8.0" -arc-swap = "1.6.0" -crossbeam-utils = { version = "0.8", default-features = false } -log = "0.4.5" -parking_lot = "0.12.1" -smallvec = "1.0.0" -salsa-2022-macros = { path = "../salsa-2022-macros" } diff --git a/components/salsa-2022-macros/Cargo.toml b/components/salsa-macros/Cargo.toml similarity index 51% rename from components/salsa-2022-macros/Cargo.toml rename to components/salsa-macros/Cargo.toml index 990ff5a42..5aa058044 100644 --- a/components/salsa-2022-macros/Cargo.toml +++ b/components/salsa-macros/Cargo.toml @@ -1,7 +1,11 @@ [package] -name = "salsa-2022-macros" -version = "0.1.0" +name = "salsa-macros" +version = "0.18.0" +authors = ["Salsa developers"] edition = "2021" +license = "Apache-2.0 OR MIT" +repository = "https://github.com/salsa-rs/salsa" +description = "Procedural macros for the salsa crate" [lib] proc-macro = true diff --git a/components/salsa-2022-macros/src/accumulator.rs b/components/salsa-macros/src/accumulator.rs similarity index 100% rename from components/salsa-2022-macros/src/accumulator.rs rename to components/salsa-macros/src/accumulator.rs diff --git a/components/salsa-2022-macros/src/configuration.rs b/components/salsa-macros/src/configuration.rs similarity index 100% rename from components/salsa-2022-macros/src/configuration.rs rename to components/salsa-macros/src/configuration.rs diff --git a/components/salsa-2022-macros/src/db.rs b/components/salsa-macros/src/db.rs similarity index 100% rename from components/salsa-2022-macros/src/db.rs rename to components/salsa-macros/src/db.rs diff 
--git a/components/salsa-2022-macros/src/db_lifetime.rs b/components/salsa-macros/src/db_lifetime.rs similarity index 100% rename from components/salsa-2022-macros/src/db_lifetime.rs rename to components/salsa-macros/src/db_lifetime.rs diff --git a/components/salsa-2022-macros/src/debug.rs b/components/salsa-macros/src/debug.rs similarity index 100% rename from components/salsa-2022-macros/src/debug.rs rename to components/salsa-macros/src/debug.rs diff --git a/components/salsa-2022-macros/src/debug_with_db.rs b/components/salsa-macros/src/debug_with_db.rs similarity index 100% rename from components/salsa-2022-macros/src/debug_with_db.rs rename to components/salsa-macros/src/debug_with_db.rs diff --git a/components/salsa-2022-macros/src/input.rs b/components/salsa-macros/src/input.rs similarity index 100% rename from components/salsa-2022-macros/src/input.rs rename to components/salsa-macros/src/input.rs diff --git a/components/salsa-2022-macros/src/interned.rs b/components/salsa-macros/src/interned.rs similarity index 100% rename from components/salsa-2022-macros/src/interned.rs rename to components/salsa-macros/src/interned.rs diff --git a/components/salsa-2022-macros/src/jar.rs b/components/salsa-macros/src/jar.rs similarity index 100% rename from components/salsa-2022-macros/src/jar.rs rename to components/salsa-macros/src/jar.rs diff --git a/components/salsa-2022-macros/src/lib.rs b/components/salsa-macros/src/lib.rs similarity index 100% rename from components/salsa-2022-macros/src/lib.rs rename to components/salsa-macros/src/lib.rs diff --git a/components/salsa-2022-macros/src/options.rs b/components/salsa-macros/src/options.rs similarity index 100% rename from components/salsa-2022-macros/src/options.rs rename to components/salsa-macros/src/options.rs diff --git a/components/salsa-2022-macros/src/salsa_struct.rs b/components/salsa-macros/src/salsa_struct.rs similarity index 100% rename from components/salsa-2022-macros/src/salsa_struct.rs rename to 
components/salsa-macros/src/salsa_struct.rs diff --git a/components/salsa-2022-macros/src/tracked.rs b/components/salsa-macros/src/tracked.rs similarity index 100% rename from components/salsa-2022-macros/src/tracked.rs rename to components/salsa-macros/src/tracked.rs diff --git a/components/salsa-2022-macros/src/tracked_fn.rs b/components/salsa-macros/src/tracked_fn.rs similarity index 100% rename from components/salsa-2022-macros/src/tracked_fn.rs rename to components/salsa-macros/src/tracked_fn.rs diff --git a/components/salsa-2022-macros/src/tracked_struct.rs b/components/salsa-macros/src/tracked_struct.rs similarity index 100% rename from components/salsa-2022-macros/src/tracked_struct.rs rename to components/salsa-macros/src/tracked_struct.rs diff --git a/components/salsa-2022-macros/src/update.rs b/components/salsa-macros/src/update.rs similarity index 100% rename from components/salsa-2022-macros/src/update.rs rename to components/salsa-macros/src/update.rs diff --git a/components/salsa-2022-macros/src/xform.rs b/components/salsa-macros/src/xform.rs similarity index 100% rename from components/salsa-2022-macros/src/xform.rs rename to components/salsa-macros/src/xform.rs diff --git a/examples-2022/calc/Cargo.toml b/examples/calc/Cargo.toml similarity index 100% rename from examples-2022/calc/Cargo.toml rename to examples/calc/Cargo.toml diff --git a/examples-2022/calc/src/compile.rs b/examples/calc/src/compile.rs similarity index 100% rename from examples-2022/calc/src/compile.rs rename to examples/calc/src/compile.rs diff --git a/examples-2022/calc/src/db.rs b/examples/calc/src/db.rs similarity index 100% rename from examples-2022/calc/src/db.rs rename to examples/calc/src/db.rs diff --git a/examples-2022/calc/src/ir.rs b/examples/calc/src/ir.rs similarity index 100% rename from examples-2022/calc/src/ir.rs rename to examples/calc/src/ir.rs diff --git a/examples-2022/calc/src/main.rs b/examples/calc/src/main.rs similarity index 100% rename from 
examples-2022/calc/src/main.rs rename to examples/calc/src/main.rs diff --git a/examples-2022/calc/src/parser.rs b/examples/calc/src/parser.rs similarity index 100% rename from examples-2022/calc/src/parser.rs rename to examples/calc/src/parser.rs diff --git a/examples-2022/calc/src/type_check.rs b/examples/calc/src/type_check.rs similarity index 100% rename from examples-2022/calc/src/type_check.rs rename to examples/calc/src/type_check.rs diff --git a/examples-2022/lazy-input/Cargo.toml b/examples/lazy-input/Cargo.toml similarity index 100% rename from examples-2022/lazy-input/Cargo.toml rename to examples/lazy-input/Cargo.toml diff --git a/examples-2022/lazy-input/inputs/a b/examples/lazy-input/inputs/a similarity index 100% rename from examples-2022/lazy-input/inputs/a rename to examples/lazy-input/inputs/a diff --git a/examples-2022/lazy-input/inputs/aa b/examples/lazy-input/inputs/aa similarity index 100% rename from examples-2022/lazy-input/inputs/aa rename to examples/lazy-input/inputs/aa diff --git a/examples-2022/lazy-input/inputs/b b/examples/lazy-input/inputs/b similarity index 100% rename from examples-2022/lazy-input/inputs/b rename to examples/lazy-input/inputs/b diff --git a/examples-2022/lazy-input/inputs/start b/examples/lazy-input/inputs/start similarity index 100% rename from examples-2022/lazy-input/inputs/start rename to examples/lazy-input/inputs/start diff --git a/examples-2022/lazy-input/src/main.rs b/examples/lazy-input/src/main.rs similarity index 100% rename from examples-2022/lazy-input/src/main.rs rename to examples/lazy-input/src/main.rs diff --git a/salsa-2022-tests/Cargo.toml b/salsa-2022-tests/Cargo.toml deleted file mode 100644 index 6ac555cd1..000000000 --- a/salsa-2022-tests/Cargo.toml +++ /dev/null @@ -1,15 +0,0 @@ -[package] -name = "salsa-2022-tests" -version = "0.1.0" -edition = "2021" - -# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html - -[dependencies] -salsa = { path = 
"../components/salsa-2022", package = "salsa-2022" } -expect-test = "1.4.0" -parking_lot = "0.12.1" -test-log = "0.2.11" -env_logger = "*" -trybuild = "1.0" -rustversion = "1.0" diff --git a/components/salsa-2022/src/accumulator.rs b/src/accumulator.rs similarity index 100% rename from components/salsa-2022/src/accumulator.rs rename to src/accumulator.rs diff --git a/components/salsa-2022/src/alloc.rs b/src/alloc.rs similarity index 100% rename from components/salsa-2022/src/alloc.rs rename to src/alloc.rs diff --git a/components/salsa-2022/src/cancelled.rs b/src/cancelled.rs similarity index 100% rename from components/salsa-2022/src/cancelled.rs rename to src/cancelled.rs diff --git a/components/salsa-2022/src/cycle.rs b/src/cycle.rs similarity index 100% rename from components/salsa-2022/src/cycle.rs rename to src/cycle.rs diff --git a/components/salsa-2022/src/database.rs b/src/database.rs similarity index 100% rename from components/salsa-2022/src/database.rs rename to src/database.rs diff --git a/components/salsa-2022/src/debug.rs b/src/debug.rs similarity index 100% rename from components/salsa-2022/src/debug.rs rename to src/debug.rs diff --git a/components/salsa-2022/src/durability.rs b/src/durability.rs similarity index 100% rename from components/salsa-2022/src/durability.rs rename to src/durability.rs diff --git a/components/salsa-2022/src/event.rs b/src/event.rs similarity index 100% rename from components/salsa-2022/src/event.rs rename to src/event.rs diff --git a/components/salsa-2022/src/function.rs b/src/function.rs similarity index 100% rename from components/salsa-2022/src/function.rs rename to src/function.rs diff --git a/components/salsa-2022/src/function/accumulated.rs b/src/function/accumulated.rs similarity index 100% rename from components/salsa-2022/src/function/accumulated.rs rename to src/function/accumulated.rs diff --git a/components/salsa-2022/src/function/backdate.rs b/src/function/backdate.rs similarity index 100% rename from 
components/salsa-2022/src/function/backdate.rs rename to src/function/backdate.rs diff --git a/components/salsa-2022/src/function/delete.rs b/src/function/delete.rs similarity index 100% rename from components/salsa-2022/src/function/delete.rs rename to src/function/delete.rs diff --git a/components/salsa-2022/src/function/diff_outputs.rs b/src/function/diff_outputs.rs similarity index 100% rename from components/salsa-2022/src/function/diff_outputs.rs rename to src/function/diff_outputs.rs diff --git a/components/salsa-2022/src/function/execute.rs b/src/function/execute.rs similarity index 100% rename from components/salsa-2022/src/function/execute.rs rename to src/function/execute.rs diff --git a/components/salsa-2022/src/function/fetch.rs b/src/function/fetch.rs similarity index 100% rename from components/salsa-2022/src/function/fetch.rs rename to src/function/fetch.rs diff --git a/components/salsa-2022/src/function/inputs.rs b/src/function/inputs.rs similarity index 100% rename from components/salsa-2022/src/function/inputs.rs rename to src/function/inputs.rs diff --git a/components/salsa-2022/src/function/lru.rs b/src/function/lru.rs similarity index 100% rename from components/salsa-2022/src/function/lru.rs rename to src/function/lru.rs diff --git a/components/salsa-2022/src/function/maybe_changed_after.rs b/src/function/maybe_changed_after.rs similarity index 100% rename from components/salsa-2022/src/function/maybe_changed_after.rs rename to src/function/maybe_changed_after.rs diff --git a/components/salsa-2022/src/function/memo.rs b/src/function/memo.rs similarity index 100% rename from components/salsa-2022/src/function/memo.rs rename to src/function/memo.rs diff --git a/components/salsa-2022/src/function/specify.rs b/src/function/specify.rs similarity index 100% rename from components/salsa-2022/src/function/specify.rs rename to src/function/specify.rs diff --git a/components/salsa-2022/src/function/store.rs b/src/function/store.rs similarity index 100% 
rename from components/salsa-2022/src/function/store.rs rename to src/function/store.rs diff --git a/components/salsa-2022/src/function/sync.rs b/src/function/sync.rs similarity index 100% rename from components/salsa-2022/src/function/sync.rs rename to src/function/sync.rs diff --git a/components/salsa-2022/src/hash.rs b/src/hash.rs similarity index 100% rename from components/salsa-2022/src/hash.rs rename to src/hash.rs diff --git a/components/salsa-2022/src/id.rs b/src/id.rs similarity index 100% rename from components/salsa-2022/src/id.rs rename to src/id.rs diff --git a/components/salsa-2022/src/ingredient.rs b/src/ingredient.rs similarity index 100% rename from components/salsa-2022/src/ingredient.rs rename to src/ingredient.rs diff --git a/components/salsa-2022/src/ingredient_list.rs b/src/ingredient_list.rs similarity index 100% rename from components/salsa-2022/src/ingredient_list.rs rename to src/ingredient_list.rs diff --git a/components/salsa-2022/src/input.rs b/src/input.rs similarity index 100% rename from components/salsa-2022/src/input.rs rename to src/input.rs diff --git a/components/salsa-2022/src/input_field.rs b/src/input_field.rs similarity index 100% rename from components/salsa-2022/src/input_field.rs rename to src/input_field.rs diff --git a/components/salsa-2022/src/interned.rs b/src/interned.rs similarity index 100% rename from components/salsa-2022/src/interned.rs rename to src/interned.rs diff --git a/components/salsa-2022/src/jar.rs b/src/jar.rs similarity index 100% rename from components/salsa-2022/src/jar.rs rename to src/jar.rs diff --git a/components/salsa-2022/src/key.rs b/src/key.rs similarity index 100% rename from components/salsa-2022/src/key.rs rename to src/key.rs diff --git a/components/salsa-2022/src/lib.rs b/src/lib.rs similarity index 78% rename from components/salsa-2022/src/lib.rs rename to src/lib.rs index f0099b532..81245007f 100644 --- a/components/salsa-2022/src/lib.rs +++ b/src/lib.rs @@ -43,11 +43,11 @@ pub use 
self::routes::IngredientIndex; pub use self::runtime::Runtime; pub use self::storage::DbWithJar; pub use self::storage::Storage; -pub use salsa_2022_macros::accumulator; -pub use salsa_2022_macros::db; -pub use salsa_2022_macros::input; -pub use salsa_2022_macros::interned; -pub use salsa_2022_macros::jar; -pub use salsa_2022_macros::tracked; -pub use salsa_2022_macros::DebugWithDb; -pub use salsa_2022_macros::Update; +pub use salsa_macros::accumulator; +pub use salsa_macros::db; +pub use salsa_macros::input; +pub use salsa_macros::interned; +pub use salsa_macros::jar; +pub use salsa_macros::tracked; +pub use salsa_macros::DebugWithDb; +pub use salsa_macros::Update; diff --git a/components/salsa-2022/src/plumbing.rs b/src/plumbing.rs similarity index 100% rename from components/salsa-2022/src/plumbing.rs rename to src/plumbing.rs diff --git a/components/salsa-2022/src/revision.rs b/src/revision.rs similarity index 100% rename from components/salsa-2022/src/revision.rs rename to src/revision.rs diff --git a/components/salsa-2022/src/routes.rs b/src/routes.rs similarity index 100% rename from components/salsa-2022/src/routes.rs rename to src/routes.rs diff --git a/components/salsa-2022/src/runtime.rs b/src/runtime.rs similarity index 100% rename from components/salsa-2022/src/runtime.rs rename to src/runtime.rs diff --git a/components/salsa-2022/src/runtime/active_query.rs b/src/runtime/active_query.rs similarity index 100% rename from components/salsa-2022/src/runtime/active_query.rs rename to src/runtime/active_query.rs diff --git a/components/salsa-2022/src/runtime/dependency_graph.rs b/src/runtime/dependency_graph.rs similarity index 100% rename from components/salsa-2022/src/runtime/dependency_graph.rs rename to src/runtime/dependency_graph.rs diff --git a/components/salsa-2022/src/runtime/local_state.rs b/src/runtime/local_state.rs similarity index 100% rename from components/salsa-2022/src/runtime/local_state.rs rename to src/runtime/local_state.rs diff --git 
a/components/salsa-2022/src/runtime/shared_state.rs b/src/runtime/shared_state.rs similarity index 100% rename from components/salsa-2022/src/runtime/shared_state.rs rename to src/runtime/shared_state.rs diff --git a/components/salsa-2022/src/salsa_struct.rs b/src/salsa_struct.rs similarity index 100% rename from components/salsa-2022/src/salsa_struct.rs rename to src/salsa_struct.rs diff --git a/components/salsa-2022/src/setter.rs b/src/setter.rs similarity index 100% rename from components/salsa-2022/src/setter.rs rename to src/setter.rs diff --git a/components/salsa-2022/src/storage.rs b/src/storage.rs similarity index 100% rename from components/salsa-2022/src/storage.rs rename to src/storage.rs diff --git a/components/salsa-2022/src/tracked_struct.rs b/src/tracked_struct.rs similarity index 100% rename from components/salsa-2022/src/tracked_struct.rs rename to src/tracked_struct.rs diff --git a/components/salsa-2022/src/tracked_struct/struct_map.rs b/src/tracked_struct/struct_map.rs similarity index 100% rename from components/salsa-2022/src/tracked_struct/struct_map.rs rename to src/tracked_struct/struct_map.rs diff --git a/components/salsa-2022/src/tracked_struct/tracked_field.rs b/src/tracked_struct/tracked_field.rs similarity index 100% rename from components/salsa-2022/src/tracked_struct/tracked_field.rs rename to src/tracked_struct/tracked_field.rs diff --git a/components/salsa-2022/src/update.rs b/src/update.rs similarity index 100% rename from components/salsa-2022/src/update.rs rename to src/update.rs diff --git a/salsa-2022-tests/tests/accumulate-from-tracked-fn.rs b/tests/accumulate-from-tracked-fn.rs similarity index 97% rename from salsa-2022-tests/tests/accumulate-from-tracked-fn.rs rename to tests/accumulate-from-tracked-fn.rs index ae8deb48e..767b62cf7 100644 --- a/salsa-2022-tests/tests/accumulate-from-tracked-fn.rs +++ b/tests/accumulate-from-tracked-fn.rs @@ -2,7 +2,8 @@ //! Then mutate the values so that the tracked function re-executes. 
//! Check that we accumulate the appropriate, new values. -use salsa_2022_tests::{HasLogger, Logger}; +mod common; +use common::{HasLogger, Logger}; use expect_test::expect; use test_log::test; diff --git a/salsa-2022-tests/tests/accumulate-reuse-workaround.rs b/tests/accumulate-reuse-workaround.rs similarity index 97% rename from salsa-2022-tests/tests/accumulate-reuse-workaround.rs rename to tests/accumulate-reuse-workaround.rs index 4a81e85eb..8289b50f8 100644 --- a/salsa-2022-tests/tests/accumulate-reuse-workaround.rs +++ b/tests/accumulate-reuse-workaround.rs @@ -2,7 +2,8 @@ //! `accumulated` in a tracked function to get better //! reuse. -use salsa_2022_tests::{HasLogger, Logger}; +mod common; +use common::{HasLogger, Logger}; use expect_test::expect; use test_log::test; diff --git a/salsa-2022-tests/tests/accumulate-reuse.rs b/tests/accumulate-reuse.rs similarity index 97% rename from salsa-2022-tests/tests/accumulate-reuse.rs rename to tests/accumulate-reuse.rs index ca164c57a..fc333fa44 100644 --- a/salsa-2022-tests/tests/accumulate-reuse.rs +++ b/tests/accumulate-reuse.rs @@ -3,7 +3,8 @@ //! Tests behavior when a query's only inputs //! are the accumulated values from another query. -use salsa_2022_tests::{HasLogger, Logger}; +mod common; +use common::{HasLogger, Logger}; use expect_test::expect; use test_log::test; diff --git a/salsa-2022-tests/tests/accumulate.rs b/tests/accumulate.rs similarity index 98% rename from salsa-2022-tests/tests/accumulate.rs rename to tests/accumulate.rs index 80157ec94..cef66ea25 100644 --- a/salsa-2022-tests/tests/accumulate.rs +++ b/tests/accumulate.rs @@ -2,7 +2,8 @@ //! //! * entities not created in a revision are deleted, as is any memoized data keyed on them. 
-use salsa_2022_tests::{HasLogger, Logger}; +mod common; +use common::{HasLogger, Logger}; use expect_test::expect; use test_log::test; diff --git a/salsa-2022-tests/src/lib.rs b/tests/common/mod.rs similarity index 93% rename from salsa-2022-tests/src/lib.rs rename to tests/common/mod.rs index 34e96e1f9..8e5d51ef7 100644 --- a/salsa-2022-tests/src/lib.rs +++ b/tests/common/mod.rs @@ -1,4 +1,7 @@ -/// Utility for tests that lets us log when notable events happen. +//! Utility for tests that lets us log when notable events happen. + +#![allow(dead_code)] + #[derive(Default)] pub struct Logger { logs: std::sync::Mutex>, diff --git a/salsa-2022-tests/tests/compile-fail/accumulator_fields_incompatibles.rs b/tests/compile-fail/accumulator_fields_incompatibles.rs similarity index 100% rename from salsa-2022-tests/tests/compile-fail/accumulator_fields_incompatibles.rs rename to tests/compile-fail/accumulator_fields_incompatibles.rs diff --git a/salsa-2022-tests/tests/compile-fail/accumulator_fields_incompatibles.stderr b/tests/compile-fail/accumulator_fields_incompatibles.stderr similarity index 100% rename from salsa-2022-tests/tests/compile-fail/accumulator_fields_incompatibles.stderr rename to tests/compile-fail/accumulator_fields_incompatibles.stderr diff --git a/salsa-2022-tests/tests/compile-fail/accumulator_incompatibles.rs b/tests/compile-fail/accumulator_incompatibles.rs similarity index 100% rename from salsa-2022-tests/tests/compile-fail/accumulator_incompatibles.rs rename to tests/compile-fail/accumulator_incompatibles.rs diff --git a/salsa-2022-tests/tests/compile-fail/accumulator_incompatibles.stderr b/tests/compile-fail/accumulator_incompatibles.stderr similarity index 100% rename from salsa-2022-tests/tests/compile-fail/accumulator_incompatibles.stderr rename to tests/compile-fail/accumulator_incompatibles.stderr diff --git a/salsa-2022-tests/tests/compile-fail/get-set-on-private-field.rs b/tests/compile-fail/get-set-on-private-field.rs similarity index 
100% rename from salsa-2022-tests/tests/compile-fail/get-set-on-private-field.rs rename to tests/compile-fail/get-set-on-private-field.rs diff --git a/salsa-2022-tests/tests/compile-fail/get-set-on-private-field.stderr b/tests/compile-fail/get-set-on-private-field.stderr similarity index 100% rename from salsa-2022-tests/tests/compile-fail/get-set-on-private-field.stderr rename to tests/compile-fail/get-set-on-private-field.stderr diff --git a/salsa-2022-tests/tests/compile-fail/input_struct_id_fields_no_setters.rs b/tests/compile-fail/input_struct_id_fields_no_setters.rs similarity index 100% rename from salsa-2022-tests/tests/compile-fail/input_struct_id_fields_no_setters.rs rename to tests/compile-fail/input_struct_id_fields_no_setters.rs diff --git a/salsa-2022-tests/tests/compile-fail/input_struct_id_fields_no_setters.stderr b/tests/compile-fail/input_struct_id_fields_no_setters.stderr similarity index 100% rename from salsa-2022-tests/tests/compile-fail/input_struct_id_fields_no_setters.stderr rename to tests/compile-fail/input_struct_id_fields_no_setters.stderr diff --git a/salsa-2022-tests/tests/compile-fail/input_struct_incompatibles.rs b/tests/compile-fail/input_struct_incompatibles.rs similarity index 100% rename from salsa-2022-tests/tests/compile-fail/input_struct_incompatibles.rs rename to tests/compile-fail/input_struct_incompatibles.rs diff --git a/salsa-2022-tests/tests/compile-fail/input_struct_incompatibles.stderr b/tests/compile-fail/input_struct_incompatibles.stderr similarity index 100% rename from salsa-2022-tests/tests/compile-fail/input_struct_incompatibles.stderr rename to tests/compile-fail/input_struct_incompatibles.stderr diff --git a/salsa-2022-tests/tests/compile-fail/interned_struct_incompatibles.rs b/tests/compile-fail/interned_struct_incompatibles.rs similarity index 100% rename from salsa-2022-tests/tests/compile-fail/interned_struct_incompatibles.rs rename to tests/compile-fail/interned_struct_incompatibles.rs diff --git 
a/salsa-2022-tests/tests/compile-fail/interned_struct_incompatibles.stderr b/tests/compile-fail/interned_struct_incompatibles.stderr similarity index 100% rename from salsa-2022-tests/tests/compile-fail/interned_struct_incompatibles.stderr rename to tests/compile-fail/interned_struct_incompatibles.stderr diff --git a/salsa-2022-tests/tests/compile-fail/jars_incompatibles.rs b/tests/compile-fail/jars_incompatibles.rs similarity index 100% rename from salsa-2022-tests/tests/compile-fail/jars_incompatibles.rs rename to tests/compile-fail/jars_incompatibles.rs diff --git a/salsa-2022-tests/tests/compile-fail/jars_incompatibles.stderr b/tests/compile-fail/jars_incompatibles.stderr similarity index 100% rename from salsa-2022-tests/tests/compile-fail/jars_incompatibles.stderr rename to tests/compile-fail/jars_incompatibles.stderr diff --git a/salsa-2022-tests/tests/compile-fail/lru_can_not_be_used_with_specify.rs b/tests/compile-fail/lru_can_not_be_used_with_specify.rs similarity index 100% rename from salsa-2022-tests/tests/compile-fail/lru_can_not_be_used_with_specify.rs rename to tests/compile-fail/lru_can_not_be_used_with_specify.rs diff --git a/salsa-2022-tests/tests/compile-fail/lru_can_not_be_used_with_specify.stderr b/tests/compile-fail/lru_can_not_be_used_with_specify.stderr similarity index 100% rename from salsa-2022-tests/tests/compile-fail/lru_can_not_be_used_with_specify.stderr rename to tests/compile-fail/lru_can_not_be_used_with_specify.stderr diff --git a/salsa-2022-tests/tests/compile-fail/panic-when-reading-fields-of-tracked-structs-from-older-revisions.rs b/tests/compile-fail/panic-when-reading-fields-of-tracked-structs-from-older-revisions.rs similarity index 100% rename from salsa-2022-tests/tests/compile-fail/panic-when-reading-fields-of-tracked-structs-from-older-revisions.rs rename to tests/compile-fail/panic-when-reading-fields-of-tracked-structs-from-older-revisions.rs diff --git 
a/salsa-2022-tests/tests/compile-fail/panic-when-reading-fields-of-tracked-structs-from-older-revisions.stderr b/tests/compile-fail/panic-when-reading-fields-of-tracked-structs-from-older-revisions.stderr similarity index 100% rename from salsa-2022-tests/tests/compile-fail/panic-when-reading-fields-of-tracked-structs-from-older-revisions.stderr rename to tests/compile-fail/panic-when-reading-fields-of-tracked-structs-from-older-revisions.stderr diff --git a/salsa-2022-tests/tests/compile-fail/salsa_fields_incompatibles.rs b/tests/compile-fail/salsa_fields_incompatibles.rs similarity index 100% rename from salsa-2022-tests/tests/compile-fail/salsa_fields_incompatibles.rs rename to tests/compile-fail/salsa_fields_incompatibles.rs diff --git a/salsa-2022-tests/tests/compile-fail/salsa_fields_incompatibles.stderr b/tests/compile-fail/salsa_fields_incompatibles.stderr similarity index 100% rename from salsa-2022-tests/tests/compile-fail/salsa_fields_incompatibles.stderr rename to tests/compile-fail/salsa_fields_incompatibles.stderr diff --git a/salsa-2022-tests/tests/compile-fail/singleton_only_for_input.rs b/tests/compile-fail/singleton_only_for_input.rs similarity index 100% rename from salsa-2022-tests/tests/compile-fail/singleton_only_for_input.rs rename to tests/compile-fail/singleton_only_for_input.rs diff --git a/salsa-2022-tests/tests/compile-fail/singleton_only_for_input.stderr b/tests/compile-fail/singleton_only_for_input.stderr similarity index 86% rename from salsa-2022-tests/tests/compile-fail/singleton_only_for_input.stderr rename to tests/compile-fail/singleton_only_for_input.stderr index 1069544a4..5be6a283e 100644 --- a/salsa-2022-tests/tests/compile-fail/singleton_only_for_input.stderr +++ b/tests/compile-fail/singleton_only_for_input.stderr @@ -16,6 +16,12 @@ error: `singleton` option not allowed here 33 | #[salsa::accumulator(singleton)] | ^^^^^^^^^ +error[E0432]: unresolved import `salsa_2022_tests` + --> 
tests/compile-fail/singleton_only_for_input.rs:5:5 + | +5 | use salsa_2022_tests::{HasLogger, Logger}; + | ^^^^^^^^^^^^^^^^ use of undeclared crate or module `salsa_2022_tests` + error[E0412]: cannot find type `MyTracked` in this scope --> tests/compile-fail/singleton_only_for_input.rs:10:21 | diff --git a/salsa-2022-tests/tests/compile-fail/span-input-setter.rs b/tests/compile-fail/span-input-setter.rs similarity index 100% rename from salsa-2022-tests/tests/compile-fail/span-input-setter.rs rename to tests/compile-fail/span-input-setter.rs diff --git a/salsa-2022-tests/tests/compile-fail/span-input-setter.stderr b/tests/compile-fail/span-input-setter.stderr similarity index 100% rename from salsa-2022-tests/tests/compile-fail/span-input-setter.stderr rename to tests/compile-fail/span-input-setter.stderr diff --git a/salsa-2022-tests/tests/compile-fail/span-tracked-getter.rs b/tests/compile-fail/span-tracked-getter.rs similarity index 100% rename from salsa-2022-tests/tests/compile-fail/span-tracked-getter.rs rename to tests/compile-fail/span-tracked-getter.rs diff --git a/salsa-2022-tests/tests/compile-fail/span-tracked-getter.stderr b/tests/compile-fail/span-tracked-getter.stderr similarity index 100% rename from salsa-2022-tests/tests/compile-fail/span-tracked-getter.stderr rename to tests/compile-fail/span-tracked-getter.stderr diff --git a/salsa-2022-tests/tests/compile-fail/specify-does-not-work-if-the-key-is-a-salsa-input.rs b/tests/compile-fail/specify-does-not-work-if-the-key-is-a-salsa-input.rs similarity index 100% rename from salsa-2022-tests/tests/compile-fail/specify-does-not-work-if-the-key-is-a-salsa-input.rs rename to tests/compile-fail/specify-does-not-work-if-the-key-is-a-salsa-input.rs diff --git a/salsa-2022-tests/tests/compile-fail/specify-does-not-work-if-the-key-is-a-salsa-input.stderr b/tests/compile-fail/specify-does-not-work-if-the-key-is-a-salsa-input.stderr similarity index 94% rename from 
salsa-2022-tests/tests/compile-fail/specify-does-not-work-if-the-key-is-a-salsa-input.stderr rename to tests/compile-fail/specify-does-not-work-if-the-key-is-a-salsa-input.stderr index f6eb291fc..eee7370cc 100644 --- a/salsa-2022-tests/tests/compile-fail/specify-does-not-work-if-the-key-is-a-salsa-input.stderr +++ b/tests/compile-fail/specify-does-not-work-if-the-key-is-a-salsa-input.stderr @@ -6,7 +6,7 @@ error[E0277]: the trait bound `MyInput: TrackedStructInDb` is not satisf | = help: the trait `TrackedStructInDb` is implemented for `MyTracked<'db>` note: required by a bound in `function::specify::>::specify_and_record` - --> $WORKSPACE/components/salsa-2022/src/function/specify.rs + --> src/function/specify.rs | | pub fn specify_and_record<'db>(&'db self, db: &'db DynDb<'db, C>, key: Id, value: C::Value<'db>) | ------------------ required by a bound in this associated function diff --git a/salsa-2022-tests/tests/compile-fail/specify-does-not-work-if-the-key-is-a-salsa-interned.rs b/tests/compile-fail/specify-does-not-work-if-the-key-is-a-salsa-interned.rs similarity index 100% rename from salsa-2022-tests/tests/compile-fail/specify-does-not-work-if-the-key-is-a-salsa-interned.rs rename to tests/compile-fail/specify-does-not-work-if-the-key-is-a-salsa-interned.rs diff --git a/salsa-2022-tests/tests/compile-fail/specify-does-not-work-if-the-key-is-a-salsa-interned.stderr b/tests/compile-fail/specify-does-not-work-if-the-key-is-a-salsa-interned.stderr similarity index 94% rename from salsa-2022-tests/tests/compile-fail/specify-does-not-work-if-the-key-is-a-salsa-interned.stderr rename to tests/compile-fail/specify-does-not-work-if-the-key-is-a-salsa-interned.stderr index 2d9009269..f91cca731 100644 --- a/salsa-2022-tests/tests/compile-fail/specify-does-not-work-if-the-key-is-a-salsa-interned.stderr +++ b/tests/compile-fail/specify-does-not-work-if-the-key-is-a-salsa-interned.stderr @@ -6,7 +6,7 @@ error[E0277]: the trait bound `MyInterned<'_>: TrackedStructInDb` 
is not | = help: the trait `TrackedStructInDb` is implemented for `MyTracked<'db>` note: required by a bound in `function::specify::>::specify_and_record` - --> $WORKSPACE/components/salsa-2022/src/function/specify.rs + --> src/function/specify.rs | | pub fn specify_and_record<'db>(&'db self, db: &'db DynDb<'db, C>, key: Id, value: C::Value<'db>) | ------------------ required by a bound in this associated function diff --git a/salsa-2022-tests/tests/compile-fail/tracked_fn_incompatibles.rs b/tests/compile-fail/tracked_fn_incompatibles.rs similarity index 100% rename from salsa-2022-tests/tests/compile-fail/tracked_fn_incompatibles.rs rename to tests/compile-fail/tracked_fn_incompatibles.rs diff --git a/salsa-2022-tests/tests/compile-fail/tracked_fn_incompatibles.stderr b/tests/compile-fail/tracked_fn_incompatibles.stderr similarity index 100% rename from salsa-2022-tests/tests/compile-fail/tracked_fn_incompatibles.stderr rename to tests/compile-fail/tracked_fn_incompatibles.stderr diff --git a/salsa-2022-tests/tests/compile-fail/tracked_impl_incompatibles.rs b/tests/compile-fail/tracked_impl_incompatibles.rs similarity index 100% rename from salsa-2022-tests/tests/compile-fail/tracked_impl_incompatibles.rs rename to tests/compile-fail/tracked_impl_incompatibles.rs diff --git a/salsa-2022-tests/tests/compile-fail/tracked_impl_incompatibles.stderr b/tests/compile-fail/tracked_impl_incompatibles.stderr similarity index 100% rename from salsa-2022-tests/tests/compile-fail/tracked_impl_incompatibles.stderr rename to tests/compile-fail/tracked_impl_incompatibles.stderr diff --git a/salsa-2022-tests/tests/compile-fail/tracked_method_incompatibles.rs b/tests/compile-fail/tracked_method_incompatibles.rs similarity index 100% rename from salsa-2022-tests/tests/compile-fail/tracked_method_incompatibles.rs rename to tests/compile-fail/tracked_method_incompatibles.rs diff --git a/salsa-2022-tests/tests/compile-fail/tracked_method_incompatibles.stderr 
b/tests/compile-fail/tracked_method_incompatibles.stderr similarity index 100% rename from salsa-2022-tests/tests/compile-fail/tracked_method_incompatibles.stderr rename to tests/compile-fail/tracked_method_incompatibles.stderr diff --git a/salsa-2022-tests/tests/compile-fail/tracked_method_on_untracked_impl.rs b/tests/compile-fail/tracked_method_on_untracked_impl.rs similarity index 100% rename from salsa-2022-tests/tests/compile-fail/tracked_method_on_untracked_impl.rs rename to tests/compile-fail/tracked_method_on_untracked_impl.rs diff --git a/salsa-2022-tests/tests/compile-fail/tracked_method_on_untracked_impl.stderr b/tests/compile-fail/tracked_method_on_untracked_impl.stderr similarity index 100% rename from salsa-2022-tests/tests/compile-fail/tracked_method_on_untracked_impl.stderr rename to tests/compile-fail/tracked_method_on_untracked_impl.stderr diff --git a/salsa-2022-tests/tests/compile-fail/tracked_struct_incompatibles.rs b/tests/compile-fail/tracked_struct_incompatibles.rs similarity index 100% rename from salsa-2022-tests/tests/compile-fail/tracked_struct_incompatibles.rs rename to tests/compile-fail/tracked_struct_incompatibles.rs diff --git a/salsa-2022-tests/tests/compile-fail/tracked_struct_incompatibles.stderr b/tests/compile-fail/tracked_struct_incompatibles.stderr similarity index 100% rename from salsa-2022-tests/tests/compile-fail/tracked_struct_incompatibles.stderr rename to tests/compile-fail/tracked_struct_incompatibles.stderr diff --git a/salsa-2022-tests/tests/compile_fail.rs b/tests/compile_fail.rs similarity index 100% rename from salsa-2022-tests/tests/compile_fail.rs rename to tests/compile_fail.rs diff --git a/salsa-2022-tests/tests/create-empty-database.rs b/tests/create-empty-database.rs similarity index 100% rename from salsa-2022-tests/tests/create-empty-database.rs rename to tests/create-empty-database.rs diff --git a/salsa-2022-tests/tests/create-large-jar-database.rs b/tests/create-large-jar-database.rs similarity index 
100% rename from salsa-2022-tests/tests/create-large-jar-database.rs rename to tests/create-large-jar-database.rs diff --git a/salsa-2022-tests/tests/cycles.rs b/tests/cycles.rs similarity index 100% rename from salsa-2022-tests/tests/cycles.rs rename to tests/cycles.rs diff --git a/salsa-2022-tests/tests/debug.rs b/tests/debug.rs similarity index 100% rename from salsa-2022-tests/tests/debug.rs rename to tests/debug.rs diff --git a/salsa-2022-tests/tests/deletion-cascade.rs b/tests/deletion-cascade.rs similarity index 98% rename from salsa-2022-tests/tests/deletion-cascade.rs rename to tests/deletion-cascade.rs index 932336205..eb6c83920 100644 --- a/salsa-2022-tests/tests/deletion-cascade.rs +++ b/tests/deletion-cascade.rs @@ -3,7 +3,8 @@ //! * when we delete memoized data, also delete outputs from that data use salsa::DebugWithDb; -use salsa_2022_tests::{HasLogger, Logger}; +mod common; +use common::{HasLogger, Logger}; use expect_test::expect; use test_log::test; diff --git a/salsa-2022-tests/tests/deletion.rs b/tests/deletion.rs similarity index 98% rename from salsa-2022-tests/tests/deletion.rs rename to tests/deletion.rs index cc0394151..fb5298729 100644 --- a/salsa-2022-tests/tests/deletion.rs +++ b/tests/deletion.rs @@ -3,7 +3,8 @@ //! * entities not created in a revision are deleted, as is any memoized data keyed on them. use salsa::DebugWithDb; -use salsa_2022_tests::{HasLogger, Logger}; +mod common; +use common::{HasLogger, Logger}; use expect_test::expect; use test_log::test; diff --git a/salsa-2022-tests/tests/elided-lifetime-in-tracked-fn.rs b/tests/elided-lifetime-in-tracked-fn.rs similarity index 97% rename from salsa-2022-tests/tests/elided-lifetime-in-tracked-fn.rs rename to tests/elided-lifetime-in-tracked-fn.rs index 3bae26671..5ce24f478 100644 --- a/salsa-2022-tests/tests/elided-lifetime-in-tracked-fn.rs +++ b/tests/elided-lifetime-in-tracked-fn.rs @@ -1,7 +1,8 @@ //! Test that a `tracked` fn on a `salsa::input` //! 
compiles and executes successfully. -use salsa_2022_tests::{HasLogger, Logger}; +mod common; +use common::{HasLogger, Logger}; use expect_test::expect; use test_log::test; diff --git a/salsa-2022-tests/tests/expect_reuse_field_x_of_a_tracked_struct_changes_but_fn_depends_on_field_y.rs b/tests/expect_reuse_field_x_of_a_tracked_struct_changes_but_fn_depends_on_field_y.rs similarity index 98% rename from salsa-2022-tests/tests/expect_reuse_field_x_of_a_tracked_struct_changes_but_fn_depends_on_field_y.rs rename to tests/expect_reuse_field_x_of_a_tracked_struct_changes_but_fn_depends_on_field_y.rs index d3b7c4654..dab6c0fcc 100644 --- a/salsa-2022-tests/tests/expect_reuse_field_x_of_a_tracked_struct_changes_but_fn_depends_on_field_y.rs +++ b/tests/expect_reuse_field_x_of_a_tracked_struct_changes_but_fn_depends_on_field_y.rs @@ -3,7 +3,8 @@ //! compiles and executes successfully. #![allow(dead_code)] -use salsa_2022_tests::{HasLogger, Logger}; +mod common; +use common::{HasLogger, Logger}; use expect_test::expect; diff --git a/salsa-2022-tests/tests/expect_reuse_field_x_of_an_input_changes_but_fn_depends_on_field_y.rs b/tests/expect_reuse_field_x_of_an_input_changes_but_fn_depends_on_field_y.rs similarity index 97% rename from salsa-2022-tests/tests/expect_reuse_field_x_of_an_input_changes_but_fn_depends_on_field_y.rs rename to tests/expect_reuse_field_x_of_an_input_changes_but_fn_depends_on_field_y.rs index 0096210bd..76643443c 100644 --- a/salsa-2022-tests/tests/expect_reuse_field_x_of_an_input_changes_but_fn_depends_on_field_y.rs +++ b/tests/expect_reuse_field_x_of_an_input_changes_but_fn_depends_on_field_y.rs @@ -3,7 +3,8 @@ //! compiles and executes successfully. 
#![allow(dead_code)] -use salsa_2022_tests::{HasLogger, Logger}; +mod common; +use common::{HasLogger, Logger}; use expect_test::expect; diff --git a/salsa-2022-tests/tests/hello_world.rs b/tests/hello_world.rs similarity index 98% rename from salsa-2022-tests/tests/hello_world.rs rename to tests/hello_world.rs index d47263064..e14d4b6bb 100644 --- a/salsa-2022-tests/tests/hello_world.rs +++ b/tests/hello_world.rs @@ -1,7 +1,8 @@ //! Test that a `tracked` fn on a `salsa::input` //! compiles and executes successfully. -use salsa_2022_tests::{HasLogger, Logger}; +mod common; +use common::{HasLogger, Logger}; use expect_test::expect; use test_log::test; diff --git a/salsa-2022-tests/tests/input_with_ids.rs b/tests/input_with_ids.rs similarity index 100% rename from salsa-2022-tests/tests/input_with_ids.rs rename to tests/input_with_ids.rs diff --git a/salsa-2022-tests/tests/interned-struct-with-lifetime.rs b/tests/interned-struct-with-lifetime.rs similarity index 96% rename from salsa-2022-tests/tests/interned-struct-with-lifetime.rs rename to tests/interned-struct-with-lifetime.rs index e44f498e4..2e7bee80c 100644 --- a/salsa-2022-tests/tests/interned-struct-with-lifetime.rs +++ b/tests/interned-struct-with-lifetime.rs @@ -1,7 +1,8 @@ //! Test that a `tracked` fn on a `salsa::input` //! compiles and executes successfully. 
use salsa::DebugWithDb; -use salsa_2022_tests::{HasLogger, Logger}; +mod common; +use common::{HasLogger, Logger}; use expect_test::expect; use test_log::test; diff --git a/salsa-2022-tests/tests/is_send_sync.rs b/tests/is_send_sync.rs similarity index 100% rename from salsa-2022-tests/tests/is_send_sync.rs rename to tests/is_send_sync.rs diff --git a/salsa-2022-tests/tests/lru.rs b/tests/lru.rs similarity index 99% rename from salsa-2022-tests/tests/lru.rs rename to tests/lru.rs index 2b84f3cc3..51fb625e8 100644 --- a/salsa-2022-tests/tests/lru.rs +++ b/tests/lru.rs @@ -7,7 +7,8 @@ use std::sync::{ }; use salsa::Database; -use salsa_2022_tests::{HasLogger, Logger}; +mod common; +use common::{HasLogger, Logger}; use test_log::test; #[salsa::jar(db = Db)] diff --git a/salsa-2022-tests/tests/mutate_in_place.rs b/tests/mutate_in_place.rs similarity index 96% rename from salsa-2022-tests/tests/mutate_in_place.rs rename to tests/mutate_in_place.rs index e48795499..32327620d 100644 --- a/salsa-2022-tests/tests/mutate_in_place.rs +++ b/tests/mutate_in_place.rs @@ -1,7 +1,8 @@ //! Test that a setting a field on a `#[salsa::input]` //! overwrites and returns the old value. 
-use salsa_2022_tests::{HasLogger, Logger}; +mod common; +use common::{HasLogger, Logger}; use test_log::test; diff --git a/salsa-2022-tests/tests/override_new_get_set.rs b/tests/override_new_get_set.rs similarity index 100% rename from salsa-2022-tests/tests/override_new_get_set.rs rename to tests/override_new_get_set.rs diff --git a/salsa-2022-tests/tests/panic-when-creating-tracked-struct-outside-of-tracked-fn.rs b/tests/panic-when-creating-tracked-struct-outside-of-tracked-fn.rs similarity index 100% rename from salsa-2022-tests/tests/panic-when-creating-tracked-struct-outside-of-tracked-fn.rs rename to tests/panic-when-creating-tracked-struct-outside-of-tracked-fn.rs diff --git a/salsa-2022-tests/tests/parallel/main.rs b/tests/parallel/main.rs similarity index 100% rename from salsa-2022-tests/tests/parallel/main.rs rename to tests/parallel/main.rs diff --git a/salsa-2022-tests/tests/parallel/parallel_cycle_all_recover.rs b/tests/parallel/parallel_cycle_all_recover.rs similarity index 100% rename from salsa-2022-tests/tests/parallel/parallel_cycle_all_recover.rs rename to tests/parallel/parallel_cycle_all_recover.rs diff --git a/salsa-2022-tests/tests/parallel/parallel_cycle_mid_recover.rs b/tests/parallel/parallel_cycle_mid_recover.rs similarity index 100% rename from salsa-2022-tests/tests/parallel/parallel_cycle_mid_recover.rs rename to tests/parallel/parallel_cycle_mid_recover.rs diff --git a/salsa-2022-tests/tests/parallel/parallel_cycle_none_recover.rs b/tests/parallel/parallel_cycle_none_recover.rs similarity index 100% rename from salsa-2022-tests/tests/parallel/parallel_cycle_none_recover.rs rename to tests/parallel/parallel_cycle_none_recover.rs diff --git a/salsa-2022-tests/tests/parallel/parallel_cycle_one_recover.rs b/tests/parallel/parallel_cycle_one_recover.rs similarity index 100% rename from salsa-2022-tests/tests/parallel/parallel_cycle_one_recover.rs rename to tests/parallel/parallel_cycle_one_recover.rs diff --git 
a/salsa-2022-tests/tests/parallel/setup.rs b/tests/parallel/setup.rs similarity index 100% rename from salsa-2022-tests/tests/parallel/setup.rs rename to tests/parallel/setup.rs diff --git a/salsa-2022-tests/tests/parallel/signal.rs b/tests/parallel/signal.rs similarity index 100% rename from salsa-2022-tests/tests/parallel/signal.rs rename to tests/parallel/signal.rs diff --git a/salsa-2022-tests/tests/preverify-struct-with-leaked-data.rs b/tests/preverify-struct-with-leaked-data.rs similarity index 98% rename from salsa-2022-tests/tests/preverify-struct-with-leaked-data.rs rename to tests/preverify-struct-with-leaked-data.rs index 7d542a65f..38104f19a 100644 --- a/salsa-2022-tests/tests/preverify-struct-with-leaked-data.rs +++ b/tests/preverify-struct-with-leaked-data.rs @@ -5,7 +5,8 @@ use std::cell::Cell; use expect_test::expect; use salsa::DebugWithDb; -use salsa_2022_tests::{HasLogger, Logger}; +mod common; +use common::{HasLogger, Logger}; use test_log::test; thread_local! { diff --git a/salsa-2022-tests/tests/singleton.rs b/tests/singleton.rs similarity index 96% rename from salsa-2022-tests/tests/singleton.rs rename to tests/singleton.rs index d72b19ed3..adf339ed9 100644 --- a/salsa-2022-tests/tests/singleton.rs +++ b/tests/singleton.rs @@ -4,7 +4,8 @@ use expect_test::expect; use salsa::DebugWithDb; -use salsa_2022_tests::{HasLogger, Logger}; +mod common; +use common::{HasLogger, Logger}; use test_log::test; diff --git a/salsa-2022-tests/tests/specify-only-works-if-the-key-is-created-in-the-current-query.rs b/tests/specify-only-works-if-the-key-is-created-in-the-current-query.rs similarity index 100% rename from salsa-2022-tests/tests/specify-only-works-if-the-key-is-created-in-the-current-query.rs rename to tests/specify-only-works-if-the-key-is-created-in-the-current-query.rs diff --git a/salsa-2022-tests/tests/specify_tracked_fn_in_rev_1_but_not_2.rs b/tests/specify_tracked_fn_in_rev_1_but_not_2.rs similarity index 99% rename from 
salsa-2022-tests/tests/specify_tracked_fn_in_rev_1_but_not_2.rs rename to tests/specify_tracked_fn_in_rev_1_but_not_2.rs index 998815692..a96e65057 100644 --- a/salsa-2022-tests/tests/specify_tracked_fn_in_rev_1_but_not_2.rs +++ b/tests/specify_tracked_fn_in_rev_1_but_not_2.rs @@ -3,7 +3,8 @@ use expect_test::expect; use salsa::DebugWithDb; -use salsa_2022_tests::{HasLogger, Logger}; +mod common; +use common::{HasLogger, Logger}; use test_log::test; #[salsa::jar(db = Db)] diff --git a/salsa-2022-tests/tests/tracked-struct-id-field-bad-eq.rs b/tests/tracked-struct-id-field-bad-eq.rs similarity index 100% rename from salsa-2022-tests/tests/tracked-struct-id-field-bad-eq.rs rename to tests/tracked-struct-id-field-bad-eq.rs diff --git a/salsa-2022-tests/tests/tracked-struct-id-field-bad-hash.rs b/tests/tracked-struct-id-field-bad-hash.rs similarity index 100% rename from salsa-2022-tests/tests/tracked-struct-id-field-bad-hash.rs rename to tests/tracked-struct-id-field-bad-hash.rs diff --git a/salsa-2022-tests/tests/tracked-struct-unchanged-in-new-rev.rs b/tests/tracked-struct-unchanged-in-new-rev.rs similarity index 100% rename from salsa-2022-tests/tests/tracked-struct-unchanged-in-new-rev.rs rename to tests/tracked-struct-unchanged-in-new-rev.rs diff --git a/salsa-2022-tests/tests/tracked-struct-value-field-bad-eq.rs b/tests/tracked-struct-value-field-bad-eq.rs similarity index 98% rename from salsa-2022-tests/tests/tracked-struct-value-field-bad-eq.rs rename to tests/tracked-struct-value-field-bad-eq.rs index e6665d254..cecfcbbdc 100644 --- a/salsa-2022-tests/tests/tracked-struct-value-field-bad-eq.rs +++ b/tests/tracked-struct-value-field-bad-eq.rs @@ -4,7 +4,8 @@ use expect_test::expect; use salsa::DebugWithDb; -use salsa_2022_tests::{HasLogger, Logger}; +mod common; +use common::{HasLogger, Logger}; use test_log::test; #[salsa::jar(db = Db)] diff --git a/salsa-2022-tests/tests/tracked-struct-value-field-not-eq.rs b/tests/tracked-struct-value-field-not-eq.rs 
similarity index 100% rename from salsa-2022-tests/tests/tracked-struct-value-field-not-eq.rs rename to tests/tracked-struct-value-field-not-eq.rs diff --git a/salsa-2022-tests/tests/tracked_fn_constant.rs b/tests/tracked_fn_constant.rs similarity index 100% rename from salsa-2022-tests/tests/tracked_fn_constant.rs rename to tests/tracked_fn_constant.rs diff --git a/salsa-2022-tests/tests/tracked_fn_on_input.rs b/tests/tracked_fn_on_input.rs similarity index 100% rename from salsa-2022-tests/tests/tracked_fn_on_input.rs rename to tests/tracked_fn_on_input.rs diff --git a/salsa-2022-tests/tests/tracked_fn_on_tracked.rs b/tests/tracked_fn_on_tracked.rs similarity index 100% rename from salsa-2022-tests/tests/tracked_fn_on_tracked.rs rename to tests/tracked_fn_on_tracked.rs diff --git a/salsa-2022-tests/tests/tracked_fn_on_tracked_specify.rs b/tests/tracked_fn_on_tracked_specify.rs similarity index 100% rename from salsa-2022-tests/tests/tracked_fn_on_tracked_specify.rs rename to tests/tracked_fn_on_tracked_specify.rs diff --git a/salsa-2022-tests/tests/tracked_fn_read_own_entity.rs b/tests/tracked_fn_read_own_entity.rs similarity index 98% rename from salsa-2022-tests/tests/tracked_fn_read_own_entity.rs rename to tests/tracked_fn_read_own_entity.rs index 0821c839c..0aa354dcc 100644 --- a/salsa-2022-tests/tests/tracked_fn_read_own_entity.rs +++ b/tests/tracked_fn_read_own_entity.rs @@ -2,7 +2,8 @@ //! compiles and executes successfully. 
use expect_test::expect; -use salsa_2022_tests::{HasLogger, Logger}; +mod common; +use common::{HasLogger, Logger}; use test_log::test; #[salsa::jar(db = Db)] diff --git a/salsa-2022-tests/tests/tracked_fn_read_own_specify.rs b/tests/tracked_fn_read_own_specify.rs similarity index 97% rename from salsa-2022-tests/tests/tracked_fn_read_own_specify.rs rename to tests/tracked_fn_read_own_specify.rs index 8633572e5..5785464af 100644 --- a/salsa-2022-tests/tests/tracked_fn_read_own_specify.rs +++ b/tests/tracked_fn_read_own_specify.rs @@ -1,6 +1,7 @@ use expect_test::expect; use salsa::{Database as SalsaDatabase, DebugWithDb}; -use salsa_2022_tests::{HasLogger, Logger}; +mod common; +use common::{HasLogger, Logger}; #[salsa::jar(db = Db)] struct Jar(MyInput, MyTracked<'_>, tracked_fn, tracked_fn_extra); diff --git a/salsa-2022-tests/tests/tracked_method.rs b/tests/tracked_method.rs similarity index 100% rename from salsa-2022-tests/tests/tracked_method.rs rename to tests/tracked_method.rs diff --git a/salsa-2022-tests/tests/tracked_struct_db1_lt.rs b/tests/tracked_struct_db1_lt.rs similarity index 94% rename from salsa-2022-tests/tests/tracked_struct_db1_lt.rs rename to tests/tracked_struct_db1_lt.rs index d71d0cd2a..b5214f1bb 100644 --- a/salsa-2022-tests/tests/tracked_struct_db1_lt.rs +++ b/tests/tracked_struct_db1_lt.rs @@ -1,7 +1,8 @@ //! Test that tracked structs with lifetimes not named `'db` //! compile successfully. 
-use salsa_2022_tests::{HasLogger, Logger}; +mod common; +use common::{HasLogger, Logger}; use test_log::test; diff --git a/salsa-2022-tests/tests/tracked_with_intern.rs b/tests/tracked_with_intern.rs similarity index 100% rename from salsa-2022-tests/tests/tracked_with_intern.rs rename to tests/tracked_with_intern.rs diff --git a/salsa-2022-tests/tests/tracked_with_struct_db.rs b/tests/tracked_with_struct_db.rs similarity index 100% rename from salsa-2022-tests/tests/tracked_with_struct_db.rs rename to tests/tracked_with_struct_db.rs diff --git a/salsa-2022-tests/tests/warnings/main.rs b/tests/warnings/main.rs similarity index 100% rename from salsa-2022-tests/tests/warnings/main.rs rename to tests/warnings/main.rs diff --git a/salsa-2022-tests/tests/warnings/needless_borrow.rs b/tests/warnings/needless_borrow.rs similarity index 100% rename from salsa-2022-tests/tests/warnings/needless_borrow.rs rename to tests/warnings/needless_borrow.rs diff --git a/salsa-2022-tests/tests/warnings/needless_lifetimes.rs b/tests/warnings/needless_lifetimes.rs similarity index 100% rename from salsa-2022-tests/tests/warnings/needless_lifetimes.rs rename to tests/warnings/needless_lifetimes.rs diff --git a/salsa-2022-tests/tests/warnings/unused_variable_db.rs b/tests/warnings/unused_variable_db.rs similarity index 100% rename from salsa-2022-tests/tests/warnings/unused_variable_db.rs rename to tests/warnings/unused_variable_db.rs From 5ba40cf36c7ee5d06f00ba1011274b0c7d55c22c Mon Sep 17 00:00:00 2001 From: Matthijs Brobbel Date: Tue, 18 Jun 2024 09:52:37 +0200 Subject: [PATCH 3/7] Fix CI and examples --- .github/workflows/test.yml | 97 ++++++++++++++------------- Cargo.toml | 18 +++-- examples/calc/Cargo.toml | 15 ----- examples/calc/{src => }/compile.rs | 0 examples/calc/{src => }/db.rs | 0 examples/calc/{src => }/ir.rs | 0 examples/calc/{src => }/main.rs | 0 examples/calc/{src => }/parser.rs | 0 examples/calc/{src => }/type_check.rs | 0 examples/lazy-input/Cargo.toml | 14 ---- 
examples/lazy-input/{src => }/main.rs | 2 +- 11 files changed, 62 insertions(+), 84 deletions(-) delete mode 100644 examples/calc/Cargo.toml rename examples/calc/{src => }/compile.rs (100%) rename examples/calc/{src => }/db.rs (100%) rename examples/calc/{src => }/ir.rs (100%) rename examples/calc/{src => }/main.rs (100%) rename examples/calc/{src => }/parser.rs (100%) rename examples/calc/{src => }/type_check.rs (100%) delete mode 100644 examples/lazy-input/Cargo.toml rename examples/lazy-input/{src => }/main.rs (99%) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index e87fb3770..e8ca0de38 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -3,15 +3,15 @@ name: Test on: push: branches: - - master - - staging - - trying + - master + - staging + - trying pull_request: paths: - - '**.rs' - - '**/Cargo.*' - - '.github/workflows/**.yml' - - 'salsa-2022-tests/tests/compile-fail/**.stderr' + - "**.rs" + - "**/Cargo.*" + - ".github/workflows/**.yml" + - "tests/compile-fail/**.stderr" merge_group: jobs: @@ -20,52 +20,52 @@ jobs: strategy: matrix: rust: - - stable - - beta + - stable + - beta experimental: - - false + - false include: - - rust: nightly - experimental: true + - rust: nightly + experimental: true continue-on-error: ${{ matrix.experimental }} runs-on: ubuntu-latest steps: - - name: Checkout - uses: actions/checkout@v2 - - name: Setup Rust toolchain - uses: actions-rs/toolchain@v1 - with: - toolchain: ${{ matrix.rust }} - components: rustfmt, clippy - default: true - - name: Format - uses: actions-rs/cargo@v1 - with: - command: fmt - args: -- --check - - name: Clippy - uses: actions-rs/cargo@v1 - with: - command: clippy - args: --all-features --all-targets --all - - name: Test - uses: actions-rs/cargo@v1 - with: - command: test - args: --all-features --all-targets --all - - name: Test docs - uses: actions-rs/cargo@v1 - with: - command: test - args: --doc - - name: Check (without default features) - uses: 
actions-rs/cargo@v1 - with: - command: check - args: --no-default-features + - name: Checkout + uses: actions/checkout@v2 + - name: Setup Rust toolchain + uses: actions-rs/toolchain@v1 + with: + toolchain: ${{ matrix.rust }} + components: rustfmt, clippy + default: true + - name: Format + uses: actions-rs/cargo@v1 + with: + command: fmt + args: -- --check + - name: Clippy + uses: actions-rs/cargo@v1 + with: + command: clippy + args: --all-features --all-targets --workspace + - name: Test + uses: actions-rs/cargo@v1 + with: + command: test + args: --all-features --all-targets --workspace + - name: Test docs + uses: actions-rs/cargo@v1 + with: + command: test + args: --doc + - name: Check (without default features) + uses: actions-rs/cargo@v1 + with: + command: check + args: --no-default-features miri: - name: "Miri" + name: Miri runs-on: ubuntu-latest steps: - uses: actions/checkout@v3 @@ -75,4 +75,7 @@ jobs: rustup override set nightly cargo miri setup - name: Test with Miri - run: cargo miri test --no-fail-fast -p salsa-2022 -p salsa-2022-tests -p calc -p lazy-input + run: cargo miri test --no-fail-fast + - name: Run examples with Miri + run: | + cargo miri run --example calc diff --git a/Cargo.toml b/Cargo.toml index ee39217e8..f1a2d7a8f 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -8,25 +8,29 @@ repository = "https://github.com/salsa-rs/salsa" description = "A generic framework for on-demand, incrementalized computation (experimental)" [dependencies] +arc-swap = "1.6.0" crossbeam = "0.8.1" +crossbeam-utils = { version = "0.8", default-features = false } dashmap = "5.3.4" -rustc-hash = "1.1.0" -indexmap = "2" hashlink = "0.8.0" -arc-swap = "1.6.0" -crossbeam-utils = { version = "0.8", default-features = false } +indexmap = "2" log = "0.4.5" parking_lot = "0.12.1" -smallvec = "1.0.0" +rustc-hash = "1.1.0" salsa-macros = { path = "components/salsa-macros" } +smallvec = "1.0.0" [dev-dependencies] +derive-new = "0.5.9" +env_logger = "*" expect-test = "1.4.0" +eyre = 
"0.6.8" +notify-debouncer-mini = "0.2.1" +ordered-float = "3.0" parking_lot = "0.12.1" +rustversion = "1.0" test-log = "0.2.11" -env_logger = "*" trybuild = "1.0" -rustversion = "1.0" [workspace] members = ["components/salsa-macros"] diff --git a/examples/calc/Cargo.toml b/examples/calc/Cargo.toml deleted file mode 100644 index 6301adcee..000000000 --- a/examples/calc/Cargo.toml +++ /dev/null @@ -1,15 +0,0 @@ -[package] -name = "calc" -version = "0.1.0" -edition = "2021" - -# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html - -[dependencies] -derive-new = "0.5.9" -salsa = { path = "../../components/salsa-2022", package = "salsa-2022" } -ordered-float = "3.0" -test-log = { version = "0.2.15", features = ["trace"] } - -[dev-dependencies] -expect-test = "1.4.0" diff --git a/examples/calc/src/compile.rs b/examples/calc/compile.rs similarity index 100% rename from examples/calc/src/compile.rs rename to examples/calc/compile.rs diff --git a/examples/calc/src/db.rs b/examples/calc/db.rs similarity index 100% rename from examples/calc/src/db.rs rename to examples/calc/db.rs diff --git a/examples/calc/src/ir.rs b/examples/calc/ir.rs similarity index 100% rename from examples/calc/src/ir.rs rename to examples/calc/ir.rs diff --git a/examples/calc/src/main.rs b/examples/calc/main.rs similarity index 100% rename from examples/calc/src/main.rs rename to examples/calc/main.rs diff --git a/examples/calc/src/parser.rs b/examples/calc/parser.rs similarity index 100% rename from examples/calc/src/parser.rs rename to examples/calc/parser.rs diff --git a/examples/calc/src/type_check.rs b/examples/calc/type_check.rs similarity index 100% rename from examples/calc/src/type_check.rs rename to examples/calc/type_check.rs diff --git a/examples/lazy-input/Cargo.toml b/examples/lazy-input/Cargo.toml deleted file mode 100644 index bf7781569..000000000 --- a/examples/lazy-input/Cargo.toml +++ /dev/null @@ -1,14 +0,0 @@ -[package] -name = 
"lazy-input" -version = "0.1.0" -edition = "2021" - -[dependencies] -crossbeam-channel = "0.5.6" -dashmap = "5.4.0" -eyre = "0.6.8" -notify-debouncer-mini = "0.2.1" -salsa = { path = "../../components/salsa-2022", package = "salsa-2022" } - -[dev-dependencies] -expect-test = "1.4.0" diff --git a/examples/lazy-input/src/main.rs b/examples/lazy-input/main.rs similarity index 99% rename from examples/lazy-input/src/main.rs rename to examples/lazy-input/main.rs index d31c25397..ff305d5e2 100644 --- a/examples/lazy-input/src/main.rs +++ b/examples/lazy-input/main.rs @@ -1,6 +1,6 @@ use std::{path::PathBuf, sync::Mutex, time::Duration}; -use crossbeam_channel::{unbounded, Sender}; +use crossbeam::channel::{unbounded, Sender}; use dashmap::{mapref::entry::Entry, DashMap}; use eyre::{eyre, Context, Report, Result}; use notify_debouncer_mini::{ From b3838b1ca780a7328e9c51f9defb8cd4f4d79319 Mon Sep 17 00:00:00 2001 From: Matthijs Brobbel Date: Tue, 18 Jun 2024 09:59:17 +0200 Subject: [PATCH 4/7] Update book --- book/_redirects | 2 - book/netlify.sh | 31 +-------------- book/src/caveat.md | 5 --- book/src/common_patterns/on_demand_inputs.md | 2 - book/src/overview.md | 4 +- book/src/plumbing.md | 18 ++++----- book/src/plumbing/database_and_runtime.md | 20 +++++----- book/src/plumbing/jars_and_ingredients.md | 40 ++++++++++---------- book/src/plumbing/tracked_structs.md | 14 +++---- book/src/tutorial.md | 2 - components/salsa-macros/Cargo.toml | 2 +- 11 files changed, 46 insertions(+), 94 deletions(-) delete mode 100644 book/_redirects delete mode 100644 book/src/caveat.md diff --git a/book/_redirects b/book/_redirects deleted file mode 100644 index 3db106781..000000000 --- a/book/_redirects +++ /dev/null @@ -1,2 +0,0 @@ -# Redirects from what the browser requests to what we serve -/ /salsa2022 \ No newline at end of file diff --git a/book/netlify.sh b/book/netlify.sh index 7f6c3f264..01959d07d 100755 --- a/book/netlify.sh +++ b/book/netlify.sh @@ -14,33 +14,4 @@ curl -L 
https://github.com/Michael-F-Bryan/mdbook-linkcheck/releases/download/v$ unzip mdbook-linkcheck.v$MDBOOK_LINKCHECK_VERSION.x86_64-unknown-linux-gnu.zip -d ~/.cargo/bin chmod +x ~/.cargo/bin/mdbook-linkcheck -# ====================================================================== -# The following script automates the deployment of both the latest and a -# specified older version of the 'salsa' documentation using mdbook - -# Store the current branch or commit -original_branch=$(git rev-parse --abbrev-ref HEAD) -if [ "$original_branch" == "HEAD" ]; then - original_branch=$(git rev-parse HEAD) -fi - -mkdir -p versions # Create a root directory for all versions - -# Declare an associative array to map commits to custom version directory names -declare -A commit_to_version=( ["$original_branch"]="salsa2022" ["754eea8b5f8a31b1100ba313d59e41260b494225"]="salsa" ) - -# Loop over the keys (commit hashes or branch names) in the associative array -for commit in "${!commit_to_version[@]}"; do - git checkout $commit - mdbook build - version_dir="versions/${commit_to_version[$commit]}" - mkdir -p $version_dir - mv book/html/* $version_dir - rm -rf book -done - -# Return to the original branch or commit -git checkout $original_branch - -# Copy _redirects to the root directory -cp _redirects versions +mdbook build diff --git a/book/src/caveat.md b/book/src/caveat.md deleted file mode 100644 index 156de317c..000000000 --- a/book/src/caveat.md +++ /dev/null @@ -1,5 +0,0 @@ -> ⚠️ **IN-PROGRESS VERSION OF SALSA.** ⚠️ -> -> This page describes the unreleased "Salsa 2022" version, which is a major departure from older versions of salsa. The code here works but is only available on github and from the `salsa-2022` crate. 
-> -> If you are looking for the older version of salsa, simply visit [this link](https://salsa-rs.netlify.app/salsa) diff --git a/book/src/common_patterns/on_demand_inputs.md b/book/src/common_patterns/on_demand_inputs.md index 0fc3f50c5..6c37e7c68 100644 --- a/book/src/common_patterns/on_demand_inputs.md +++ b/book/src/common_patterns/on_demand_inputs.md @@ -1,7 +1,5 @@ # On-Demand (Lazy) Inputs -{{#include ../caveat.md}} - Salsa inputs work best if you can easily provide all of the inputs upfront. However sometimes the set of inputs is not known beforehand. diff --git a/book/src/overview.md b/book/src/overview.md index 2909aed5d..2c28146a0 100644 --- a/book/src/overview.md +++ b/book/src/overview.md @@ -1,7 +1,5 @@ # Salsa overview -{{#include caveat.md}} - This page contains a brief overview of the pieces of a Salsa program. For a more detailed look, check out the [tutorial](./tutorial.md), which walks through the creation of an entire project end-to-end. @@ -151,7 +149,7 @@ Tracked functions can return any clone-able type. A clone is required since, whe **Tracked structs** are intermediate structs created during your computation. Like inputs, their fields are stored inside the database, and the struct itself just wraps an id. Unlike inputs, they can only be created inside a tracked function, and their fields can never change once they are created (until the next revision, at least). -Getter methods are provided to read the fields, but there are no setter methods. +Getter methods are provided to read the fields, but there are no setter methods. Example: ```rust diff --git a/book/src/plumbing.md b/book/src/plumbing.md index d5ee58c92..f6eaab739 100644 --- a/book/src/plumbing.md +++ b/book/src/plumbing.md @@ -1,7 +1,5 @@ # Plumbing -{{#include caveat.md}} - This chapter documents the code that salsa generates and its "inner workings". We refer to this as the "plumbing". @@ -9,11 +7,11 @@ We refer to this as the "plumbing". 
The plumbing section is broken up into chapters: -* The [jars and ingredients](./plumbing/jars_and_ingredients.md) covers how each salsa item (like a tracked function) specifies what data it needs and runtime, and how links between items work. -* The [database and runtime](./plumbing/database_and_runtime.md) covers the data structures that are used at runtime to coordinate workers, trigger cancellation, track which functions are active and what dependencies they have accrued, and so forth. -* The [query operations](./plumbing/query_ops.md) chapter describes how the major operations on function ingredients work. This text was written for an older version of salsa but the logic is the same: - * The [maybe changed after](./plumbing/maybe_changed_after.md) operation determines when a memoized value for a tracked function is out of date. - * The [fetch](./plumbing/fetch.md) operation computes the most recent value. - * The [derived queries flowchart](./plumbing/derived_flowchart.md) depicts the logic in flowchart form. - * The [cycle handling](./plumbing/cycles.md) handling chapter describes what happens when cycles occur. -* The [terminology](./plumbing/terminology.md) section describes various words that appear throughout. \ No newline at end of file +- The [jars and ingredients](./plumbing/jars_and_ingredients.md) covers how each salsa item (like a tracked function) specifies what data it needs and runtime, and how links between items work. +- The [database and runtime](./plumbing/database_and_runtime.md) covers the data structures that are used at runtime to coordinate workers, trigger cancellation, track which functions are active and what dependencies they have accrued, and so forth. +- The [query operations](./plumbing/query_ops.md) chapter describes how the major operations on function ingredients work. 
This text was written for an older version of salsa but the logic is the same: + - The [maybe changed after](./plumbing/maybe_changed_after.md) operation determines when a memoized value for a tracked function is out of date. + - The [fetch](./plumbing/fetch.md) operation computes the most recent value. + - The [derived queries flowchart](./plumbing/derived_flowchart.md) depicts the logic in flowchart form. + - The [cycle handling](./plumbing/cycles.md) handling chapter describes what happens when cycles occur. +- The [terminology](./plumbing/terminology.md) section describes various words that appear throughout. diff --git a/book/src/plumbing/database_and_runtime.md b/book/src/plumbing/database_and_runtime.md index 06f0636f2..cc889c546 100644 --- a/book/src/plumbing/database_and_runtime.md +++ b/book/src/plumbing/database_and_runtime.md @@ -13,8 +13,8 @@ struct MyDatabase { This data is divided into two categories: -* Salsa-governed storage, contained in the `Storage` field. This data is mandatory. -* Other fields (like `maybe_other_fields`) defined by the user. This can be anything. This allows for you to give access to special resources or whatever. +- Salsa-governed storage, contained in the `Storage` field. This data is mandatory. +- Other fields (like `maybe_other_fields`) defined by the user. This can be anything. This allows for you to give access to special resources or whatever. ## Parallel handles @@ -28,10 +28,10 @@ The `Snapshot` method returns a `Snapshot` type, which prevents these clones The salsa `Storage` struct contains all the data that salsa itself will use and work with. There are three key bits of data: -* The `Shared` struct, which contains the data stored across all snapshots. This is primarily the ingredients described in the [jars and ingredients chapter](./jars_and_ingredients.md), but it also contains some synchronization information (a cond var). This is used for cancellation, as described below. 
- * The data in the `Shared` struct is only shared across threads when other threads are active. Some operations, like mutating an input, require an `&mut` handle to the `Shared` struct. This is obtained by using the `Arc::get_mut` methods; obviously this is only possible when all snapshots and threads have ceased executing, since there must be a single handle to the `Arc`. -* The `Routes` struct, which contains the information to find any particular ingredient -- this is also shared across all handles, and its construction is also described in the [jars and ingredients chapter](./jars_and_ingredients.md). The routes are separated out from the `Shared` struct because they are truly immutable at all times, and we want to be able to hold a handle to them while getting `&mut` access to the `Shared` struct. -* The `Runtime` struct, which is specific to a particular database instance. It contains the data for a single active thread, along with some links to shared data of its own. +- The `Shared` struct, which contains the data stored across all snapshots. This is primarily the ingredients described in the [jars and ingredients chapter](./jars_and_ingredients.md), but it also contains some synchronization information (a cond var). This is used for cancellation, as described below. + - The data in the `Shared` struct is only shared across threads when other threads are active. Some operations, like mutating an input, require an `&mut` handle to the `Shared` struct. This is obtained by using the `Arc::get_mut` methods; obviously this is only possible when all snapshots and threads have ceased executing, since there must be a single handle to the `Arc`. +- The `Routes` struct, which contains the information to find any particular ingredient -- this is also shared across all handles, and its construction is also described in the [jars and ingredients chapter](./jars_and_ingredients.md). 
The routes are separated out from the `Shared` struct because they are truly immutable at all times, and we want to be able to hold a handle to them while getting `&mut` access to the `Shared` struct. +- The `Runtime` struct, which is specific to a particular database instance. It contains the data for a single active thread, along with some links to shared data of its own. ## Incrementing the revision counter and getting mutable access to the jars @@ -43,20 +43,20 @@ Each of the snapshots however onlys another handle on the `Arc` in `Storage` tha Whenever the user attempts to do an `&mut`-operation, such as modifying an input field, that needs to first cancel any parallel snapshots and wait for those parallel threads to finish. Once the snapshots have completed, we can use `Arc::get_mut` to get an `&mut` reference to the ingredient data. -This allows us to get `&mut` access without any unsafe code and +This allows us to get `&mut` access without any unsafe code and guarantees that we have successfully managed to cancel the other worker threads (or gotten ourselves into a deadlock). The code to acquire `&mut` access to the database is the `jars_mut` method: ```rust -{{#include ../../../components/salsa-2022/src/storage.rs:jars_mut}} +{{#include ../../../src/storage.rs:jars_mut}} ``` The key initial point is that it invokes `cancel_other_workers` before proceeding: ```rust -{{#include ../../../components/salsa-2022/src/storage.rs:cancel_other_workers}} +{{#include ../../../src/storage.rs:cancel_other_workers}} ``` ## The Salsa runtime @@ -68,5 +68,3 @@ It also tracks the current revision and information about when values with low o Basically, the ingredient structures store the "data at rest" -- like memoized values -- and things that are "per ingredient". The runtime stores the "active, in-progress" data, such as which queries are on the stack, and/or the dependencies accessed by the currently active query. 
- - diff --git a/book/src/plumbing/jars_and_ingredients.md b/book/src/plumbing/jars_and_ingredients.md index 2928bac31..108935cd5 100644 --- a/book/src/plumbing/jars_and_ingredients.md +++ b/book/src/plumbing/jars_and_ingredients.md @@ -1,10 +1,8 @@ # Jars and ingredients -{{#include ../caveat.md}} - This page covers how data is organized in Salsa and how links between Salsa items (e.g., dependency tracking) work. -## Salsa items and ingredients +## Salsa items and ingredients A **Salsa item** is some item annotated with a Salsa annotation that can be included in a jar. For example, a tracked function is a Salsa item: @@ -117,7 +115,7 @@ struct MyDatabase { ...the `salsa::db` macro would generate a `HasJars` impl that (among other things) contains `type Jars = (Jar1, ..., JarN)`: ```rust,ignore -{{#include ../../../components/salsa-2022-macros/src/db.rs:HasJars}} +{{#include ../../../components/salsa-macros/src/db.rs:HasJars}} ``` In turn, the `salsa::Storage` type ultimately contains a struct `Shared` that embeds `DB::Jars`, thus embedding all the data for each jar. @@ -131,7 +129,7 @@ This is a 32-bit number that identifies a particular ingredient from a particula ### Routes -In addition to an index, each ingredient in the database also has a corresponding *route*. +In addition to an index, each ingredient in the database also has a corresponding _route_. A route is a closure that, given a reference to the `DB::Jars` tuple, returns a `&dyn Ingredient` reference. The route table allows us to go from the `IngredientIndex` for a particular ingredient @@ -145,7 +143,7 @@ A `DatabaseKeyIndex` identifies a specific value stored in some specific ingredi It combines an [`IngredientIndex`] with a `key_index`, which is a `salsa::Id`: ```rust,ignore -{{#include ../../../components/salsa-2022/src/key.rs:DatabaseKeyIndex}} +{{#include ../../../src/key.rs:DatabaseKeyIndex}} ``` A `DependencyIndex` is similar, but the `key_index` is optional. 
@@ -153,11 +151,11 @@ This is used when we sometimes wish to refer to the ingredient as a whole, and n These kinds of indices are used to store connetions between ingredients. For example, each memoized value has to track its inputs. -Those inputs are stored as dependency indices. +Those inputs are stored as dependency indices. We can then do things like ask, "did this input change since revision R?" by -* using the ingredient index to find the route and get a `&dyn Ingredient` -* and then invoking the `maybe_changed_since` method on that trait object. +- using the ingredient index to find the route and get a `&dyn Ingredient` +- and then invoking the `maybe_changed_since` method on that trait object. ### `HasJarsDyn` @@ -166,23 +164,23 @@ The user's code always interacts with a `dyn crate::Db` value, where `crate::Db` Ideally, we would have `salsa::Database` extend `salsa::HasJars`, which is the main trait that gives access to the jars data. But we don't want to do that because `HasJars` defines an associated type `Jars`, and that would mean that every reference to `dyn crate::Db` would have to specify the jars type using something like `dyn crate::Db`. This would be unergonomic, but what's worse, it would actually be impossible: the final Jars type combines the jars from multiple crates, and so it is not known to any individual jar crate. -To workaround this, `salsa::Database` in fact extends *another* trait, `HasJarsDyn`, that doesn't reveal the `Jars` or ingredient types directly, but just has various method that can be performed on an ingredient, given its `IngredientIndex`. +To workaround this, `salsa::Database` in fact extends _another_ trait, `HasJarsDyn`, that doesn't reveal the `Jars` or ingredient types directly, but just has various method that can be performed on an ingredient, given its `IngredientIndex`. Traits like `Ingredient` require knowing the full `DB` type. 
If we had one function ingredient directly invoke a method on `Ingredient`, that would imply that it has to be fully generic and only instantiated at the final crate, when the full database type is available. We solve this via the `HasJarsDyn` trait. The `HasJarsDyn` trait exports a method that combines the "find ingredient, invoking method" steps into one method: ```rust,ignore -{{#include ../../../components/salsa-2022/src/storage.rs:HasJarsDyn}} +{{#include ../../../src/storage.rs:HasJarsDyn}} ``` So, technically, to check if an input has changed, an ingredient: -* Invokes `HasJarsDyn::maybe_changed_after` on the `dyn Database` -* The impl for this method (generated by `#[salsa::db]`): - * gets the route for the ingredient from the ingredient index - * uses the route to get a `&dyn Ingredient` - * invokes `maybe_changed_after` on that ingredient +- Invokes `HasJarsDyn::maybe_changed_after` on the `dyn Database` +- The impl for this method (generated by `#[salsa::db]`): - gets the route for the ingredient from the ingredient index + - uses the route to get a `&dyn Ingredient` + - invokes `maybe_changed_after` on that ingredient ### Initializing the database @@ -190,7 +188,7 @@ The last thing to dicsuss is how the database is initialized. The `Default` implementation for `Storage` does the work: ```rust,ignore -{{#include ../../../components/salsa-2022/src/storage.rs:default}} +{{#include ../../../src/storage.rs:default}} ``` First, it creates an empty `Routes` instance. @@ -198,16 +196,16 @@ Then it invokes the `DB::create_jars` method.
The implementation of this method is defined by the `#[salsa::db]` macro; it invokes `salsa::plumbing::create_jars_inplace` to allocate memory for the jars, and then invokes the `Jar::init_jar` method on each of the jars to initialize them: ```rust,ignore -{{#include ../../../components/salsa-2022-macros/src/db.rs:create_jars}} +{{#include ../../../components/salsa-macros/src/db.rs:create_jars}} ``` -This implementation for `init_jar` is generated by the `#[salsa::jar]` macro, and simply walks over the representative type for each salsa item and asks *it* to create its ingredients +This implementation for `init_jar` is generated by the `#[salsa::jar]` macro, and simply walks over the representative type for each salsa item and asks _it_ to create its ingredients ```rust,ignore -{{#include ../../../components/salsa-2022-macros/src/jar.rs:init_jar}} +{{#include ../../../components/salsa-macros/src/jar.rs:init_jar}} ``` The code to create the ingredients for any particular item is generated by their associated macros (e.g., `#[salsa::tracked]`, `#[salsa::input]`), but it always follows a particular structure. To create an ingredient, we first invoke `Routes::push`, which creates the routes to that ingredient and assigns it an `IngredientIndex`. We can then invoke a function such as `FunctionIngredient::new` to create the structure. -The *routes* to an ingredient are defined as closures that, given the `DB::Jars`, can find the data for a particular ingredient. +The _routes_ to an ingredient are defined as closures that, given the `DB::Jars`, can find the data for a particular ingredient. 
diff --git a/book/src/plumbing/tracked_structs.md b/book/src/plumbing/tracked_structs.md index 8fbf46a9d..15bdf4217 100644 --- a/book/src/plumbing/tracked_structs.md +++ b/book/src/plumbing/tracked_structs.md @@ -20,11 +20,11 @@ contains both the field values but also the revisions when they last changed val ## Each tracked struct has a globally unique id -This will begin by creating a *globally unique, 32-bit id* for the tracked struct. It is created by interning a combination of +This will begin by creating a _globally unique, 32-bit id_ for the tracked struct. It is created by interning a combination of -* the currently executing query; -* a u64 hash of the `#[id]` fields; -* a *disambiguator* that makes this hash unique within the current query. i.e., when a query starts executing, it creates an empty map, and the first time a tracked struct with a given hash is created, it gets disambiguator 0. The next one will be given 1, etc. +- the currently executing query; +- a u64 hash of the `#[id]` fields; +- a _disambiguator_ that makes this hash unique within the current query. i.e., when a query starts executing, it creates an empty map, and the first time a tracked struct with a given hash is created, it gets disambiguator 0. The next one will be given 1, etc. ## Each tracked struct has a `ValueStruct` storing its data @@ -32,10 +32,10 @@ The struct and field ingredients share access to a hashmap that maps each field id to a value struct: ```rust,ignore -{{#include ../../../components/salsa-2022/src/tracked_struct.rs:ValueStruct}} +{{#include ../../../src/tracked_struct.rs:ValueStruct}} ``` -The value struct stores the values of the fields but also the revisions when +The value struct stores the values of the fields but also the revisions when that field last changed. Each time the struct is recreated in a new revision, the old and new values for its fields are compared and a new revision is created. 
@@ -46,5 +46,5 @@ but also various important operations such as extracting the hashable id fields and updating the "revisions" to track when a field last changed: ```rust,ignore -{{#include ../../../components/salsa-2022/src/tracked_struct.rs:Configuration}} +{{#include ../../../src/tracked_struct.rs:Configuration}} ``` diff --git a/book/src/tutorial.md b/book/src/tutorial.md index b34393102..b59c5471e 100644 --- a/book/src/tutorial.md +++ b/book/src/tutorial.md @@ -1,7 +1,5 @@ # Tutorial: calc -{{#include caveat.md}} - This tutorial walks through an end-to-end example of using Salsa. It does not assume you know anything about salsa, but reading the [overview](./overview.md) first is probably a good idea to get familiar with the basic concepts. diff --git a/components/salsa-macros/Cargo.toml b/components/salsa-macros/Cargo.toml index 5aa058044..97b8578ca 100644 --- a/components/salsa-macros/Cargo.toml +++ b/components/salsa-macros/Cargo.toml @@ -11,9 +11,9 @@ description = "Procedural macros for the salsa crate" proc-macro = true [dependencies] +eyre = "0.6.5" heck = "0.4" proc-macro2 = "1.0" quote = "1.0" -eyre = "0.6.5" syn = { version = "2.0.64", features = ["full", "visit-mut"] } synstructure = "0.13.1" From 0f2d5a516cbba8e89116735ff8c7e07c4e2b7918 Mon Sep 17 00:00:00 2001 From: Matthijs Brobbel Date: Tue, 18 Jun 2024 10:03:53 +0200 Subject: [PATCH 5/7] More book updates and a compile fail test fix --- book/src/common_patterns/on_demand_inputs.md | 6 +-- book/src/tutorial/accumulators.md | 4 +- book/src/tutorial/db.md | 8 ++-- book/src/tutorial/debug.md | 6 +-- book/src/tutorial/ir.md | 38 +++++++-------- book/src/tutorial/jar.md | 12 ++--- book/src/tutorial/parser.md | 13 +++--- .../compile-fail/singleton_only_for_input.rs | 13 ++---- .../singleton_only_for_input.stderr | 46 ++++++++++++------- 9 files changed, 78 insertions(+), 68 deletions(-) diff --git a/book/src/common_patterns/on_demand_inputs.md b/book/src/common_patterns/on_demand_inputs.md index 
6c37e7c68..c64edd18c 100644 --- a/book/src/common_patterns/on_demand_inputs.md +++ b/book/src/common_patterns/on_demand_inputs.md @@ -13,12 +13,12 @@ That is, when a query requests the text of a file for the first time: This is possible to achieve in salsa, by caching the inputs in your database structs and adding a method to the database trait to retrieve them out of this cache. -A complete, runnable file-watching example can be found in [the lazy-input example](https://github.com/salsa-rs/salsa/tree/master/examples-2022/lazy-input). +A complete, runnable file-watching example can be found in [the lazy-input example](https://github.com/salsa-rs/salsa/tree/master/examples/lazy-input). The setup looks roughly like this: ```rust,ignore -{{#include ../../../examples-2022/lazy-input/src/main.rs:db}} +{{#include ../../../examples/lazy-input/main.rs:db}} ``` - We declare a method on the `Db` trait that gives us a `File` input on-demand (it only requires a `&dyn Db` not a `&mut dyn Db`). @@ -31,5 +31,5 @@ Here we implement a simple driving loop, that recompiles the code whenever a fil You can use the logs to check that only the queries that could have changed are re-evaluated. ```rust,ignore -{{#include ../../../examples-2022/lazy-input/src/main.rs:main}} +{{#include ../../../examples/lazy-input/main.rs:main}} ``` diff --git a/book/src/tutorial/accumulators.md b/book/src/tutorial/accumulators.md index c0c03fb6b..0df25ed4e 100644 --- a/book/src/tutorial/accumulators.md +++ b/book/src/tutorial/accumulators.md @@ -10,7 +10,7 @@ Salsa defines a mechanism for managing this called an **accumulator**. In our case, we define an accumulator struct called `Diagnostics` in the `ir` module: ```rust -{{#include ../../../examples-2022/calc/src/ir.rs:diagnostic}} +{{#include ../../../examples/calc/ir.rs:diagnostic}} ``` Accumulator structs are always newtype structs with a single field, in this case of type `Diagnostic`. 
@@ -22,7 +22,7 @@ or any functions that they called The `Parser::report_error` method contains an example of pushing a diagnostic: ```rust -{{#include ../../../examples-2022/calc/src/parser.rs:report_error}} +{{#include ../../../examples/calc/parser.rs:report_error}} ``` To get the set of diagnostics produced by `parse_errors`, or any other memoized function, diff --git a/book/src/tutorial/db.md b/book/src/tutorial/db.md index c008b17b2..04a77e92e 100644 --- a/book/src/tutorial/db.md +++ b/book/src/tutorial/db.md @@ -7,10 +7,10 @@ the one which starts up the program, supplies the inputs, and relays the outputs In `calc`, the database struct is in the [`db`] module, and it looks like this: -[`db`]: https://github.com/salsa-rs/salsa/blob/master/examples-2022/calc/src/db.rs +[`db`]: https://github.com/salsa-rs/salsa/blob/master/examples/calc/db.rs ```rust -{{#include ../../../examples-2022/calc/src/db.rs:db_struct}} +{{#include ../../../examples/calc/db.rs:db_struct}} ``` The `#[salsa::db(...)]` attribute takes a list of all the jars to include. 
@@ -24,7 +24,7 @@ The `salsa::db` attribute autogenerates a bunch of impls for things like the `sa In addition to the struct itself, we must add an impl of `salsa::Database`: ```rust -{{#include ../../../examples-2022/calc/src/db.rs:db_impl}} +{{#include ../../../examples/calc/db.rs:db_impl}} ``` ## Implementing the `salsa::ParallelDatabase` trait @@ -32,7 +32,7 @@ In addition to the struct itself, we must add an impl of `salsa::Database`: If you want to permit accessing your database from multiple threads at once, then you also need to implement the `ParallelDatabase` trait: ```rust -{{#include ../../../examples-2022/calc/src/db.rs:par_db_impl}} +{{#include ../../../examples/calc/db.rs:par_db_impl}} ``` ## Implementing the traits for each jar diff --git a/book/src/tutorial/debug.md b/book/src/tutorial/debug.md index 512983f0c..c3a5253ff 100644 --- a/book/src/tutorial/debug.md +++ b/book/src/tutorial/debug.md @@ -25,7 +25,7 @@ The `DebugWithDb` trait is automatically derived for all `#[input]`, `#[interned For consistency, it is sometimes useful to have a `DebugWithDb` implementation even for types, like `Op`, that are just ordinary enums. 
You can do that like so: ```rust -{{#include ../../../examples-2022/calc/src/ir.rs:op_debug_impl}} +{{#include ../../../examples/calc/ir.rs:op_debug_impl}} ``` ## Writing the unit test @@ -34,11 +34,11 @@ Now that we have our `DebugWithDb` impls in place, we can write a simple unit te The `parse_string` function below creates a database, sets the source text, and then invokes the parser: ```rust -{{#include ../../../examples-2022/calc/src/parser.rs:parse_string}} +{{#include ../../../examples/calc/parser.rs:parse_string}} ``` Combined with the [`expect-test`](https://crates.io/crates/expect-test) crate, we can then write unit tests like this one: ```rust -{{#include ../../../examples-2022/calc/src/parser.rs:parse_print}} +{{#include ../../../examples/calc/parser.rs:parse_print}} ``` diff --git a/book/src/tutorial/ir.md b/book/src/tutorial/ir.md index 0105535af..9bd4f083c 100644 --- a/book/src/tutorial/ir.md +++ b/book/src/tutorial/ir.md @@ -9,9 +9,9 @@ now we are going to define them for real. In addition to regular Rust types, we will make use of various **Salsa structs**. A Salsa struct is a struct that has been annotated with one of the Salsa annotations: -* [`#[salsa::input]`](#input-structs), which designates the "base inputs" to your computation; -* [`#[salsa::tracked]`](#tracked-structs), which designate intermediate values created during your computation; -* [`#[salsa::interned]`](#interned-structs), which designate small values that are easy to compare for equality. +- [`#[salsa::input]`](#input-structs), which designates the "base inputs" to your computation; +- [`#[salsa::tracked]`](#tracked-structs), which designate intermediate values created during your computation; +- [`#[salsa::interned]`](#interned-structs), which designate small values that are easy to compare for equality. All Salsa structs store the actual values of their fields in the Salsa database. 
This permits us to track when the values of those fields change to figure out what work will need to be re-executed. @@ -23,7 +23,7 @@ You must also list the struct in the jar definition itself, or you will get erro ## Input structs -The first thing we will define is our **input**. +The first thing we will define is our **input**. Every Salsa program has some basic inputs that drive the rest of the computation. The rest of the program must be some deterministic function of those base inputs, such that when those inputs change, we can try to efficiently recompute the new result of that function. @@ -31,7 +31,7 @@ such that when those inputs change, we can try to efficiently recompute the new Inputs are defined as Rust structs with a `#[salsa::input]` annotation: ```rust -{{#include ../../../examples-2022/calc/src/ir.rs:input}} +{{#include ../../../examples/calc/ir.rs:input}} ``` In our compiler, we have just one simple input, the `SourceProgram`, which has a `text` field (the string). @@ -58,12 +58,12 @@ For an input, a `&mut db` reference is required, along with the values for each let source = SourceProgram::new(&mut db, "print 11 + 11".to_string()); ``` -You can read the value of the field with `source.text(&db)`, +You can read the value of the field with `source.text(&db)`, and you can set the value of the field with `source.set_text(&mut db, "print 11 * 2".to_string())`. ### Database revisions -Whenever a function takes an `&mut` reference to the database, +Whenever a function takes an `&mut` reference to the database, that means that it can only be invoked from outside the incrementalized part of your program, as explained in [the overview](../overview.md#goal-of-salsa). When you change the value of an input field, that increments a 'revision counter' in the database, @@ -73,12 +73,12 @@ When we talk about a "revision" of the database, we are referring to the state o ### Representing the parsed program Next we will define a **tracked struct**. 
-Whereas inputs represent the *start* of a computation, tracked structs represent intermediate values created during your computation. +Whereas inputs represent the _start_ of a computation, tracked structs represent intermediate values created during your computation. In this case, the parser is going to take in the `SourceProgram` struct that we saw and return a `Program` that represents the fully parsed program: ```rust -{{#include ../../../examples-2022/calc/src/ir.rs:program}} +{{#include ../../../examples/calc/ir.rs:program}} ``` Like with an input, the fields of a tracked struct are also stored in the database. @@ -90,9 +90,9 @@ then subsequent parts of the computation won't need to re-execute. Apart from the fields being immutable, the API for working with a tracked struct is quite similar to an input: -* You can create a new value by using `new`, but with a tracked struct, you only need an `&dyn` database, not `&mut` (e.g., `Program::new(&db, some_staements)`) -* You use a getter to read the value of a field, just like with an input (e.g., `my_func.statements(db)` to read the `statements` field). - * In this case, the field is tagged as `#[return_ref]`, which means that the getter will return a `&Vec`, instead of cloning the vector. +- You can create a new value by using `new`, but with a tracked struct, you only need an `&dyn` database, not `&mut` (e.g., `Program::new(&db, some_staements)`) +- You use a getter to read the value of a field, just like with an input (e.g., `my_func.statements(db)` to read the `statements` field). + - In this case, the field is tagged as `#[return_ref]`, which means that the getter will return a `&Vec`, instead of cloning the vector. 
### The `'db` lifetime @@ -113,18 +113,18 @@ We will also use a tracked struct to represent each function: The `Function` struct is going to be created by the parser to represent each of the functions defined by the user: ```rust -{{#include ../../../examples-2022/calc/src/ir.rs:functions}} +{{#include ../../../examples/calc/ir.rs:functions}} ``` If we had created some `Function` instance `f`, for example, we might find that `the f.body` field changes because the user changed the definition of `f`. This would mean that we have to re-execute those parts of the code that depended on `f.body` -(but not those parts of the code that depended on the body of *other* functions). +(but not those parts of the code that depended on the body of _other_ functions). Apart from the fields being immutable, the API for working with a tracked struct is quite similar to an input: -* You can create a new value by using `new`, but with a tracked struct, you only need an `&dyn` database, not `&mut` (e.g., `Function::new(&db, some_name, some_args, some_body)`) -* You use a getter to read the value of a field, just like with an input (e.g., `my_func.args(db)` to read the `args` field). +- You can create a new value by using `new`, but with a tracked struct, you only need an `&dyn` database, not `&mut` (e.g., `Function::new(&db, some_name, some_args, some_body)`) +- You use a getter to read the value of a field, just like with an input (e.g., `my_func.args(db)` to read the `args` field). ### id fields @@ -136,7 +136,7 @@ For more details, see the [algorithm](../reference/algorithm.md) page of the ref ## Interned structs -The final kind of Salsa struct are *interned structs*. +The final kind of Salsa struct are _interned structs_. As with input and tracked structs, the data for an interned struct is stored in the database. Unlike those structs, if you intern the same data twice, you get back the **same integer**. 
@@ -146,7 +146,7 @@ it's also inefficient to have to compare them for equality via string comparison Therefore, we define two interned structs, `FunctionId` and `VariableId`, each with a single field that stores the string: ```rust -{{#include ../../../examples-2022/calc/src/ir.rs:interned_ids}} +{{#include ../../../examples/calc/ir.rs:interned_ids}} ``` When you invoke e.g. `FunctionId::new(&db, "my_string".to_string())`, you will get back a `FunctionId` that is just a newtype'd integer. @@ -172,7 +172,7 @@ while an interned value is in active use. We won't use any special "Salsa structs" for expressions and statements: ```rust -{{#include ../../../examples-2022/calc/src/ir.rs:statements_and_expressions}} +{{#include ../../../examples/calc/ir.rs:statements_and_expressions}} ``` Since statements and expressions are not tracked, this implies that we are only attempting to get incremental re-use at the granularity of functions -- diff --git a/book/src/tutorial/jar.md b/book/src/tutorial/jar.md index a7f663826..9f453ae92 100644 --- a/book/src/tutorial/jar.md +++ b/book/src/tutorial/jar.md @@ -19,24 +19,24 @@ This permits the crates to define private functions and other things that are me To define a jar struct, you create a tuple struct with the `#[salsa::jar]` annotation: ```rust -{{#include ../../../examples-2022/calc/src/main.rs:jar_struct}} +{{#include ../../../examples/calc/main.rs:jar_struct}} ``` Although it's not required, it's highly recommended to put the `jar` struct at the root of your crate, so that it can be referred to as `crate::Jar`. -All of the other Salsa annotations reference a jar struct, and they all default to the path `crate::Jar`. +All of the other Salsa annotations reference a jar struct, and they all default to the path `crate::Jar`. If you put the jar somewhere else, you will have to override that default. ## Defining the database trait -The `#[salsa::jar]` annotation also includes a `db = Db` field. 
+The `#[salsa::jar]` annotation also includes a `db = Db` field. The value of this field (normally `Db`) is the name of a trait that represents the database. -Salsa programs never refer *directly* to the database; instead, they take a `&dyn Db` argument. +Salsa programs never refer _directly_ to the database; instead, they take a `&dyn Db` argument. This allows for separate compilation, where you have a database that contains the data for two jars, but those jars don't depend on one another. The database trait for our `calc` crate is very simple: ```rust -{{#include ../../../examples-2022/calc/src/main.rs:jar_db}} +{{#include ../../../examples/calc/main.rs:jar_db}} ``` When you define a database trait like `Db`, the one thing that is required is that it must have a supertrait `salsa::DbWithJar`, @@ -57,7 +57,7 @@ a common choice is to write a blanket impl for any type that implements `DbWithJ and that's what we do here: ```rust -{{#include ../../../examples-2022/calc/src/main.rs:jar_db_impl}} +{{#include ../../../examples/calc/main.rs:jar_db_impl}} ``` ## Summary diff --git a/book/src/tutorial/parser.md b/book/src/tutorial/parser.md index b6056d51e..43e86f51d 100644 --- a/book/src/tutorial/parser.md +++ b/book/src/tutorial/parser.md @@ -17,12 +17,12 @@ We're going to focus only on the Salsa-related aspects. The starting point for the parser is the `parse_statements` function: ```rust -{{#include ../../../examples-2022/calc/src/parser.rs:parse_statements}} +{{#include ../../../examples/calc/parser.rs:parse_statements}} ``` This function is annotated as `#[salsa::tracked]`. That means that, when it is called, Salsa will track what inputs it reads as well as what value it returns. -The return value is *memoized*, +The return value is _memoized_, which means that if you call this function again without changing the inputs, Salsa will just clone the result rather than re-execute it. 
@@ -38,11 +38,11 @@ In the case of `parse_statements`, it directly reads `ProgramSource::text`, so i By choosing which functions to mark as `#[tracked]`, you control how much reuse you get. In our case, we're opting to mark the outermost parsing function as tracked, but not the inner ones. This means that if the input changes, we will always re-parse the entire input and re-create the resulting statements and so forth. -We'll see later that this *doesn't* mean we will always re-run the type checker and other parts of the compiler. +We'll see later that this _doesn't_ mean we will always re-run the type checker and other parts of the compiler. This trade-off makes sense because (a) parsing is very cheap, so the overhead of tracking and enabling finer-grained reuse doesn't pay off and because (b) since strings are just a big blob-o-bytes without any structure, it's rather hard to identify which parts of the IR need to be reparsed. -Some systems do choose to do more granular reparsing, often by doing a "first pass" over the string to give it a bit of structure, +Some systems do choose to do more granular reparsing, often by doing a "first pass" over the string to give it a bit of structure, e.g. to identify the functions, but deferring the parsing of the body of each function until later. Setting up a scheme like this is relatively easy in Salsa and uses the same principles that we will use later to avoid re-executing the type checker. @@ -63,11 +63,10 @@ It's generally better to structure tracked functions as functions of a single Sa ### The `return_ref` annotation -You may have noticed that `parse_statements` is tagged with `#[salsa::tracked(return_ref)]`. +You may have noticed that `parse_statements` is tagged with `#[salsa::tracked(return_ref)]`. Ordinarily, when you call a tracked function, the result you get back is cloned out of the database. The `return_ref` attribute means that a reference into the database is returned instead. 
So, when called, `parse_statements` will return an `&Vec` rather than cloning the `Vec`. This is useful as a performance optimization. -(You may recall the `return_ref` annotation from the [ir](./ir.md) section of the tutorial, +(You may recall the `return_ref` annotation from the [ir](./ir.md) section of the tutorial, where it was placed on struct fields, with roughly the same meaning.) - diff --git a/tests/compile-fail/singleton_only_for_input.rs b/tests/compile-fail/singleton_only_for_input.rs index ff0c2d05b..bb25abe9a 100644 --- a/tests/compile-fail/singleton_only_for_input.rs +++ b/tests/compile-fail/singleton_only_for_input.rs @@ -1,13 +1,14 @@ -//! Compile Singleton struct test: +//! Compile Singleton struct test: //! //! Singleton flags are only allowed for input structs. If applied on any other Salsa struct compilation must fail -use salsa_2022_tests::{HasLogger, Logger}; +mod common; +use common::{HasLogger, Logger}; use test_log::test; #[salsa::jar(db = Db)] -struct Jar(MyInput, MyTracked, Integers, create_tracked_structs ); +struct Jar(MyInput, MyTracked, Integers, create_tracked_structs); trait Db: salsa::DbWithJar + HasLogger {} @@ -16,13 +17,11 @@ struct MyInput { field: u32, } - #[salsa::tracked(singleton)] struct MyTracked { field: u32, } - #[salsa::tracked(singleton)] fn create_tracked_structs(db: &dyn Db, input: MyInput) -> Vec { (0..input.field(db)) @@ -33,7 +32,6 @@ fn create_tracked_structs(db: &dyn Db, input: MyInput) -> Vec { #[salsa::accumulator(singleton)] struct Integers(u32); - #[salsa::db(Jar)] #[derive(Default)] struct Database { @@ -51,5 +49,4 @@ impl HasLogger for Database { } } - -fn main() {} \ No newline at end of file +fn main() {} diff --git a/tests/compile-fail/singleton_only_for_input.stderr b/tests/compile-fail/singleton_only_for_input.stderr index 5be6a283e..dbfade51b 100644 --- a/tests/compile-fail/singleton_only_for_input.stderr +++ b/tests/compile-fail/singleton_only_for_input.stderr @@ -1,3 +1,12 @@ +error[E0583]: file 
not found for module `common` + --> tests/compile-fail/singleton_only_for_input.rs:5:1 + | +5 | mod common; + | ^^^^^^^^^^^ + | + = help: to create the module `common`, create file "$DIR/tests/compile-fail/common.rs" or "$DIR/tests/compile-fail/common/mod.rs" + = note: if there is a `mod common` elsewhere in the crate already, import it with `use crate::...` instead + error: `singleton` option not allowed here --> tests/compile-fail/singleton_only_for_input.rs:20:18 | @@ -5,45 +14,50 @@ error: `singleton` option not allowed here | ^^^^^^^^^ error: `singleton` option not allowed here - --> tests/compile-fail/singleton_only_for_input.rs:26:18 + --> tests/compile-fail/singleton_only_for_input.rs:25:18 | -26 | #[salsa::tracked(singleton)] +25 | #[salsa::tracked(singleton)] | ^^^^^^^^^ error: `singleton` option not allowed here - --> tests/compile-fail/singleton_only_for_input.rs:33:22 + --> tests/compile-fail/singleton_only_for_input.rs:32:22 | -33 | #[salsa::accumulator(singleton)] +32 | #[salsa::accumulator(singleton)] | ^^^^^^^^^ -error[E0432]: unresolved import `salsa_2022_tests` - --> tests/compile-fail/singleton_only_for_input.rs:5:5 +error[E0432]: unresolved imports `common::HasLogger`, `common::Logger` + --> tests/compile-fail/singleton_only_for_input.rs:6:14 + | +6 | use common::{HasLogger, Logger}; + | ^^^^^^^^^ ^^^^^^ no `Logger` in `common` + | | + | no `HasLogger` in `common` | -5 | use salsa_2022_tests::{HasLogger, Logger}; - | ^^^^^^^^^^^^^^^^ use of undeclared crate or module `salsa_2022_tests` + = help: consider importing this struct instead: + env_logger::Logger error[E0412]: cannot find type `MyTracked` in this scope - --> tests/compile-fail/singleton_only_for_input.rs:10:21 + --> tests/compile-fail/singleton_only_for_input.rs:11:21 | -10 | struct Jar(MyInput, MyTracked, Integers, create_tracked_structs ); +11 | struct Jar(MyInput, MyTracked, Integers, create_tracked_structs); | ^^^^^^^^^ not found in this scope error[E0412]: cannot find type 
`Integers` in this scope - --> tests/compile-fail/singleton_only_for_input.rs:10:32 + --> tests/compile-fail/singleton_only_for_input.rs:11:32 | -10 | struct Jar(MyInput, MyTracked, Integers, create_tracked_structs ); +11 | struct Jar(MyInput, MyTracked, Integers, create_tracked_structs); | ^^^^^^^^ not found in this scope error[E0412]: cannot find type `create_tracked_structs` in this scope - --> tests/compile-fail/singleton_only_for_input.rs:10:42 + --> tests/compile-fail/singleton_only_for_input.rs:11:42 | -10 | struct Jar(MyInput, MyTracked, Integers, create_tracked_structs ); +11 | struct Jar(MyInput, MyTracked, Integers, create_tracked_structs); | ^^^^^^^^^^^^^^^^^^^^^^ not found in this scope warning: unused import: `test_log::test` - --> tests/compile-fail/singleton_only_for_input.rs:7:5 + --> tests/compile-fail/singleton_only_for_input.rs:8:5 | -7 | use test_log::test; +8 | use test_log::test; | ^^^^^^^^^^^^^^ | = note: `#[warn(unused_imports)]` on by default From a0d7b0ee5523d83f17ed7bba6b2aead06b504973 Mon Sep 17 00:00:00 2001 From: Matthijs Brobbel Date: Tue, 18 Jun 2024 10:35:36 +0200 Subject: [PATCH 6/7] Fix book deployment --- book/netlify.sh | 2 ++ 1 file changed, 2 insertions(+) diff --git a/book/netlify.sh b/book/netlify.sh index 01959d07d..8b83ce610 100755 --- a/book/netlify.sh +++ b/book/netlify.sh @@ -15,3 +15,5 @@ unzip mdbook-linkcheck.v$MDBOOK_LINKCHECK_VERSION.x86_64-unknown-linux-gnu.zip - chmod +x ~/.cargo/bin/mdbook-linkcheck mdbook build +mkdir versions +mv book/html/* versions From d0e9b79e6aeaca520b92b2ccc3559eab7bb9c0dd Mon Sep 17 00:00:00 2001 From: Matthijs Brobbel Date: Tue, 18 Jun 2024 10:39:26 +0200 Subject: [PATCH 7/7] Remove references to old selection example --- book/src/SUMMARY.md | 1 - book/src/common_patterns/selection.md | 78 --------------------------- book/src/tuning.md | 10 +--- 3 files changed, 2 insertions(+), 87 deletions(-) delete mode 100644 book/src/common_patterns/selection.md diff --git a/book/src/SUMMARY.md 
b/book/src/SUMMARY.md index c239bc4fe..675dd3e0b 100644 --- a/book/src/SUMMARY.md +++ b/book/src/SUMMARY.md @@ -19,7 +19,6 @@ - [Durability](./reference/durability.md) - [Algorithm](./reference/algorithm.md) - [Common patterns](./common_patterns.md) - - [Selection](./common_patterns/selection.md) - [On-demand (Lazy) inputs](./common_patterns/on_demand_inputs.md) - [Tuning](./tuning.md) - [Cycle handling](./cycles.md) diff --git a/book/src/common_patterns/selection.md b/book/src/common_patterns/selection.md deleted file mode 100644 index 9188b3248..000000000 --- a/book/src/common_patterns/selection.md +++ /dev/null @@ -1,78 +0,0 @@ -# Selection - -The "selection" (or "firewall") pattern is when you have a query Qsel that reads from some -other Qbase and extracts some small bit of information from Qbase that it returns. -In particular, Qsel does not combine values from other queries. In some sense, -then, Qsel is redundant -- you could have just extracted the information -the information from Qbase yourself, and done without the salsa machinery. But -Qsel serves a role in that it limits the amount of re-execution that is required -when Qbase changes. - -## Example: the base query - -For example, imagine that you have a query `parse` that parses the input text of a request -and returns a `ParsedResult`, which contains a header and a body: - -```rust,ignore -{{#include ../../../examples/selection/main.rs:request}} -``` - -## Example: a selecting query - -And now you have a number of derived queries that only look at the header. -For example, one might extract the "content-type' header: - -```rust,ignore -{{#include ../../../examples/selection/util1.rs:util1}} -``` - -## Why prefer a selecting query? - -This `content_type` query is an instance of the *selection* pattern. It only -"selects" a small bit of information from the `ParsedResult`. You might not have -made it a query at all, but instead made it a method on `ParsedResult`. 
- -But using a query for `content_type` has an advantage: now if there are downstream -queries that only depend on the `content_type` (or perhaps on other headers extracted -via a similar pattern), those queries will not have to be re-executed when the request -changes *unless* the content-type header changes. Consider the dependency graph: - -```text -request_text --> parse --> content_type --> (other queries) -``` - -When the `request_text` changes, we are always going to have to re-execute `parse`. -If that produces a new parsed result, we are *also* going to re-execute `content_type`. -But if the result of `content_type` has not changed, then we will *not* re-execute -the other queries. - -## More levels of selection - -In fact, in our example we might consider introducing another level of selection. -Instead of having `content_type` directly access the results of `parse`, it might be better -to insert a selecting query that just extracts the header: - -```rust,ignore -{{#include ../../../examples/selection/util2.rs:util2}} -``` - -This will result in a dependency graph like so: - -```text -request_text --> parse --> header --> content_type --> (other queries) -``` - -The advantage of this is that changes that only effect the "body" or -only consume small parts of the request will -not require us to re-execute `content_type` at all. This would be particularly -valuable if there are a lot of dependent headers. - -## A note on cloning and efficiency - -In this example, we used common Rust types like `Vec` and `String`, -and we cloned them quite frequently. This will work just fine in Salsa, -but it may not be the most efficient choice. This is because each clone -is going to produce a deep copy of the result. As a simple fix, you -might convert your data structures to use `Arc` (e.g., `Arc>`), -which makes cloning cheap. 
- diff --git a/book/src/tuning.md b/book/src/tuning.md index 3f313ab4c..05b0a8d5b 100644 --- a/book/src/tuning.md +++ b/book/src/tuning.md @@ -25,14 +25,9 @@ Interning is especially useful for queries that involve nested, tree-like data structures. See: -- The [`compiler` example](https://github.com/salsa-rs/salsa/blob/master/examples/compiler/main.rs), -which uses interning. - -## Granularity of Incrementality -See: -- [common patterns: selection](./common_patterns/selection.md) and -- The [`selection` example](https://github.com/salsa-rs/salsa/blob/master/examples/selection/main.rs) +- The [`compiler` example](https://github.com/salsa-rs/salsa/blob/master/examples/compiler/main.rs), + which uses interning. ## Cancellation @@ -45,4 +40,3 @@ salsa won't be able to cancel it automatically. You may wish to check for cancel by invoking `db.unwind_if_cancelled()`. For more details on cancellation, see the tests for cancellation behavior in the Salsa repo. -