diff --git a/.gitignore b/.gitignore index e9a4f001cc9..65c832f25bd 100644 --- a/.gitignore +++ b/.gitignore @@ -60,6 +60,7 @@ priv/mibs/EJABBERD-MIB.bin ebin/ejabberd.app rel/configure.vars.config rel/vars.config +rel/vars-toml.config compile_commands.json # Compilation/test ephemera *.d diff --git a/Makefile b/Makefile index ac1293f1ec0..b162fdf2548 100644 --- a/Makefile +++ b/Makefile @@ -15,6 +15,7 @@ clean: -rm -rf _build -rm rel/configure.vars.config -rm rel/vars.config + -rm rel/vars-toml.config # REBAR_CT_EXTRA_ARGS comes from a test runner ct: @@ -25,7 +26,7 @@ ct: eunit: @$(RUN) $(REBAR) eunit -rel: certs configure.out rel/vars.config +rel: certs configure.out rel/vars.config rel/vars-toml.config . ./configure.out && $(REBAR) as prod release shell: certs etc/mongooseim.cfg @@ -41,6 +42,9 @@ rock: rel/vars.config: rel/vars.config.in rel/configure.vars.config cat $^ > $@ +rel/vars-toml.config: rel/vars-toml.config.in rel/configure.vars.config + cat $^ > $@ + ## Don't allow these files to go out of sync! configure.out rel/configure.vars.config: ./tools/configure with-all without-jingle-sip @@ -54,7 +58,7 @@ devrel: $(DEVNODES) print_devnodes: @echo $(DEVNODES) -$(DEVNODES): certs configure.out rel/vars.config +$(DEVNODES): certs configure.out rel/vars.config rel/vars-toml.config @echo "building $@" (. ./configure.out && \ DEVNODE=true $(RUN) $(REBAR) as $@ release) diff --git a/big_tests/default.spec b/big_tests/default.spec index 3de4fabfe3c..219005bfabb 100644 --- a/big_tests/default.spec +++ b/big_tests/default.spec @@ -21,6 +21,7 @@ {suites, "tests", cluster_commands_SUITE}. {suites, "tests", component_SUITE}. {suites, "tests", conf_reload_SUITE}. +{suites, "tests", config_format_SUITE}. {suites, "tests", connect_SUITE}. {suites, "tests", disco_and_caps_SUITE}. {suites, "tests", ejabberdctl_SUITE}. 
diff --git a/big_tests/run_common_test.erl b/big_tests/run_common_test.erl index ee00ad5e1be..be20a43ba42 100644 --- a/big_tests/run_common_test.erl +++ b/big_tests/run_common_test.erl @@ -165,8 +165,8 @@ run_test(Test, PresetsToRun, CoverOpts) -> prepare_cover(Test, CoverOpts), error_logger:info_msg("Presets to run ~p", [PresetsToRun]), {ConfigFile, Props} = get_ct_config(Test), - case proplists:lookup(ejabberd_presets, Props) of - {ejabberd_presets, Presets} -> + case get_presets(Props) of + {ok, Presets} -> Presets1 = case PresetsToRun of all -> Presets; @@ -186,7 +186,7 @@ run_test(Test, PresetsToRun, CoverOpts) -> save_count(Test, Presets1), analyze_coverage(Test, CoverOpts), R; - _ -> + {error, not_found} -> error_logger:info_msg("Presets were not found in the config file ~ts", [ConfigFile]), R = do_run_quick_test(Test, CoverOpts), @@ -194,6 +194,19 @@ run_test(Test, PresetsToRun, CoverOpts) -> R end. +get_presets(Props) -> + case proplists:lookup(presets, Props) of + {presets, Presets} -> + case proplists:lookup(toml, Presets) of + {toml, Preset} -> + {ok, Preset}; + _ -> + {error, not_found} + end; + _ -> + {error, not_found} + end. + get_ct_config(Opts) -> Spec = proplists:get_value(spec, Opts), Props = read_file(Spec), @@ -263,24 +276,36 @@ is_test_host_enabled(HostName) -> lists:member(atom_to_binary(HostName, utf8), BinHosts) end. 
-enable_preset_on_node(Node, PresetVars, HostVars) -> +enable_preset_on_node(Node, PresetVars, HostVarsFilePrefix) -> {ok, Cwd} = call(Node, file, get_cwd, []), - Cfg = filename:join([repo_dir(), "rel", "files", "mongooseim.cfg"]), - Vars = filename:join([repo_dir(), "rel", HostVars]), - CfgFile = filename:join([Cwd, "etc", "mongooseim.cfg"]), - {ok, Template} = handle_file_error(Cfg, file:read_file(Cfg)), - {ok, Default} = handle_file_error(Vars, file:consult(Vars)), - NewVars = lists:foldl(fun ({Var, Val}, Acc) -> - lists:keystore(Var, 1, Acc, {Var, Val}) - end, Default, PresetVars), - %% Render twice to replace variables in variables - Tmp = bbmustache:render(Template, NewVars, [{key_type, atom}]), - NewCfgFile = bbmustache:render(Tmp, NewVars, [{key_type, atom}]), - ok = call(Node, file, write_file, [CfgFile, NewCfgFile]), + TemplatePath = filename:join([repo_dir(), "rel", "files", "mongooseim.toml"]), + DefaultVarsPath = filename:join([repo_dir(), "rel", "vars-toml.config"]), + NodeVarsPath = filename:join([repo_dir(), "rel", HostVarsFilePrefix ++ ".vars-toml.config"]), + + {ok, Template} = handle_file_error(TemplatePath, file:read_file(TemplatePath)), + {ok, DefaultVars} = handle_file_error(DefaultVarsPath, file:consult(DefaultVarsPath)), + {ok, NodeVars} = handle_file_error(NodeVarsPath, file:consult(NodeVarsPath)), + + TemplatedConfig = template_config(Template, [DefaultVars, NodeVars, PresetVars]), + CfgPath = filename:join([Cwd, "etc", "mongooseim.toml"]), + ok = call(Node, file, write_file, [CfgPath, TemplatedConfig]), call(Node, application, stop, [mongooseim]), call(Node, application, start, [mongooseim]), ok. +template_config(Template, Vars) -> + MergedVars = merge_vars(Vars), + %% Render twice to replace variables in variables + Tmp = bbmustache:render(Template, MergedVars, [{key_type, atom}]), + bbmustache:render(Tmp, MergedVars, [{key_type, atom}]). 
+ +merge_vars([Vars1, Vars2|Rest]) -> + Vars = lists:foldl(fun ({Var, Val}, Acc) -> + lists:keystore(Var, 1, Acc, {Var, Val}) + end, Vars1, Vars2), + merge_vars([Vars|Rest]); +merge_vars([Vars]) -> Vars. + call(Node, M, F, A) -> case rpc:call(Node, M, F, A) of {badrpc, Reason} -> diff --git a/big_tests/test.config b/big_tests/test.config index 29f6ca52a92..2d60568e08d 100644 --- a/big_tests/test.config +++ b/big_tests/test.config @@ -17,7 +17,7 @@ %% so that we rein the "bag of things" approach {hosts, [{mim, [{node, mongooseim@localhost}, {domain, <<"localhost">>}, - {vars, "mim1.vars.config"}, + {vars, "mim1"}, {cluster, mim}, {secondary_domain, <<"localhost.bis">>}, {reloaded_domain, <<"sogndal">>}, @@ -36,7 +36,7 @@ {http_notifications_port, 8000}]}, {mim2, [{node, ejabberd2@localhost}, {domain, <<"localhost">>}, - {vars, "mim2.vars.config"}, + {vars, "mim2"}, {cluster, mim}, {c2s_tls_port, 5233}, {metrics_rest_port, 5289}, @@ -44,20 +44,20 @@ {service_port, 8899}]}, {mim3, [{node, mongooseim3@localhost}, {domain, <<"localhost">>}, - {vars, "mim3.vars.config"}, + {vars, "mim3"}, {c2s_tls_port, 5263}, {cluster, mim}]}, %% used to test s2s features {fed, [{node, fed1@localhost}, {domain, <<"fed1">>}, - {vars, "fed1.vars.config"}, + {vars, "fed1"}, {incoming_s2s_port, 5299}, {c2s_port, 5242}, {cluster, fed}]}, %% used to test global distribution features {reg, [{node, reg1@localhost}, {domain, <<"reg1">>}, - {vars, "reg1.vars.config"}, + {vars, "reg1"}, {service_port, 9990}, {c2s_port, 5252}, {gd_endpoint_port, 7777}, @@ -201,7 +201,197 @@ {auth_method, <<"SASL-ANON">>}]} ]}. 
-{ejabberd_presets, [ +{presets, + [{toml, + [ + {internal_mnesia, + %% dbs variable is used by ./tools/test_runner/presets_to_dbs.sh script + [{dbs, [redis, minio]}, + {outgoing_pools, "[outgoing_pools.redis.global_distrib] + scope = \"global\" + workers = 10"}, + {mod_offline, "[modules.mod_offline]"}]}, + {pgsql_mnesia, + [{dbs, [redis, pgsql]}, + {auth_method, "\"rdbms\""}, + {outgoing_pools, "[outgoing_pools.redis.global_distrib] + scope = \"global\" + workers = 10 +[outgoing_pools.rdbms.default] + scope = \"global\" + workers = 5 + connection.driver = \"pgsql\" + connection.host = \"localhost\" + connection.database = \"ejabberd\" + connection.username = \"ejabberd\" + connection.password = \"mongooseim_secret\" + connection.tls.required = true + connection.tls.verify_peer = true + connection.tls.cacertfile = \"priv/ssl/cacert.pem\" + connection.tls.server_name_indication = false"}, + {mod_last, "[modules.mod_last] + backend = \"rdbms\""}, + {mod_privacy, "[modules.mod_privacy] + backend = \"rdbms\""}, + {mod_private, "[modules.mod_private] + backend = \"rdbms\""}, + {mod_offline, "[modules.mod_offline] + backend = \"rdbms\""}, + {mod_vcard, "[modules.mod_vcard] + backend = \"rdbms\" + host = \"vjud.@HOST@\""}, + {mod_roster, "[modules.mod_roster] + backend = \"rdbms\""}]}, + {odbc_mssql_mnesia, + [{dbs, [redis, mssql]}, + {auth_method, "\"rdbms\""}, + {rdbms_server_type, "rdbms_server_type = \"mssql\""}, + {outgoing_pools, "[outgoing_pools.redis.global_distrib] + scope = \"global\" + workers = 10 +[outgoing_pools.rdbms.default] + scope = \"global\" + workers = 5 + connection.driver = \"odbc\" + connection.settings = \"DSN=mongoose-mssql;UID=sa;PWD=mongooseim_secret+ESL123\""}, + {mod_last, "[modules.mod_last] + backend = \"rdbms\""}, + {mod_privacy, "[modules.mod_privacy] + backend = \"rdbms\""}, + {mod_private, "[modules.mod_private] + backend = \"rdbms\""}, + {mod_offline, "[modules.mod_offline] + backend = \"rdbms\""}, + {mod_vcard, "[modules.mod_vcard] + 
backend = \"rdbms\" + host = \"vjud.@HOST@\""}, + {mod_roster, "[modules.mod_roster] + backend = \"rdbms\""}]}, + {mysql_redis, + [{dbs, [redis, mysql]}, + {sm_backend, "\"redis\""}, + {auth_method, "\"rdbms\""}, + {outgoing_pools, "[outgoing_pools.redis.global_distrib] + scope = \"global\" + workers = 10 +[outgoing_pools.redis.default] + scope = \"global\" + workers = 10 + strategy = \"random_worker\" +[outgoing_pools.rdbms.default] + scope = \"global\" + workers = 5 + connection.driver = \"mysql\" + connection.host = \"localhost\" + connection.database = \"ejabberd\" + connection.username = \"ejabberd\" + connection.password = \"mongooseim_secret\" + connection.tls.verify_peer = true + connection.tls.cacertfile = \"priv/ssl/cacert.pem\" + connection.tls.versions = [\"tlsv1.2\"]"}, + {mod_last, "[modules.mod_last] + backend = \"rdbms\""}, + {mod_privacy, "[modules.mod_privacy] + backend = \"rdbms\""}, + {mod_private, "[modules.mod_private] + backend = \"mysql\""}, + {mod_offline, "[modules.mod_offline] + backend = \"rdbms\""}, + {mod_vcard, "[modules.mod_vcard] + backend = \"rdbms\" + host = \"vjud.@HOST@\""}, + {mod_roster, "[modules.mod_roster] + backend = \"rdbms\""}]}, + {ldap_mnesia, + [{dbs, [redis, ldap]}, + {auth_method, "\"ldap\""}, + {outgoing_pools, "[outgoing_pools.redis.global_distrib] + scope = \"global\" + workers = 10 +[outgoing_pools.ldap.default] + scope = \"global\" + workers = 5 + connection.port = 3636 + connection.rootdn = \"cn=admin,dc=esl,dc=com\" + connection.password = \"mongooseim_secret\" + connection.encrypt = \"tls\" + connection.tls.versions = [\"tlsv1.2\"] + connection.tls.verify_peer = true + connection.tls.cacertfile = \"priv/ssl/cacert.pem\" + connection.tls.certfile = \"priv/ssl/fake_cert.pem\" + connection.tls.keyfile = \"priv/ssl/fake_key.pem\" +[outgoing_pools.ldap.bind] + scope = \"global\" + workers = 5 + connection.port = 3636 + connection.encrypt = \"tls\" + connection.tls.versions = [\"tlsv1.2\"] + 
connection.tls.verify_peer = true + connection.tls.cacertfile = \"priv/ssl/cacert.pem\" + connection.tls.certfile = \"priv/ssl/fake_cert.pem\" + connection.tls.keyfile = \"priv/ssl/fake_key.pem\""}, + {mod_offline, "[modules.mod_offline]"}, + {password_format, "password.format = \"scram\""}, + {auth_ldap, "ldap.base = \"ou=Users,dc=esl,dc=com\" + ldap.filter = \"(objectClass=inetOrgPerson)\""}, + {mod_vcard, "[modules.mod_vcard] + backend = \"ldap\" + host = \"vjud.@HOST@\" + ldap_base = \"ou=Users,dc=esl,dc=com\" + ldap_filter = \"(objectClass=inetOrgPerson)\""}]}, + {riak_mnesia, + [{dbs, [redis, riak]}, + {auth_method, "\"riak\""}, + %% Specify a list of ciphers to avoid + %% "no function clause matching tls_v1:enum_to_oid(28)" error + %% on Riak's side running with Erlang R16. + %% https://github.com/basho/riak-erlang-client/issues/232#issuecomment-178612129 + %% We also set ciphers in tools/setup_riak on the server side. + {outgoing_pools, "[outgoing_pools.redis.global_distrib] + scope = \"global\" + workers = 10 +[outgoing_pools.riak.default] + scope = \"global\" + workers = 5 + strategy = \"next_worker\" + connection.address = \"127.0.0.1\" + connection.port = 8087 + connection.username = \"ejabberd\" + connection.password = \"mongooseim_secret\" + connection.tls.ciphers = [\"AES256-SHA\", \"DHE-RSA-AES128-SHA256\"] + connection.tls.server_name_indication = false + connection.cacertfile = \"priv/ssl/cacert.pem\""}, + {mod_last, "[modules.mod_last] + backend = \"riak\""}, + {mod_privacy, "[modules.mod_privacy] + backend = \"riak\""}, + {mod_private, "[modules.mod_private] + backend = \"riak\""}, + {mod_offline, "[modules.mod_offline] + backend = \"riak\""}, + {mod_vcard, "[modules.mod_vcard] + backend = \"riak\" + host = \"vjud.@HOST@\""}, + {mod_roster, "[modules.mod_roster] + backend = \"riak\""} + ]}, + {elasticsearch_and_cassandra_mnesia, + [{dbs, [redis, elasticsearch, cassandra]}, + {outgoing_pools, "[outgoing_pools.redis.global_distrib] + scope = 
\"global\" + workers = 10 +[outgoing_pools.cassandra.default] + scope = \"global\" + workers = 20 + connection.tls.cacertfile = \"priv/ssl/cacert.pem\" + connection.tls.verify_peer = true +[outgoing_pools.elastic.default] + scope = \"global\""}, + {mod_offline, "[modules.mod_offline]"} + ]} + ]}, + {cfg, % preset vars for the 'cfg' format - used only by the config equivalence tests + [ {internal_mnesia, %% dbs variable is used by ./tools/test_runner/presets_to_dbs.sh script [{dbs, [redis, minio]}, @@ -333,7 +523,8 @@ {auth_method, "internal"}, {mod_offline, "{mod_offline, []},"} ]} -]}. + ]} + ]}. {timetrap,{seconds,30}}. {sensible_maximum_repeats, 100}. diff --git a/big_tests/tests/accounts_SUITE.erl b/big_tests/tests/accounts_SUITE.erl index b76794cb1e2..6b4b92da5ec 100644 --- a/big_tests/tests/accounts_SUITE.erl +++ b/big_tests/tests/accounts_SUITE.erl @@ -71,7 +71,7 @@ required_modules() -> [{mod_register, mod_register_options()}]. mod_register_options() -> - [{welcome_message, {""}}, + [{welcome_message, {"", ""}}, {ip_access, [{allow, "127.0.0.0/8"}, {deny, "0.0.0.0/0"}]}, {access, register}, diff --git a/big_tests/tests/component_SUITE.erl b/big_tests/tests/component_SUITE.erl index a8afd63a236..775e7b2a424 100644 --- a/big_tests/tests/component_SUITE.erl +++ b/big_tests/tests/component_SUITE.erl @@ -525,7 +525,7 @@ get_components(Opts, Config) -> add_domain(Config) -> Node = default_node(), - Hosts = {hosts, "[\"localhost\", \"sogndal\"]"}, + Hosts = {hosts, "\"localhost\", \"sogndal\""}, backup_ejabberd_config_file(Node, Config), ejabberd_node_utils:modify_config_file([Hosts], Config), reload_through_ctl(Node, Config), diff --git a/big_tests/tests/conf_reload_SUITE.erl b/big_tests/tests/conf_reload_SUITE.erl index 2967075ae63..1fb9748bcd7 100644 --- a/big_tests/tests/conf_reload_SUITE.erl +++ b/big_tests/tests/conf_reload_SUITE.erl @@ -160,7 +160,7 @@ change_domain_in_config_file(Config) -> [mk_value_for_hosts_pattern(?RELOADED_DOMAIN)], Config). 
mk_value_for_hosts_pattern(Domain) -> - {hosts, "[\"" ++ binary_to_list(Domain) ++ "\"]"}. + {hosts, "\"" ++ binary_to_list(Domain) ++ "\""}. run_config_file_modification_fun(Config) -> Fun = ?config(modify_config_file_fun, Config), diff --git a/big_tests/tests/config_format_SUITE.erl b/big_tests/tests/config_format_SUITE.erl new file mode 100644 index 00000000000..3e8d4cc5290 --- /dev/null +++ b/big_tests/tests/config_format_SUITE.erl @@ -0,0 +1,155 @@ +-module(config_format_SUITE). + +-compile(export_all). + +-include_lib("common_test/include/ct.hrl"). +-include_lib("eunit/include/eunit.hrl"). + +-import(distributed_helper, [mim/0, + require_rpc_nodes/1, + rpc/4]). + +-define(eq(Expected, Actual), + ?assertEqual(Expected, Actual)). + +all() -> + [equivalence]. + +suite() -> + distributed_helper:require_rpc_nodes([mim]) ++ escalus:suite(). + +init_per_suite(Config0) -> + Config1 = escalus:init_per_suite(Config0), + ejabberd_node_utils:init(Config1). + +end_per_suite(Config) -> + escalus:end_per_suite(Config). + +init_per_testcase(CaseName, Config0) -> + Config1 = escalus:init_per_testcase(CaseName, Config0), + ejabberd_node_utils:backup_config_file(Config1), + Config1. + +end_per_testcase(CaseName, Config) -> + escalus:end_per_testcase(CaseName, Config), + ejabberd_node_utils:restore_config_file(Config), + ejabberd_node_utils:restart_application(mongooseim). + +%% Test cases + +equivalence(Config) -> + TOMLOpts = get_config_tables(), + ejabberd_node_utils:modify_config_file(mim, [], Config, cfg), + ejabberd_node_utils:restart_application(mongooseim), + CfgOpts = get_config_tables(), + compare_unordered_lists(CfgOpts, TOMLOpts, fun handle_config_option/2). + +%% Test case helpers + +get_config_tables() -> + Tables = [config, local_config, acl], + Opts = lists:flatmap(fun(Tab) -> rpc(mim(), ets, tab2list, [Tab]) end, Tables), + lists:filter(fun filter_config/1, Opts). 
+ +filter_config({config, required_files, _}) -> false; % not supported in TOML +filter_config({local_config, node_start, _}) -> false; +filter_config(_) -> true. + +handle_config_option({Config, K1, V1}, {Config, K2, V2}) when Config =:= config; + Config =:= local_config -> + ?eq(K1, K2), + compare_values(K1, V1, V2); +handle_config_option(Opt1, Opt2) -> + ?eq(Opt1, Opt2). + +compare_values(listen, V1, V2) -> + compare_unordered_lists(V1, V2, fun handle_listener/2); +compare_values({auth_opts, _}, V1, V2) -> + compare_unordered_lists(V1, V2, fun handle_auth_opt/2); +compare_values(outgoing_pools, V1, V2) -> + compare_unordered_lists(V1, V2, fun handle_conn_pool/2); +compare_values({modules, _}, V1, V2) -> + compare_unordered_lists(V1, V2, fun handle_item_with_opts/2); +compare_values({services, _}, V1, V2) -> + compare_unordered_lists(V1, V2, fun handle_item_with_opts/2); +compare_values({auth_method, _}, V1, V2) when is_atom(V1) -> + ?eq([V1], V2); +compare_values({s2s_addr, _}, {_, _, _, _} = IP1, IP2) -> + ?eq(inet:ntoa(IP1), IP2); +compare_values(K, V1, V2) -> + ?eq({K, V1}, {K, V2}). + +handle_listener({P1, M1, O1}, {P2, M2, O2}) -> + ?eq(P1, P2), + ?eq(M1, M2), + compare_unordered_lists(O1, O2, fun handle_listener_option/2). + +handle_listener_option({modules, M1}, {modules, M2}) -> + compare_unordered_lists(M1, M2, fun handle_listener_module/2); +handle_listener_option({transport_options, O1}, {transport_options, O2}) -> + compare_unordered_lists(O1, O2); +handle_listener_option(V1, V2) -> ?eq(V1, V2). + +handle_listener_module({H1, P1, M1}, M2) -> + handle_listener_module({H1, P1, M1, []}, M2); +handle_listener_module({H1, P1, M1, O1}, {H2, P2, M2, O2}) -> + ?eq(H1, H2), + ?eq(P1, P2), + ?eq(M1, M2), + compare_listener_module_options(M1, O1, O2). + +compare_listener_module_options(mod_websockets, + [{ejabberd_service, S1}], [{ejabberd_service, S2}]) -> + compare_unordered_lists(S1, S2); +compare_listener_module_options(_, O1, O2) -> + ?eq(O1, O2). 
+ +handle_auth_opt({cyrsasl_external, M}, {cyrsasl_external, [M]}) -> ok; +handle_auth_opt(V1, V2) -> ?eq(V1, V2). + +handle_item_with_opts({M1, O1}, {M2, O2}) -> + ?eq(M1, M2), + compare_unordered_lists(O1, O2). + +handle_conn_pool({Type1, Scope1, Tag1, POpts1, COpts1}, + {Type2, Scope2, Tag2, POpts2, COpts2}) -> + ?eq(Type1, Type2), + ?eq(Scope1, Scope2), + ?eq(Tag1, Tag2), + compare_unordered_lists(POpts1, POpts2), + compare_unordered_lists(COpts1, COpts2, fun handle_conn_opt/2). + +handle_conn_opt({server, {D1, H1, DB1, U1, P1, O1}}, + {server, {D2, H2, DB2, U2, P2, O2}}) -> + ?eq(D1, D2), + ?eq(H1, H2), + ?eq(DB1, DB2), + ?eq(U1, U2), + ?eq(P1, P2), + compare_unordered_lists(O1, O2, fun handle_db_server_opt/2); +handle_conn_opt({tls_options, Opts1}, {tls_options, Opts2}) -> + compare_unordered_lists(Opts1, Opts2); +handle_conn_opt(V1, V2) -> ?eq(V1, V2). + +handle_db_server_opt({ssl_opts, O1}, {ssl_opts, O2}) -> + compare_unordered_lists(O1, O2); +handle_db_server_opt(V1, V2) -> ?eq(V1, V2). + +compare_unordered_lists(L1, L2) -> + compare_unordered_lists(L1, L2, fun(V1, V2) -> ?eq(V1, V2) end). + +compare_unordered_lists(L1, L2, F) -> + SL1 = lists:sort(L1), + SL2 = lists:sort(L2), + compare_ordered_lists(SL1, SL2, F). + +compare_ordered_lists([H1|T1], [H1|T2], F) -> + compare_ordered_lists(T1, T2, F); +compare_ordered_lists([H1|T1], [H2|T2], F) -> + try F(H1, H2) + catch C:R:S -> + ct:fail({C, R, S}) + end, + compare_ordered_lists(T1, T2, F); +compare_ordered_lists([], [], _) -> + ok. diff --git a/big_tests/tests/ejabberd_node_utils.erl b/big_tests/tests/ejabberd_node_utils.erl index 1bef3a38983..2264d0a9804 100644 --- a/big_tests/tests/ejabberd_node_utils.erl +++ b/big_tests/tests/ejabberd_node_utils.erl @@ -33,14 +33,11 @@ cwd(Node, Config) -> ?config({ejabberd_cwd, Node}, Config). -current_config_path(Node, Config) -> - filename:join([cwd(Node, Config), "etc", "mongooseim.cfg"]). 
+backup_config_path(Node, Config, Format) -> + filename:join([cwd(Node, Config), "etc", config_file_name(Format) ++ ".bak"]). -backup_config_path(Node, Config) -> - filename:join([cwd(Node, Config), "etc","mongooseim.cfg.bak"]). - -config_template_path(Config) -> - filename:join([path_helper:repo_dir(Config), "rel", "files", "mongooseim.cfg"]). +config_template_path(Config, Format) -> + filename:join([path_helper:repo_dir(Config), "rel", "files", config_file_name(Format)]). config_vars_path(File, Config) -> filename:join([path_helper:repo_dir(Config), "rel", File]). @@ -48,6 +45,9 @@ config_vars_path(File, Config) -> ctl_path(Node, Config) -> filename:join([cwd(Node, Config), "bin", "mongooseimctl"]). +config_file_name(toml) -> "mongooseim.toml"; +config_file_name(cfg) -> "mongooseim.cfg". + -type ct_config() :: list({Key :: term(), Value :: term()}). %%-------------------------------------------------------------------- @@ -85,8 +85,8 @@ backup_config_file(Config) -> -spec backup_config_file(distributed_helper:rpc_spec(), ct_config()) -> ct_config(). backup_config_file(#{node := Node} = RPCSpec, Config) -> - {ok, _} = call_fun(RPCSpec, file, copy, [current_config_path(Node, Config), - backup_config_path(Node, Config)]). + {ok, _} = call_fun(RPCSpec, file, copy, [get_config_path(RPCSpec), + backup_config_path(Node, Config, toml)]). -spec restore_config_file(ct_config()) -> 'ok'. restore_config_file(Config) -> @@ -95,8 +95,8 @@ restore_config_file(Config) -> -spec restore_config_file(distributed_helper:rpc_spec(), ct_config()) -> 'ok'. restore_config_file(#{node := Node} = RPCSpec, Config) -> - ok = call_fun(RPCSpec, file, rename, [backup_config_path(Node, Config), - current_config_path(Node, Config)]). + ok = call_fun(RPCSpec, file, rename, [backup_config_path(Node, Config, toml), + update_config_path(RPCSpec, toml)]). -spec call_fun(module(), atom(), []) -> term() | {badrpc, term()}. 
call_fun(M, F, A) -> @@ -148,36 +148,69 @@ file_exists(Node, Filename) -> ConfigVariable :: atom(), Value :: string(). modify_config_file(CfgVarsToChange, Config) -> - Node = distributed_helper:mim(), - modify_config_file(Node, "vars.config", CfgVarsToChange, Config). + modify_config_file(mim, CfgVarsToChange, Config, toml). --spec modify_config_file(distributed_helper:rpc_spec(), string(), [{ConfigVariable, Value}], ct_config()) -> ok when +-spec modify_config_file(Host, [{ConfigVariable, Value}], ct_config(), toml | cfg) -> ok when + Host :: atom(), ConfigVariable :: atom(), Value :: string(). -modify_config_file(#{node := Node} = RPCSpec, VarsFile, CfgVarsToChange, Config) -> - CurrentCfgPath = current_config_path(Node, Config), - {ok, CfgTemplate} = file:read_file(config_template_path(Config)), - CfgVarsPath = config_vars_path("vars.config", Config), - {ok, DefaultVars} = file:consult(CfgVarsPath), - {ok, NodeVars} = file:consult(config_vars_path(VarsFile, Config)), - PresetVars = case proplists:get_value(preset, Config) of - undefined -> - []; - Name -> - Presets = ct:get_config(ejabberd_presets), - proplists:get_value(list_to_existing_atom(Name), Presets) - end, - CfgVars1 = dict:to_list(dict:merge(fun(_, V, _) -> V end, - dict:from_list(NodeVars), - dict:from_list(DefaultVars))), - CfgVars = dict:to_list(dict:merge(fun(_, V, _) -> V end, - dict:from_list(PresetVars), - dict:from_list(CfgVars1))), - UpdatedCfgVars = update_config_variables(CfgVarsToChange, CfgVars), +modify_config_file(Host, VarsToChange, Config, Format) -> + VarsFile = vars_file(Format), + NodeVarsFile = ct:get_config({hosts, Host, vars}, Config) ++ "." 
++ vars_file(Format), + TemplatePath = config_template_path(Config, Format), + DefaultVarsPath = config_vars_path(VarsFile, Config), + NodeVarsPath = config_vars_path(NodeVarsFile, Config), + + {ok, Template} = file:read_file(TemplatePath), + {ok, DefaultVars} = file:consult(DefaultVarsPath), + {ok, NodeVars} = file:consult(NodeVarsPath), + PresetVars = preset_vars(Config, Format), + + TemplatedConfig = template_config(Template, [DefaultVars, NodeVars, PresetVars, VarsToChange]), + + RPCSpec = distributed_helper:Host(), + NewCfgPath = update_config_path(RPCSpec, Format), + ok = ejabberd_node_utils:call_fun(RPCSpec, file, write_file, [NewCfgPath, TemplatedConfig]). + +template_config(Template, Vars) -> + MergedVars = merge_vars(Vars), %% Render twice to replace variables in variables - UpdatedCfgFileTmp = bbmustache:render(CfgTemplate, UpdatedCfgVars, [{key_type, atom}]), - UpdatedCfgFile = bbmustache:render(UpdatedCfgFileTmp, UpdatedCfgVars, [{key_type, atom}]), - ok = ejabberd_node_utils:call_fun(RPCSpec, file, write_file, [CurrentCfgPath, UpdatedCfgFile]). + Tmp = bbmustache:render(Template, MergedVars, [{key_type, atom}]), + bbmustache:render(Tmp, MergedVars, [{key_type, atom}]). + +merge_vars([Vars1, Vars2|Rest]) -> + Vars = lists:foldl(fun ({Var, Val}, Acc) -> + lists:keystore(Var, 1, Acc, {Var, Val}) + end, Vars1, Vars2), + merge_vars([Vars|Rest]); +merge_vars([Vars]) -> Vars. + +update_config_path(RPCSpec, Format) -> + CurrentCfgPath = get_config_path(RPCSpec), + CurrentConfigFileName = filename:basename(CurrentCfgPath), + case config_file_name(Format) of + CurrentConfigFileName -> + CurrentCfgPath; + NewConfigFileName -> + Path = filename:join(filename:dirname(CurrentCfgPath), NewConfigFileName), + set_config_path(RPCSpec, Path), + Path + end. + +get_config_path(RPCSpec) -> + ejabberd_node_utils:call_fun(RPCSpec, os, getenv, ["EJABBERD_CONFIG_PATH"]). 
+ +set_config_path(RPCSpec, Path) -> + ejabberd_node_utils:call_fun(RPCSpec, os, putenv, ["EJABBERD_CONFIG_PATH", Path]). + +vars_file(toml) -> "vars-toml.config"; +vars_file(cfg) -> "vars.config". + +preset_vars(Config, Format) -> + case proplists:get_value(preset, Config) of + undefined -> []; + Name -> ct:get_config({presets, Format, list_to_existing_atom(Name)}) + end. -spec get_cwd(node(), ct_config()) -> string(). get_cwd(Node, Config) -> @@ -190,8 +223,3 @@ get_cwd(Node, Config) -> set_ejabberd_node_cwd(#{node := Node} = RPCSpec, Config) -> {ok, Cwd} = call_fun(RPCSpec, file, get_cwd, []), [{{ejabberd_cwd, Node}, Cwd} | Config]. - -update_config_variables(CfgVarsToChange, CfgVars) -> - lists:foldl(fun({Var, Val}, Acc) -> - lists:keystore(Var, 1, Acc,{Var, Val}) - end, CfgVars, CfgVarsToChange). diff --git a/big_tests/tests/extdisco_SUITE.erl b/big_tests/tests/extdisco_SUITE.erl index 099f37f0128..7568e09641c 100644 --- a/big_tests/tests/extdisco_SUITE.erl +++ b/big_tests/tests/extdisco_SUITE.erl @@ -73,7 +73,7 @@ init_per_group(multiple_extdisco_configured, Config) -> ExternalServices = [stun_service(), stun_service(), turn_service()], set_external_services(ExternalServices, Config); init_per_group(external_service_required_elements_configured, Config) -> - ExternalServices = [{ftp, [{host, "3.3.3.3"}]}], + ExternalServices = [[{type, ftp},{host, "3.3.3.3"}]], set_external_services(ExternalServices, Config); init_per_group(_GroupName, Config) -> Config. @@ -297,22 +297,20 @@ external_service_required_elements_configured(Config) -> %%----------------------------------------------------------------- stun_service() -> - {stun, [ + [{type, stun}, {host, "1.1.1.1"}, - {port, "3478"}, + {port, 3478}, {transport, "udp"}, {username, "username"}, - {password, "secret"} - ]}. + {password, "secret"}]. 
turn_service() -> - {turn, [ + [{type, turn}, {host, "2.2.2.2"}, - {port, "3478"}, + {port, 3478}, {transport, "tcp"}, {username, "username"}, - {password, "secret"} - ]}. + {password, "secret"}]. domain() -> ct:get_config({hosts, mim, domain}). diff --git a/big_tests/tests/reload_helper.erl b/big_tests/tests/reload_helper.erl index 17cf0a5d72d..24686f31a19 100644 --- a/big_tests/tests/reload_helper.erl +++ b/big_tests/tests/reload_helper.erl @@ -25,8 +25,7 @@ -import(distributed_helper, [rpc/4]). --define(CTL_RELOAD_OUTPUT_PREFIX, - "done"). +-define(CTL_RELOAD_OUTPUT, "done"). backup_ejabberd_config_file(#{node := Node} = RPCSpec, Config) -> {ok, _} = rpc(RPCSpec, file, copy, [node_cfg(Node, current, Config), @@ -47,10 +46,10 @@ reload_through_ctl(#{node := Node} = RPCSpec, Config) -> ok = reload_output_contains_done(ReloadCmd, OutputStr). reload_output_contains_done(ReloadCmd, OutputStr) -> - case re:run(list_to_binary(OutputStr), <<"\n", ?CTL_RELOAD_OUTPUT_PREFIX, "\n">>) of - {match, _} -> + case lists:member(?CTL_RELOAD_OUTPUT, string:split(OutputStr, "\n", all)) of + true -> ok; - _ -> + false -> ct:pal("ReloadCmd: ~p", [ReloadCmd]), ct:pal("OutputStr: ~ts", [OutputStr]), error(config_reload_failed, [OutputStr]) @@ -62,9 +61,9 @@ update_config_variables(CfgVarsToChange, CfgVars) -> end, CfgVars, CfgVarsToChange). node_cfg(N, current, C) -> - filename:join(ejabberd_node_utils:node_cwd(N, C), "etc/mongooseim.cfg"); + filename:join(ejabberd_node_utils:node_cwd(N, C), "etc/mongooseim.toml"); node_cfg(N, backup, C) -> - filename:join(ejabberd_node_utils:node_cwd(N, C), "etc/mongooseim.cfg.bak"). + filename:join(ejabberd_node_utils:node_cwd(N, C), "etc/mongooseim.toml.bak"). node_ctl(N, C) -> filename:join(ejabberd_node_utils:node_cwd(N, C), "bin/mongooseimctl"). 
diff --git a/big_tests/tests/sasl_external_SUITE.erl b/big_tests/tests/sasl_external_SUITE.erl index f32743955fe..b411418f423 100644 --- a/big_tests/tests/sasl_external_SUITE.erl +++ b/big_tests/tests/sasl_external_SUITE.erl @@ -104,22 +104,18 @@ init_per_group(just_tls, Config) -> init_per_group(fast_tls, Config) -> [{tls_module, "fast_tls"} | Config]; init_per_group(ca_signed, Config) -> - SSLOpts = case escalus_config:get_config(tls_module, Config) of - "just_tls" -> - "{ssl_options, [{verify_fun, {peer, false}}]},"; - "fast_tls" -> - "" - end, - [{signed, ca}, {ssl_options, SSLOpts}, {verify_mode, ""} | Config]; + [{signed, ca}, + {ssl_options, "\n tls.disconnect_on_failure = false"}, + {verify_mode, "\n tls.verify_mode = \"peer\""} | Config]; init_per_group(self_signed, Config) -> - SSLOpts = "{ssl_options, [{verify_fun, {selfsigned_peer, true}}]},", - [{signed, self}, {ssl_options, SSLOpts}, {verify_mode, "{verify_mode, selfsigned_peer},"} | Config]; + [{signed, self}, + {verify_mode, "\n tls.verify_mode = \"selfsigned_peer\""} | Config]; init_per_group(standard, Config) -> - modify_config_and_restart("standard", Config), + modify_config_and_restart("\"standard\"", Config), Config; init_per_group(standard_keep_auth, Config) -> Config1 = [{auth_methods, []} | Config], - modify_config_and_restart("standard", Config1), + modify_config_and_restart("\"standard\"", Config1), case mongoose_helper:supports_sasl_module(cyrsasl_external) of false -> {skip, "SASL External not supported"}; true -> Config1 @@ -127,41 +123,47 @@ init_per_group(standard_keep_auth, Config) -> init_per_group(registered, Config) -> escalus:create_users(Config, [{bob, generate_user_tcp(Config, username("bob", Config))}]); init_per_group(use_common_name, Config) -> - modify_config_and_restart("use_common_name", Config), + modify_config_and_restart("\"standard\", \"common_name\"", Config), Config; init_per_group(allow_just_user_identity, Config) -> - 
modify_config_and_restart("allow_just_user_identity", Config), + modify_config_and_restart("\"standard\", \"auth_id\"", Config), Config; init_per_group(demo_verification_module, Config) -> - modify_config_and_restart("[{mod, cyrsasl_external_verification}]", Config), + modify_config_and_restart("\"cyrsasl_external_verification\"", Config), Config; init_per_group(self_signed_certs_allowed, Config) -> - modify_config_and_restart("standard", Config), + modify_config_and_restart("\"standard\"", Config), Config; init_per_group(self_signed_certs_not_allowed, Config) -> - modify_config_and_restart("standard", Config), + modify_config_and_restart("\"standard\"", Config), Config; init_per_group(_, Config) -> Config. modify_config_and_restart(CyrsaslExternalConfig, Config) -> - SSLOpts = escalus_config:get_config(ssl_options, Config, ""), TLSModule = escalus_config:get_config(tls_module, Config, "just_tls"), VerifyMode = escalus_config:get_config(verify_mode, Config, ""), - AuthMethods = escalus_config:get_config(auth_methods, Config, [{auth_method, "pki"}]), + SSLOpts = case TLSModule of + "just_tls" -> escalus_config:get_config(ssl_options, Config, "") ++ VerifyMode; + "fast_tls" -> "" + end, + AuthMethods = escalus_config:get_config(auth_methods, Config, [{auth_method, "\"pki\""}]), CACertFile = filename:join([path_helper:repo_dir(Config), "tools", "ssl", "ca-clients", "cacert.pem"]), - NewConfigValues = [{tls_config, "{certfile, \"priv/ssl/fake_server.pem\"}," - "starttls, verify_peer," - "{cafile, \"" ++ CACertFile ++ "\"}," + NewConfigValues = [{tls_config, "tls.certfile = \"priv/ssl/fake_server.pem\"\n" + " tls.mode = \"starttls\"\n" + " tls.verify_peer = true\n" + " tls.cacertfile = \"" ++ CACertFile ++ "\"" ++ SSLOpts}, - {tls_module, "{tls_module, " ++ TLSModule ++ "},"}, - {https_config, "{ssl, [{certfile, \"priv/ssl/fake_cert.pem\"}," - "{keyfile, \"priv/ssl/fake_key.pem\"}, {password, \"\"}," - "{verify, verify_peer}," ++ VerifyMode ++ - "{cacertfile, \"" ++ 
CACertFile ++ "\"}]},"}, - {cyrsasl_external, ", {cyrsasl_external," ++ CyrsaslExternalConfig ++ "}"}, - {sasl_mechanisms, "{sasl_mechanisms, [cyrsasl_external]}."} | AuthMethods], + {tls_module, "tls.module = \"" ++ TLSModule ++ "\""}, + {https_config, "tls.certfile = \"priv/ssl/fake_cert.pem\"\n" + " tls.keyfile = \"priv/ssl/fake_key.pem\"\n" + " tls.password = \"\"\n" + " tls.verify_peer = true\n" + " tls.cacertfile = \"" ++ CACertFile ++ "\"" + ++ VerifyMode}, + {cyrsasl_external, CyrsaslExternalConfig}, + {sasl_mechanisms, "sasl_mechanisms = [\"external\"]"} | AuthMethods], ejabberd_node_utils:modify_config_file(NewConfigValues, Config), ejabberd_node_utils:restart_application(mongooseim). diff --git a/doc/Advanced-configuration.md b/doc/Advanced-configuration.md index ead8b2ef901..b3f8113ce05 100644 --- a/doc/Advanced-configuration.md +++ b/doc/Advanced-configuration.md @@ -1,357 +1,39 @@ -For advanced configuration use the following files: +The following files are used to configure MongooseIM: -* `mongooseim.cfg` for pure MongooseIM settings, +* `mongooseim.toml` for MongooseIM settings, * `vm.args` to affect the Erlang VM behaviour (performance tuning, node name), * `app.config` to change low-level logging parameters and settings of other Erlang applications. -Since you've gotten this far, we assume you're already familiar with Erlang syntax. +# mongooseim.toml -# mongooseim.cfg +This [TOML](https://github.com/toml-lang/toml) file contains the configuration options for the MongooseIM server. It is located at `[MongooseIM repo root]/rel/files/` if you are building from source or `[MongooseIM install root]/etc/` if you are using a pre-built version. -This file consists of multiple erlang tuples terminated with a period. -In order to configure it, go to `[MongooseIM repo root]/rel/files/` (if you're building from source) or `[MongooseIM install root]/etc/` if you're using a pre-built version. 
+The file is divided into the following sections: -The tuple order is important, unless no `host_config` option is set. -Retaining the default layout is recommended so that the experienced MongooseIM users can smoothly traverse the file. +* [**general**](advanced-configuration/general.md) - Served XMPP domains, log level, server language and some other miscellaneous settings. +* [**listen**](advanced-configuration/listen.md) - Configured listeners, receiving incoming XMPP and HTTP connections. +* [**auth**](advanced-configuration/auth.md) - Supported client authentication methods and their options. +* [**outgoing_pools**](advanced-configuration/outgoing-connections.md) - Outgoing connections to external services, including databases, message queues and HTTP services. +* [**services**](advanced-configuration/Services.md) - Internal services like an administration API and system metrics. +* [**modules**](advanced-configuration/Modules.md) - [XMPP extension](https://xmpp.org/extensions/) modules, which extend the basic functionality provided by XMPP. +* [**shaper**](advanced-configuration/shaper.md) - Traffic shapers that limit the incoming XMPP traffic, providing a safety valve to protect the server. +* [**acl**](advanced-configuration/acl.md) - Access classes to which connecting users are assigned. +* [**access**](advanced-configuration/access.md) - Access rules, specifying the privileges of the defined access classes. +* [**s2s**](advanced-configuration/s2s.md) - Server-to-server connection options, used for XMPP federation. +* [**host_config**](advanced-configuration/host_config.md) - Configuration options that need to be different for a specific XMPP domain. -`mongooseim.cfg` is full of useful comments and in most cases they should be sufficient help in changing the configuration. +The section names above are links to the detailed documentation of each section. 
-## Options - -* All options except `hosts`, `host`, `host_config`, `listen` and `outgoing_connections` may be used in the `host_config` tuple. - -* There are two kinds of local options - those that are kept separately for each domain in the config file (defined inside `host_config`) and the options local for a node in the cluster. - -* "global" options are shared by all cluster nodes and all domains. - -* Options labeled as "multi" (in this page) can be declared multiple times in a row, e.g. one per domain. - -* Section names below correspond with the ones in the file. - -### Override stored options - -* **override_global, override_local, override_acls** - optional - * **Description:** Will cause MongooseIM to erase all global/local/acl options in database respectively. This ensures that ALL settings of a specific type will be reloaded on startup. - -### Debugging - -* **loglevel** (local) - * **Description:** Log level configured with an integer: 0 (disabled), 1 (critical), 2 (error), 3 (warning), 4 (info), 5 (debug). Recommended values for production systems are 2 or 3 (5 is for development). - - -### Served hostnames - -* **hosts** (global) - * **Description:** List of domains supported by this cluster. - * **Warning:** Extension modules and database backends will be started separately for every domain. When increasing the number of domains please make sure you have enough resources available (e.g. connection limit set in DBMS). - * **Example:** `["localhost", "domain2"]` - -* **route_subdomain** (local) - * **Description:** If a stanza is addressed to a subdomain of the served domain and this option is set to `s2s`, such a stanza will be transmitted over s2s. Without it, MongooseIM will try to route the stanza to one of the internal services. - * **Note:** `s2s` is the only valid value. Any other will simply disable the feature. - -### Listening ports - -* **listen** (local) - * **Description:** List of modules handling the incoming connections. 
By default, 3 are enabled: `ejabberd_cowboy`, `ejabberd_c2s` and `ejabberd_s2s_in`. They accept XMPP, BOSH, Websocket and S2S connections (plus queries to metrics API). - * **Syntax:** List of tuples: `{Port, Module, ModuleSpecificOptions}` - * **See also:** [Listener modules](advanced-configuration/Listener-modules.md) - -* **s2s_use_starttls** (global) - * **Description:** Controls StartTLS feature for S2S connections. - * **Values:** - * `false` - * `optional` - * `required` - * `required_trusted` - uses OpenSSL's function [SSL_get_verify_result](http://www.openssl.org/docs/ssl/SSL_get_verify_result.html) - -* **s2s_certfile** (global) - * **Description:** Path to X509 PEM file with a certificate and a private key inside (not protected by any password). Required if `s2s_use_starttls` is enabled. - -* **s2s_ciphers** (global) - * **Description:** Defines a list of accepted SSL ciphers in **outgoing** S2S connection. - Please refer to the [OpenSSL documentation](http://www.openssl.org/docs/apps/ciphers.html) for the cipher string format. - * **Default:** `"TLSv1.2:TLSv1.3"` - -* **domain_certfile** (multi, global) - * **Description:** Overrides common certificates with new ones specific for chosen XMPP domains. - Applies to S2S and C2S connections. - * **Syntax:** `{domain_certfile, "example.com", "/path/to/example.com.pem"}.` - -* **s2s_default_policy** (local) - * **Description:** Default policy for a new S2S (server-to-server) **both incoming and outgoing** connection to/from an unknown remote server. - -* **s2s_host** (multi, local) - * **Description:** Allows black/whitelisting S2S destinations. - * **Syntax:** `{ {s2s_host, "somehost.com"}, allow|deny }.` - -* **outgoing_s2s_port** (local) - * **Description:** Defines a port to be used for outgoing S2S connections. Cannot be random. 
- * **Default:** 5269 - -* **s2s_addr** (multi, global) - * **Description:** Override DNS lookup for a specific non-local XMPP domain and use a predefined server IP and port for S2S connection. - * **Syntax:** `"{ {s2s_addr, \"some-domain\"}, { {10,20,30,40}, 7890 } }."` - -* **outgoing_s2s_options** (global) - * **Description:** Specifies the order of address families to try when establishing S2S connection and the connection timeout (in milliseconds or atom `infinity`). - * **Default:** `{outgoing_s2s_options, [ipv4, ipv6], 10000}.` - * **Family values:** `inet4`/`ipv4`, `inet6`/`ipv6` - -* **s2s_shared** (global) - * **Description:** S2S shared secret used in [Server Dialback](https://xmpp.org/extensions/xep-0220.html) extension. - * **Syntax:** `{s2s_shared, <<"shared secret">>}`. - * **Default:** 10 strong random bytes, hex-encoded. - -* **s2s_dns_options** (local) - * **Description:** Parameters used in DNS lookups for outgoing S2S connections. - * **Syntax:** `{s2s_dns_options, [{Opt, Val}, ...]}.` - * **Supported options** - * `timeout` (integer, seconds, default: 10) - A timeout for DNS lookup. - * `retries` (integer, default: 2) - How many DNS lookups will be attempted. - * **Example:** `{s2s_dns_options, [{timeout, 30}, {retries, 1}]}.` - -* **s2s_max_retry_delay** (local) - * **Description:** How many seconds MIM node should wait until next attempt to connect to remote XMPP cluster. - * **Syntax:** `{s2s_max_retry_delay, Delay}.` - * **Default:** 300 - * **Example:** `{s2s_max_retry_delay, 30}.` - -### Session backend - -* **sm_backend** (global) - * **Description:** Backend for storing user session data. - Currently all nodes in a cluster must have access to a complete session database. - Valid backends are `mnesia` and `redis`. - Mnesia is sufficient in most cases, use Redis only in large deployments when you notice issues with the mnesia backend. 
- * **Mnesia:** `{sm_backend, {mnesia, []}}` - * **Redis:** `{sm_backend, {redis, []}}` - Requires redis pool defined in `outgoing_pools`:
`{redis, global, default, ..., ...}`. - See [redis section in outgoing connections doc](./advanced-configuration/outgoing-connections.md#redis-connection-setup) - -### Authentication - -* **auth_method** (local) - * **Description:** Chooses an authentication module or a list of modules. Modules from the list are queried one after another until one of them replies positively. - * **Valid values:** `internal` (Mnesia), `rdbms`, `external`, `anonymous`, `ldap`, `jwt`, `riak`, `http`, `pki` - * **Warning:** Authentication backends support only specific SASL mechanisms, see [auth backends capabilities](#authentication-backend-capabilities). - * **Examples:** `rdbms`, `[internal, anonymous]` - -* **auth_opts** (local) - * **Description:** Provides different parameters that will be applied to a chosen authentication method. - `auth_password_format` and `auth_scram_iterations` are common to `http`, `rdbms`, `internal` and `riak`. - - * **auth_password_format** - * **Description:** Decide whether user passwords will be kept plain or hashed in the database. - Currently, popular XMPP clients support the SCRAM method and it is strongly recommended to use the hashed version. - MongooseIM supports SHA-1, SHA-224, SHA-256, SHA-384 and SHA-512 for SCRAM hashing which can be provided as an argument and this will result in storing and supporting only hashes specified in the configuration. - The older XMPP clients can still use the `PLAIN` mechanism. `DIGEST-MD5` is not available with `scram`. - * **Values:** `plain`, `scram`, `{scram, [sha256]}` (`scram` and `{scram, [sha, sha224, sha256, sha384, sha512]}` are equivalent configurations) - * **Default:** `scram` - - * **auth_scram_iterations** - * **Description:** Hash function round count. - This is a tradeoff between latency and security. 
- The higher the value, the more difficult breaking the hashes is: it is a work factor: increasing the count increases the work it requires to compute a full hash, which effectively slows down brute-force attacks. - But it adds load on both client and server, so this parameter should be tuned as high as the business-rules allow. - Note that increasing the security of a password has a higher impact over the security of the algorithm, without impacting its load. - See more information in this [NIST guide, Appendix A.2.2](https://csrc.nist.gov/publications/detail/sp/800-132/final), - * **Default:** 10000, as recommended in this [XEP](https://xmpp.org/extensions/xep-0438.html#pbkdf2) and this [NIST Guidelines](https://pages.nist.gov/800-63-3/sp800-63b.html#sec5) - - * [`external` backend options](authentication-backends/External-authentication-module.md#configuration-options) - - * [`http` backend options](authentication-backends/HTTP-authentication-module.md#configuration-options) - - * [`jwt` backend options](authentication-backends/JWT-authentication-module.md#configuration-options) - - * [`ldap` backend options](authentication-backends/LDAP-authentication-module.md#configuration-options) - - * [`riak` backend options](authentication-backends/Riak-authentication-module.md#configuration-options) - -* **sasl_mechanisms** (local) - * **Description:** Specifies a list of allowed SASL mechanisms. - It affects the methods announced during stream negotiation and is enforced eventually (user can't pick mechanism not listed here but available in the source code). - All SCRAM-SHA mechanisms support channel binding and are advertised as a separate authentication mechanisms that is suffixed with `-PLUS`. - Please note that the list of advertised authentication mechanisms is filtered out by the supported password formats to assure that it is possible to authenticate using authentication mechanisms that are offered. 
- * **Warning:** This list is still filtered by [auth backends capabilities](#authentication-backend-capabilities) - * **Valid values:** `cyrsasl_scram_sha512_plus, cyrsasl_scram_sha512, cyrsasl_scram_sh384_plus, cyrsasl_scram_sh384, cyrsasl_scram_sha256_plus, cyrsasl_scram_sha256, cyrsasl_scram_sha224_plus, cyrsasl_scram_sha224, cyrsasl_scram_sha1_plus, cyrsasl_scram_sha1, cyrsasl_plain, cyrsasl_anonymous, cyrsasl_oauth, cyrsasl_external, cyrsasl_digest` - * **Default:** `[cyrsasl_scram_sha512_plus, cyrsasl_scram_sha512, cyrsasl_scram_sh384_plus, cyrsasl_scram_sh384, cyrsasl_scram_sha256_plus, cyrsasl_scram_sha256, cyrsasl_scram_sha224_plus, cyrsasl_scram_sha224, cyrsasl_scram_sha1_plus, cyrsasl_scram_sha1, cyrsasl_plain, cyrsasl_anonymous, cyrsasl_oauth]` - - Please note that configuring the `sasl_mechanisms` parameter will take precedence over the default list. - Should more than one parameter be configured in the list of `sasl_mechanisms`, the order of how they are listed in the config will be taken as the order in which they are advertised. - - * **Examples:** `[cyrsasl_plain]`, `[cyrsasl_scram_sha256_plus, cyrsasl_anonymous]` - * **Deprecations:** Please note that the DIGEST-MD5 authentication method `cyrsasl_digest` is deprecated and will be removed in the next release. - -* **extauth_instances** (local) - * **Description:** Specifies a number of workers serving external authentication requests. - * **Syntax:** `{extauth_instances, Count}.` - * **Default:** 1 - -#### Authentication backend capabilities - -The table below shows the supported SASL mechanisms for each authentication backend module. - -| | cyrsasl
plain | cyrsasl
digest | cyrsasl
scram_sha* | cyrsasl
anonymous | cyrsasl
external | -|-----------|:----------------:|:-----------------:|:---------------------:|:--------------------:|:-------------------:| -| internal | x | x | x | | | -| rdbms | x | x | x | | | -| external | x | | | | | -| anonymous | x | x | x | x | | -| ldap | x | | | | x | -| jwt | x | | | | | -| riak | x | x | x | | | -| http | x | x | x | | | -| pki | | | | | x | - -`cyrsasl_oauth` does not use the auth backends at all and requires the `mod_auth_token` module enabled instead. - -`cyrsasl_digest` is deprecated and will be removed in the next release. - -### Outgoing connections setup - -* **outgoing_pools** (local) - * **Description** Declares pools for outgoing connections. - See more in [outgoing connections configuration](./advanced-configuration/outgoing-connections.md) - * **Syntax** `[{Type, Host, Tag, PoolOptions, ConnectionOptions}]` - * **Example**: -```erlang - [{riak, global, default, [], [{address, "127.0.0.1"}]}, - {http, host, auth, [], [{server, "127.0.0.1"}]} -``` - -### RDBMS connection setup - -RDBMS connection pools are set using [outgoing connections configuration](./advanced-configuration/outgoing-connections.md). -There are some additional options that influence all database connections in the server: - -* **pgsql_users_number_estimate** (local) - * **Description:** PostgreSQL's internal structure can make the row counting slow. - Enabling this option uses alternative query to `SELECT COUNT`, that might be not as accurate but is always fast. - * **Syntax:** `{pgsql_users_number_estimate, false | true}` - * **Default:** `false` - -* **rdbms_server_type** (local) - * **Description:** Specifies RDBMS type. Some modules may optimise queries for certain DBs (e.g. `mod_mam_rdbms_user` uses different query for `mssql`). 
- * **Syntax:** `{rdbms_server_type, Type}` - * **Supported values:** `mssql`, `pgsql` or `undefined` - * **Default:** `undefined` - -### Traffic shapers - -* **shaper** (multi, global) - * **Description:** Define a class of a shaper which is a mechanism for limiting traffic to prevent DoS attack or calming down too noisy clients. - * **Syntax:** `{shaper, AtomName, {maxrate, BytesPerSecond}}` - -* **max_fsm_queue** (local) - * **Description:** When enabled, will terminate certain processes (e.g. client handlers) that exceed message limit, to prevent resource exhaustion. - This option is set for C2S, outgoing S2S and component connections and can be overridden for particular `ejabberd_s2s` or `ejabberd_service` listeners in their configurations. - **Use with caution!** - * **Syntax:** `{max_fsm_queue, MaxFsmQueueLength}` - -### Access control lists - -* **acl** (multi) - * **Description:** Define access control list class. - * **Syntax:** `{acl, AtomName, Definition}` - * **Regexp format:** Syntax for `_regexp` can be found in [Erlang documentation](http://www.erlang.org/doc/man/re.html) - it's based on AWK syntax. For `_glob` use `sh` regexp syntax. 
- * **Valid definitions:** - * `all` - * `{user, U}` - check if the username equals `U` and the domain either equals the one specified by the module executing the check or (if the module does a `global` check) is on the served domains list (`hosts` option) - * `{user, U, S}` - check if the username equals `U` and the domain equals `S` - * `{server, S}` - check if the domain equals `S` - * `{resource, R}` - check if the resource equals `R` - * `{user_regexp, UR}` - perform a regular expression `UR` check on the username and check the server name like in `user` - * `{user_regexp, UR, S}` - perform a regular expression `UR` check on the username and check if the domain equals `S` - * `{server_regexp, SR}` - perform a regular expression `SR` check on a domain - * `{resource_regexp, RR}` - perform a regular expression `SR` check on a resource - * `{node_regexp, UR, SR}` - username must match `UR` and domain must match `SR` - * `{user_glob, UR}` - like `_regexp` variant but with `sh` syntax - * `{server_glob, UR}` - like `_regexp` variant but with `sh` syntax - * `{resource_glob, UR}` - like `_regexp` variant but with `sh` syntax - * `{node_glob, UR}` - like `_regexp` variant but with `sh` syntax - -### Access rules - -* **access** (multi, global) - * **Description:** Define an access rule for internal checks. The configuration file contains all built-in ones with proper comments. - * **Syntax:** `{access, AtomName, [{Value, AclName}]}` - -* **registration_timeout** (local) - * **Description:** Limits the registration frequency from a single IP. Valid values are `infinity` or a number of seconds. - -* **mongooseimctl_access_commands** (local) - * **Description:** Defines access rules to chosen `mongooseimctl` commands. - * **Syntax:** `{mongooseimctl_access_commands, [Rule1, Rule2, ...]}.` - * **Rule syntax:** `{AccessRule, Commands, ArgumentRestrictions}` - * `AccessRule` - A name of a rule defined with `acl` config key. - * `Commands` - A list of command names (e.g. 
`["restart", "stop"]`) or `all`. - * `ArgumentRestrictions` - A list of permitted argument values (e.g. `[{domain, "localhost"}]`). - * **Example:** `{mongooseimctl_access_commands, [{local, ["join_cluster"], [{node, "mongooseim@prime"}]}]}.` - -### Default language - -* **language** (global) - * **Description:** Default language for messages sent by the server to users. You can get a full list of supported codes by executing `cd [MongooseIM root] ; ls priv/*.msg | awk '{split($0,a,"/"); split(a[4],b,"."); print b[1]}'` (`en` is not listed there) - * **Default:** `en` - -### Miscellaneous - -* **all_metrics_are_global** (local) - * **Description:** When enabled, all per-host metrics are merged into global equivalents. It means it is no longer possible to view individual host1, host2, host3, ... metrics, only sums are available. This option significantly reduces CPU and (especially) memory footprint in setups with exceptionally many domains (thousands, tens of thousands). - * **Default:** `false` - -* **routing_modules** (local) - * **Description:** Provides an ordered list of modules used for routing messages. If one of the modules accepts packet for processing, the remaining ones are not called. - * **Syntax:** `{routing_modules, ModulesList}.` - * **Valid modules:** - * `mongoose_router_global` - Calls `filter_packet` hook. - * `mongoose_router_localdomain` - Routes packets addressed to a domain supported by the local cluster. - * `mongoose_router_external_localnode` - Delivers packet to an XMPP component connected to the node, which processes the request. - * `mongoose_router_external` - Delivers packet to an XMPP component connected to the local cluster. - * `ejabberd_s2s` - Forwards a packet to another XMPP cluster over XMPP Federation. 
- * **Default:** `[mongoose_router_global, mongoose_router_localdomain, mongoose_router_external_localnode, mongoose_router_external, ejabberd_s2s]` - * **Example:** `{routing_modules, [mongoose_router_global, mongoose_router_localdomain]}.` - -* **replaced_wait_timeout** (local) - * **Description:** When a user session is replaced (due to a full JID conflict) by a new one, this parameter specifies the time MongooseIM waits for the old sessions to close. The default value is sufficient in most cases. If you observe `replaced_wait_timeout` warning in logs, then most probably the old sessions are frozen for some reason and it should be investigated. - * **Syntax:** `{replaced_wait_timeout, TimeInMilliseconds}` - * **Default:** `2000` - -* **cowboy_server_name** (local) - * **Description:** If configured, replaces Cowboy's default name returned in the `server` HTTP response header. It may be used for extra security, as it makes it harder for the malicious user to learn what HTTP software is running under a specific port. This option applies to **all** listeners started by the `ejabberd_cowboy` module. - * **Syntax:** `{cowboy_server_name, NewName}` - * **Default:** no value, i.e. `Cowboy` is used as a header value - * **Example:** `{cowboy_server_name, "Apache"}` - -* **hide_service_name** (local) - * **Description:** According to RFC 6210, even when a client sends invalid data after opening a connection, the server must open an XML stream and return a stream error anyway. For extra security, this option may be enabled. It changes MIM behaviour to simply close the connection without any errors returned (effectively hiding the server's identity). - * **Syntax:** `{hide_service_name, Boolean}` - * **Default:** `false` - * **Example:** `{hide_service_name, true}` - -### Modules - -For a specific configuration, please refer to [Modules](advanced-configuration/Modules.md) page. - -* **modules** (local) - * **Description:** List of enabled modules with their options. 
- -### Services - -For a specific configuration, please refer to [Services](advanced-configuration/Services.md) page. - -* **services** (local) - * **Description:** List of enabled services with their options. +## Option scope -### Per-domain configuration +Each configuration option has its **scope**, which is one of the following: -The `host_config` allows configuring most options separately for specific domains served by the cluster. It is best to put `host_config` tuple right after the global section it overrides/complements or even at the end of `mongooseim.cfg`. +* **local** - configured separately for each node in the cluster - each node can have a different value, +* **global** - configured for the entire cluster - all nodes share the same value. -* **host_config** (multi, local) - * **Syntax:** `{host_config, Domain, [ {{add, modules}, [{mod_some, Opts}]}, {access, c2s, [{deny, local}]}, ... ]}.` +The scope of each option is defined in the documentation above - either at the top of the section page or for each option individually. # vm.args @@ -376,8 +58,6 @@ A file with Erlang application configuration. To configure it, go to `[MongooseI By default only the following applications can be found there: * `logger` - check [Logger's documentation](https://erlang.org/doc/man/logger.html) for more information. -* `ejabberd` - * `config` (default: `"etc/mongooseim.cfg"`) - path to MongooseIM config file. * `ssl` * `session_lifetime` (default specified in the file: `600` seconds) - This parameter says for how long should the ssl session remain in the cache for further re-use, should `ssl session resumption` happen. diff --git a/doc/Basic-configuration.md b/doc/Basic-configuration.md deleted file mode 100644 index 9adee73abe3..00000000000 --- a/doc/Basic-configuration.md +++ /dev/null @@ -1,77 +0,0 @@ -## vars.config - -The `vars.config` file can be found under the `[MongooseIM root]/rel/` directory. 
Change the most important settings quickly and without touching `mongooseim.cfg` or `vm.args` files directly. Recommended for basic usage. - -The file contains erlang tuples terminated with period ('.'). For users not familiar with Erlang syntax, here is a quick cheat sheet: - -* Each config option (key and value) is a tuple. Tuples are (Erlangers, forgive us the simplification) other Erlang terms separated with commas and enclosed in curly brackets ({}). -* Tuples (at least the top-level ones) in `vars.config` are always 2-element. -* The first element of each tuple is the name (Erlang atom). The file contains all possible keys so you will never have to change the first element or add new tuple. -* The second element is a string (in quotes: ""). Remeber to escape quote with backslash ('\') if you ever use one inside a string. -* A value can be a list. Erlang lists are other Erlang terms separated with commas and enclosed in square brackets ([]). -* If a value is terminated with a period (e.g. `acl`) or a comma (e.g. `mod_privacy`), don't change it. -* Config options that are "features", can be disabled by using empty string as the value or prepending the actual value with '%' ('%' starts one-line comment in Erlang, like '//' in C or Java). - -### Options - -There are 2 types of options: params and features. Unlike params, features can be disabled. - -* **hosts** - param - * **Description:** List of supported XMPP domains. Usually it's best to stick with just one or two domains. - * **Warning:** extension modules and database backends will be started separately for every domain, so when increasing the number of domains please make sure you have enough resources available (e.g. connection limit set in DBMS). - * **Example:** `"[\"localhost\", \"domain2\"]"` - -* **host_config** - feature - * **Description:** List of specific options for chosen XMPP domains. They will override the global ones. 
Allowed keys are marked on the [Advanced configuration](Advanced-configuration.md) page - * **Syntax:** `"{host_config, \"overridden-domain\", [{key, value}]}."` - * **Example:** `"{host_config, \"localhost2\", [{auth_method, anonymous}, {allow_multiple_connections, false}]}." ` - -* **auth_ldap** - feature - * **Description:** Put [[LDAP configuration]] here. - -* **all_metrics_are_global** - param - * **Description:** When set to 'true', per-host metrics are replaced with global equivalents. For more info consult [Advanced configuration](Advanced-configuration.md) - -* **s2s_addr** - feature - * **Description:** Override DNS lookup for specific non-local XMPP domain and use predefined server IP and port for S2S connection (server-to-server). - * **Syntax:** `"{ {s2s_addr, \"some-domain\"}, { {10,20,30,40}, 7890 } }."` - -* **s2s_default_policy** - param - * **Description:** Default policy for new S2S (server-to-server) **both incoming and outgoing** connection to/from unknown remote server. - -* **outgoing_s2s_port** - param - * **Description:** Port to be used locally when establishing outgoing S2S (server-to-server) connection. Default is 5269. - -* **node_name** - param - * **Description:** Erlang node name. Should be changed when deploying MongooseIM cluster, otherwise not relevant. - -* **c2s_port** - param - * **Description:** Port to listen on for standard incoming XMPP connections. Default is 5222. - -* **s2s_port** - param - * **Description:** Port to listen on for incoming S2S (server-to-server) connections. Default is 5269. - -* **cowboy_port** - param - * **Description:** Port for all HTTP-based MongooseIM services like BOSH or Websockets. Default is 5280. - -* **mod_last, mod_offline, mod_privacy, mod_private, mod_roster, mod_vcard, mod_snmp** - feature - * **Description:** Allows enabling/disabling specific modules and configuring them. Read more on the [Modules](advanced-configuration/Modules.md) page. 
- -* **sm_backend** - param - * **Description:** Defines the session management module (session storage backend). - * **Valid values:** `mnesia`, `redis` - -* **auth_method** - param - * **Description:** Chooses authentication modules. Can be either a single module or a list of modules to be tried in sequence until one of them succeeds. - * **Valid values:** `internal`, `rdbms`, `external`, `anonymous`, `ldap`, `riak` - * `internal` means Mnesia-based - * **Examples:** `"rdbms"`, `"[internal, anonymous]"` - -* **ext_auth_script** - feature - * **Description:** Path to the authentication script used by `external` auth module. Script API specification can be found in [[External authentication script]]. - -* **tls_config** - feature - * **Description:** Allows enabling the StartTLS feature in client-to-server XMPP connections. Just remove '%%' prefix and set path to the PEM file containing certificate and (not protected by password) private key in X.509 format. - -* **zlib** - feature - * **Description:** Controls the zlib compression feature for client-to-server XMPP connections. To enable it, remove '%%' prefix. You can define a limit for output data size to prevent killing the server with [zlib bomb](https://xmpp.org/community/security-notices/uncontrolled-resource-consumption-with-highly-compressed-xmpp-stanzas.html). Set it to `unlimited` to bypass the check (**not recommended**). diff --git a/doc/advanced-configuration/Listener-modules.md b/doc/advanced-configuration/Listener-modules.md index ff2654d9114..2eac2a82b57 100644 --- a/doc/advanced-configuration/Listener-modules.md +++ b/doc/advanced-configuration/Listener-modules.md @@ -1,130 +1,3 @@ -Some of the MongooseIM modules are specialised in handling user connections. -They can be used in the `listen` clause in the `mongooseim.cfg` file. -See this section for their description and configuration options. - -Options described with a value type (e.g. string, integer) are key-value tuples. 
-Other options are enabled by being added as atoms. -E.g. a tuple option might be: `{access, c2s}` while other options are added as: `starttls`. - -## Client-to-server (C2S): `ejabberd_c2s` - -Handles pure XMPP client-to-server (C2S) connections, relies on `ejabberd_listener` for listening. -It processes the incoming data from the user client, while the data reception and parsing is executed with `ejabberd_receiver`'s help. -You only need to declare running `ejabberd_c2s`, to have the other 2 modules started and used. - -**Default port:** 5222 - -### Configuration - -#### General - -* `access` (atom, default: `c2s`) - Access Rule to use for C2S connections. -* `c2s_shaper` (atom, default: `none`) - Connection shaper to use for incoming C2S stanzas. -* `max_stanza_size` (positive integer, default: infinity) - Maximum allowed incoming stanza size. - **Warning:** this limit is checked **after** the input data parsing, so it does not apply to the input data size itself. -* `backlog` (positive integer, default 100) - overrides the default TCP backlog value -* `max_fsm_queue` (positive integer, the value of this option set global) - message queue limit to prevent resource exhaustion; overrides the global value of this option -* `hibernate_after` (integer, default: 0) - Time in milliseconds after which a client process spawned by this listener will hibernate. - Hibernation greatly reduces memory consumption of client processes, but *may* result in increased CPU consumption if a client is used *very* frequently. - The default, recommended value of 0 means that the client processes will hibernate at every opportunity. -* `acceptors_num` (integer, default: 100) - For TCP-based listeners: the number of processes accepting new connections on the listening socket. 
-* `zlib` (atom or a positive integer, default: disabled) - Enables ZLIB support, the integer value is a limit for a decompressed output size (to prevent successful [ZLIB bomb attack](https://xmpp.org/community/security-notices/uncontrolled-resource-consumption-with-highly-compressed-xmpp-stanzas.html)); the limit can be disabled with an atom 'unlimited'. -* `proxy_protocol` (boolean, default: `false`) - For TCP-based listeners: when set to `true`, [Proxy Protocol](https://www.haproxy.com/blog/haproxy/proxy-protocol/) is enabled and each connecting client has to provide a proxy header. Use only with a proxy (or a load balancer) to allow it to provide the connection details (including the source IP address) of the original client. Versions 1 and 2 of the protocol are supported. - -#### Common TLS options - -* `starttls` (default: disabled) - Enables StartTLS support; requires `certfile`. -* `starttls_required` (default: disabled) - enforces StartTLS usage. -* `tls` (default: disabled) - When this option is set, clients must initiate a TLS session immediately after connecting, before beginning the normal XML stream. -* `tls_module` (atom, default: `fast_tls`) - Provides a TLS library to use. `fast_tls` uses OpenSSL-based NIFs, while `just_tls` uses Erlang TLS implementation provided by OTP. They are fully interchangeable, with some exceptions (`ejabberd_c2s` options supported by only one of them are explicitly described, e.g. `crlfiles`). -* `certfile` (string, default: no certfile will be used) - Path to the X509 PEM file with a certificate and a private key (not protected by a password). - If the certificate is signed by an intermediate CA, you should specify here the whole CA chain by concatenating all public keys together and appending the private key after that. -* `cafile` (string, default: no CA file will be used) - Path to the X509 PEM file with a CA chain that will be used to verify clients. Won't have any effect if `verify_peer` is not enabled. 
-* `verify_peer` (default: disabled) - Enforces verification of a client certificate. Requires a valid `cafile`. -* `dhfile` (string, default: no DH file will be used) - Path to the Diffie Hellman parameter file - -#### `fast_tls` - specific options - -* `ciphers` (string, default: `"TLSv1.2:TLSv1.3"`) - Cipher suites to use with StartTLS or TLS. Please refer to the [OpenSSL documentation](http://www.openssl.org/docs/man1.0.2/apps/ciphers.html) for the cipher string format. -* `protocol_options` List of OpenSSL options, the default value is `["no_sslv2", "no_sslv3", "no_tlsv1, "no_tlsv1_1"]`. You can find the mappings between supported options and actual OpenSSL flags in the `fast_tls` [source code](https://github.com/processone/fast_tls/blob/master/c_src/options.h). - -#### `just_tls` - specific options - -* `crlfiles` (list of strings, default: []) - A list of paths to Certificate Revocation Lists. - -## HTTP-based services (BOSH, WebSocket, REST): `ejabberd_cowboy` - -Manages all HTTP-based services, such as BOSH (HTTP long-polling) and WebSocket. -Unlike `ejabberd_c2s`, it doesn't use `ejabberd_receiver` or `ejabberd_listener`. - -**Default port:** 5280 - -### Configuration - -* `ip` (IP tuple, default: `{0,0,0,0}`) - IP address to bind to. -* `num_acceptors` (positive integer, default: 100) - Number of acceptors. -* `transport_options` (proplist, default: []) - Ranch-specific transport options. - See [ranch:opt()](https://ninenines.eu/docs/en/ranch/1.7/manual/ranch/#_opt). -* `protocol_options` (proplist, default: []) - Protocol configuration options for Cowboy. - See [Cowboy HTTP module docs](https://ninenines.eu/docs/en/cowboy/2.6/manual/cowboy_http/). -* `ssl` (list of ssl options, required for https, no default value) - If specified, https will be used. - Accepts all ranch_ssl options that don't take fun() parameters. - Only `certfile` and `keyfile` are mandatory. 
- See [ranch_ssl documentation](https://github.com/ninenines/ranch/blob/master/doc/src/manual/ranch_ssl.asciidoc) for details. A minimal usage would be as follows: - - {ssl, [ - {certfile, "priv/ssl/fake_cert.pem"}, - {keyfile, "priv/ssl/fake_key.pem"}, - ]}, - - Here, `certfile` and `keyfile` specify the certificate and private key files respectively. - If the keyfile is password-protected, one will need to specify the password with `{password, "secret"}`. - If the certificate is signed by an intermediate CA, one will probably want to specify the CA chain with `cacertfile` option. - - Note that `port`, `ip` and `max_connections` are taken from the listener config above and will be ignored if specified under `ssl`. - -* `modules` (list of tuples: `{Host, Path, Modules}`) - List of enabled HTTP-based modules. `"_"` equals any host. - * `mod_bosh` - BOSH connections handler. - Default declaration: - - `{"_", "/http-bind", mod_bosh}` - - * `mod_websockets` - Websocket connections as defined in [RFC 7395](https://tools.ietf.org/html/rfc7395). - You can pass optional parameters: - * `{timeout, Val}` (positive integer, default: infinity) - the time after which an inactive user is disconnected. - * `{ping_rate, Val}` (positive integer, default: none) - the Ping rate points to the time between pings sent by server. - By declaring this field you enable server-side pinging. - * `{max_stanza_size, Val}` (positive integer, default: infinity) - Maximum allowed incoming stanza size. - **Warning:** this limit is checked **after** the input data parsing, so it does not apply to the input data size itself. - * `{ejabberd_service, Params}` (default: []) - this enables external component connections over WebSockets. - See the [ejabberd_service](#ejabberd_service) section for more details how to configure it. - - Default declaration: - - `{"_", "/ws-xmpp", mod_websockets, []}` - - * (OBSOLETE) `mongoose_api` - REST API for accessing internal MongooseIM metrics. 
- Please refer to the [REST interface to metrics](../rest-api/Metrics-backend.md) page for more information. - Default declaration: - - `{"localhost", "/api", mongoose_api, [{handlers, [mongoose_api_metrics]}]}` - - * `mongoose_api_admin` - REST API for admin commands. Exposes all mongoose_commands. - It expects one optional argument: - * Credentials: `{auth, {Username, Password}}`. - If they're not provided, authorization is disabled. - - Example: - - `{"localhost", "/api", mongoose_api_admin, [{auth, {<<"ala">>, <<"makotaipsa">>}}]}` - - * `mongoose_api_client` - REST API for client side commands. - Exposes all mongoose_commands marked as "user". - - Example: - - `{"localhost", "/api/contacts/{:jid}", mongoose_api_client_contacts, []}` - ### HTTP module: `mod_cowboy` This module provides an additional routing layer on top of HTTP(s) or WS(S) protocols. @@ -153,72 +26,3 @@ Here's an example of its configuration (added to ejabberd_cowboy modules list de According to this configuration, all HTTP requests will go through the `mod_revproxy` module (see [mod_revproxy](../modules/mod_revproxy.md) for more details). As for now, all WebSocket connections with the `Sec-WebSocket-Protocol: xmpp` header, will go through the mod_websockets connection. This is the MongooseIM's regular websocket connection handler. - -## Server-to-server (S2S): `ejabberd_s2s_in` - -Handles incoming server-to-server (S2S) connections (federation). -Relies on `ejabberd_listener` and `ejabberd_receiver` just like `ejabberd_c2s`. - -**Note:** Many S2S options are configured as top-level config options and they apply to both incoming and outgoing connections. -Please refer to the [Advanced configuration](../Advanced-configuration.md) for more information. - -**Default port:** 5269 - -### Configuration - -* `shaper` (atom, default: `none`) - Connection shaper to use for incoming S2S data. -* `max_stanza_size` (positive integer, default: infinity) - Maximum allowed incoming stanza size. 
- **Warning:** this limit is checked **after** input data parsing, so it does not apply to the input data size itself. -* `protocol_options` List of supported SSL protocols, default "no_sslv3". - It also accepts "no_tlsv1" and "no_tlsv1_1" -* `dhfile` (string, default: no DH file will be used) - Path to the Diffie Hellman parameter file -* `ciphers` (string, default: `"TLSv1.2:TLSv1.3"`) - cipher suites to use with StartTLS. -* `cafile` (string, default: no CA file will be used) - Path to the X509 PEM file with a CA chain that will be used to verify clients (here server initiating S2S connection). - -## XMPP components: `ejabberd_service` - -Interface for XMPP components ([XEP-0114: Jabber Component Protocol](http://xmpp.org/extensions/xep-0114.html)), enabling communication between servers and "external" components over the XMPP network. - -**Default port:** 8888 - -### Configuration - -* `access` (atom, default: `all`) - Access Rule to use for incoming component connections. -* `password` (string) - the service is protected with this password -* `shaper_rule` (atom, default: `none`) - Connection shaper to use for incoming component traffic. -* `service_check_from` (boolean, default: `true`) - Checks whether the server should verify the "from" field in stanzas from the component -* `max_fsm_queue` (positive integer, the value of this option set global) - message queue limit to prevent resource exhaustion; overrides the global value of this option -* `hidden_components` (boolean, default: `false`) - All components connected to an endpoint with this option enabled will be considered "hidden" (see explanation below). -* `conflict_behaviour` (`disconnect`, `kick_old`, default: `disconnect`) - If set to `kick_old`, in case of a routing conflict it stops the previous connection (see the explanation below). - -According to ([XEP-0114: Jabber Component Protocol](http://xmpp.org/extensions/xep-0114.html)) component's hostname should be given in the element. 
- -### Custom extension to the protocol - -In order to register a component for all virtual hosts served by the server (listed in global variable hosts), the component must add the attribute `is_subdomain="true"` to the opening stream element. -This maybe helpful if someone wants to have a single instance of a component serving multiple virtual hosts. -The `is_subdomain` attribute is optional and the default behaviour is as described in the XEP. - -### Hidden components - -Hidden components have a special flag enabled in the internal component table. -Alone, it doesn't change the server behaviour in any way, but it may be used by other modules and extensions to execute special logic. -An example would be [`mod_disco`](../modules/mod_disco.md), which may be configured to filter out hidden components from disco results, so they won't be discoverable by clients. -A reason to do so could be reduced traffic - systems with many components could return very long disco responses. -Also, some deployments would like to avoid revealing some services; not because it is a security threat (this method does not prevent clients from communicating with hidden components), but rather because they are not meant to interact with clients directly (e.g. helper components for other components). - -## Conflict behaviour - -By default, when a component tries to connect and a registration conflict occures, we drop such connection by sending: - -```xml - - - - -``` - -It makes implementing the reconnection logic difficult, because the old connection would not allow any other connections. - -By setting `{conflict_behaviour, kick_old}`, we drop any old connections registered at the same host, before accepting new ones. 
- diff --git a/doc/advanced-configuration/Modules.md b/doc/advanced-configuration/Modules.md index 719c937df6a..4dee0b2148b 100644 --- a/doc/advanced-configuration/Modules.md +++ b/doc/advanced-configuration/Modules.md @@ -1,21 +1,47 @@ -MongooseIM provides a wide range of pluggable and configurable modules, that implement various features including XEPs. -For instance `mod_muc` enables Multi-User Chat (group chat), `mod_mam` gives us Message Archive Management, and `mod_stream_management` is for stanza acknowledgement and stream resumption. +MongooseIM provides a wide range of pluggable and configurable modules, +that implement various features including XEPs. +For instance `mod_muc` enables Multi-User Chat (group chat), +`mod_mam` gives us Message Archive Management, +and `mod_stream_management` is for stanza acknowledgement and stream resumption. This modular architecture provides great flexibility for everyday operations and feature development. A module configuration generally looks like this: ``` - {mod_muc, [ - {host, "muc.@HOST@"}, - {access, muc}, - {access_create, muc_create} - ]}, +[modules.mod_muc] + host = "muc.@HOST@" + access = "muc" + access_create = "muc_create" ``` -## Module list +### IQ processing policies + Some of the modules feature an `iqdisc` parameter. It defines the method for handling incoming IQ stanzas. -Please refer to [[IQ handlers]] for more information. -Valid values: `no_queue`, `one_queue`, `{queues, N}`, `parallel`. Default: `one_queue`. 
+
+The server may use one of the following strategies to handle incoming IQ stanzas:
+
+#### `modules.*.iqdisc.type`
+* **Syntax:** string, one of `"one_queue"`, `"no_queue"`, `"queues"`, or `"parallel"`
+* **Example:** `iqdisc.type = "one_queue"`
+
+**Note:** In the `"queues"` case alone, the following key becomes mandatory:
+
+#### `modules.*.iqdisc.workers`
+* **Syntax:** positive integer
+* **Example:** `iqdisc.workers = 50`
+
+Their semantics work as follows:
+
+* `no_queue` registers a new IQ handler, which will be called in the
+  context of the process serving the connection on which the IQ arrives.
+* `one_queue` spawns a new process by which the incoming IQ stanzas will be handled.
+* `queues` spawns **N** worker processes, as provided by the `iqdisc.workers` key.
+  Every incoming stanza will then be handled by one of those processes.
+* `parallel` registers the handler without spawning any process:
+  a new process will be spawned in place, for each incoming stanza.
+
+
+## Module list
 ### [mod_adhoc](../modules/mod_adhoc.md)
 Implements [XEP-0050: Ad-Hoc Commands](http://xmpp.org/extensions/xep-0050.html) for advertising and executing application-specific commands, such as those related to a configuration workflow, using [XEP-0004: Data Forms](http://xmpp.org/extensions/xep-0004.html) in order to structure the information exchange.
diff --git a/doc/advanced-configuration/Services.md b/doc/advanced-configuration/Services.md
index bf3b9db5b6d..7dc9522b37b 100644
--- a/doc/advanced-configuration/Services.md
+++ b/doc/advanced-configuration/Services.md
@@ -1,80 +1,104 @@
 Some functionalities in MongooseIM are provided by "services".
-A service is similar to a module, but while a module is started for every virtual host and may have global or host-specific configuration, a service is started only once with global configuration.
-Service configuration is similar to a module configuration, e.g.: +A service is similar to a module, but while a module is started for every virtual +host and may have global or host-specific configuration, a service is started +only once with global configuration. +Currently, only two modules are categorised as "service providers". +Eventually the modules which are not host-specific will be refactored to be services. + +* **Scope:** global +* **Syntax:** Both services are specified in their own sections, either +`[services.service_admin_extra]` or `[services.service_mongoose_system_metrics]`. +* **Default:** None - each service needs to be enabled explicitly. +Typical services are already specified in the example configuration file. +* **Example:** A configuration of the `service_admin_extra` service. + ``` -{services, [ - {service_admin_extra, [{submods, [node, accounts, sessions]}]}, - {service_mongoose_system_metrics, [report, - {intial_report, 300000}, - {periodic_report, 108000000}]} - ] -}. +[services.service_admin_extra] + submods = ["node", "account", "sessions", "vcard", "gdpr", "upload", "roster", + "last", "private", "stanza", "stats"] ``` +### service_admin_extra -## Service list +#### `services.service_admin_extra.submods` +* **Syntax:** Array of strings representing function groups added by `service_admin_extra`. +* **Default:** All submodules: `["node", "account", "sessions", "vcard", "gdpr", + "upload", "roster", "last", "private", "stanza", "stats"]` +* **Example:** `submods = ["stats", "gdpr"]` -Currently, only two modules are categorised as a "service provider". -Eventually the modules which are not host-specific will be refactored to be services. +This service provides additional commands to the mongooseimctl script. +They are bundled in the following groups: -### service_admin_extra - -Provides additional commands to mongooseimctl script. 
- -#### Options -* `submods` (default: all submodules): List of function groups added by `service_admin_extra`. Allowed elements: - * `accounts`: Adds `change_password`, `check_password_hash`, `delete_old_users`, `delete_old_users_vhost`, `ban_account`, `num_active_users`, `check_account`, `check_password` - * `last`: Adds `set_last` - * `node`: Adds `load_config`, `get_cookie`, `remove_node` - * `private`: Adds `private_get`, `private_set` - * `roster`: Adds `add_rosteritem`, `delete_rosteritem`, `process_rosteritems`, `get_roster`, `push_roster`, `push_roster_all`, `push_roster_alltoall` - * `sessions`: Adds `num_resources`, `resource_num`, `kick_session`, `status_num_host`, `status_num`, `status_list_host`, `status_list`, `connected_users_info`, `connected_users_vhost`, `user_sessions_info`, `set_presence` - * `stanza`: Adds `send_message_chat`, `send_message_headline`, `send_stanza_c2s` - * `stats`: Adds `stats`, `stats_host` - * `vcard`: Adds `get_vcard`, `get_vcard2`, `get_vcard2_multi`, `set_vcard`, `set_vcard2`, `set_vcard2_multi` - * `gdpr`: Adds `retrieve_personal_data` - * `upload` : Adds `http_upload` - -#### Example configuration -` {service_admin_extra, [{submods, [node, accounts, sessions]}]} ` +* `accounts`: Adds `change_password`, `check_password_hash`, `delete_old_users`, + `delete_old_users_vhost`, `ban_account`, `num_active_users`, `check_account`, + `check_password` +* `last`: Adds `set_last` +* `node`: Adds `load_config`, `get_cookie`, `remove_node` +* `private`: Adds `private_get`, `private_set` +* `roster`: Adds `add_rosteritem`, `delete_rosteritem`, `process_rosteritems`, + `get_roster`, `push_roster`, `push_roster_all`, `push_roster_alltoall` +* `sessions`: Adds `num_resources`, `resource_num`, `kick_session`, `status_num_host`, + `status_num`, `status_list_host`, `status_list`, `connected_users_info`, + `connected_users_vhost`, `user_sessions_info`, `set_presence` +* `stanza`: Adds `send_message_chat`, `send_message_headline`, 
`send_stanza_c2s` +* `stats`: Adds `stats`, `stats_host` +* `vcard`: Adds `get_vcard`, `get_vcard2`, `get_vcard2_multi`, `set_vcard`, + `set_vcard2`, `set_vcard2_multi` +* `gdpr`: Adds `retrieve_personal_data` +* `upload` : Adds `http_upload` ### service_mongoose_system_metrics MongooseIM system metrics are being gathered to analyse the trends and needs of our users, improve MongooseIM, and get to know where to focus our efforts. See [System Metrics Privacy Policy](../operation-and-maintenance/System-Metrics-Privacy-Policy.md) for more details. -#### Options -* `report` (default: disabled) - Explicit acknowledgement that the metrics are gathered and reported. -Enabling this option is silencing the notification reminder that metrics are gathered. -* `no_report` (default: disabled) - When this option is set, System Metrics Service is not started and metrics are not collected. -Having this option enabled, stops the notification warning that the functionality is not being used. -* `intial_report`: - * **Description:** Time delay counted when the service is started after which the first metrics report is created and sent. - * **Syntax:** `{initial_report, Delay}` - * **Default:** 300000ms (5min). - * **Example:** `{intial_report, 300000}` -* `periodic_report`: - * **Description:** Time delay for a periodic update report to be created and sent. - * **Syntax:**`{periodic_report, Delay}` - * **Default:** 108000000ms (3h) - * **Example:** `{periodic_report, 108000000}` -* `tracking_id`: - * **Description:** Tracking ID to forward the reported metrics so that they can be viewed in the Google Analytics dashboard. - * **Syntax:**`{tracking_id, TrackingID}` - * **Default:** disabled - * **Example:** `{tracking_id, UA-123456789}` - -Removing the `service_mongoose_system_metrics` entry from list of services will result in the service not being started. 
+#### `services.service_mongoose_system_metrics.report`
+* **Syntax:** boolean
+* **Default:** not specified
+* **Example:** `report = true`
+
+Explicit acknowledgement that the metrics are gathered and reported.
+When this option is not specified, the reports are gathered and a notification
+appears in logs on startup.
+Enabling this option silences the notification reminder that metrics are gathered.
+When this option is set to `false`, System Metrics Service is not started and metrics are not collected.
+
+#### `services.service_mongoose_system_metrics.initial_report`
+* **Syntax:** non-negative integer
+* **Default:** `300_000` (milliseconds - 5 minutes).
+* **Example:** `initial_report = 300_000`
+
+Time delay counted when the service is started after which the first metrics report is created and sent.
+
+#### `services.service_mongoose_system_metrics.periodic_report`
+* **Syntax:** non-negative integer
+* **Default:** `108_000_000` (milliseconds - 3 hours)
+* **Example:** `periodic_report = 108_000_000`
+
+Time delay for a periodic update report to be created and sent.
+
+#### `services.service_mongoose_system_metrics.tracking_id`
+* **Syntax:** string
+* **Default:** no default.
+* **Example:** `tracking_id = "UA-123456789"`
+
+Tracking ID to forward the reported metrics so that they can be viewed in the Google Analytics dashboard.
+
+Removing the `services.service_mongoose_system_metrics` entry will result in the service not being started.
 Metrics will not be collected and shared.
 It will generate a notification that the feature is not being used.
 The notification can be silenced by setting the `no_report` option explicitly.
-#### Example configuration +### Example configuration + +``` +[services.service_admin_extra] + submods = ["node", "account", "sessions", "vcard", "gdpr", "upload", "roster", + "last", "private", "stanza", "stats"] + +[services.service_mongoose_system_metrics] + report = true + initial_report = 300_000 + periodic_report = 108_000_000 + tracking_id = "UA-123456789" ``` -{service_mongoose_system_metrics, [ - report, - {intial_report, 300000}, - {periodic_report, 108000000} - ] -} -``` \ No newline at end of file diff --git a/doc/advanced-configuration/access.md b/doc/advanced-configuration/access.md new file mode 100644 index 00000000000..58d3e4a55de --- /dev/null +++ b/doc/advanced-configuration/access.md @@ -0,0 +1,226 @@ +The `access` section is used to define **access rules** which return specific values for specific access classes. + +* **Scope:** global +* **Syntax:** each access rule is a key-value pair, where: + * Key is the name of the rule, + * Value is a TOML array of rule clauses - TOML tables, whose format is described below. +* **Default:** no default - each access rule needs to be specified explicitly. +* **Example:** see the [examples](#rule-examples) below. + +## Access rule clauses + +Whenever a rule is checked to obtain the resulting value for a user, the clauses are traversed one by one until a matching one is found or the list is exhausted (in which case the special value `deny` is returned). + +Each clause has to contain the following keys: + +### `access.*.acl` + +* **Syntax:** string +* **Example:** `acl = "local"` + +The access class defined in the `acl` section. The user is matched against it. The special name `all` is a catch-all value that matches any user. If the class does not exist, the clause does not match (there is no error). + +### `access.*.value` + +* **Syntax:** string or integer +* **Example:** `value = "allow"` + +For rules determining access, the value will be `"allow"` or `"deny"`. 
For other rules it can be an integer or a string.
+
+## Rule examples
+
+The following access rules are already defined in the example configuration file.
+
+### C2S Access
+
+The `c2s` rule is used to allow/deny the users to establish C2S connections:
+
+```toml
+  c2s = [
+    {acl = "blocked", value = "deny"},
+    {acl = "all", value = "allow"}
+  ]
+```
+
+It has the following logic:
+
+* if the access class is `blocked`, the returned value is `"deny"`,
+* otherwise, the returned value is `"allow"`.
+
+The `blocked` access class can be defined in the `acl` section and match blacklisted users.
+
+For this rule to take effect, it needs to be referenced in the options of a [C2S listener](listen.md#listenc2saccess).
+
+### C2S Shaper
+
+The `c2s_shaper` rule is used to determine the shaper used to limit the incoming traffic on C2S connections:
+
+```toml
+  c2s_shaper = [
+    {acl = "admin", value = "none"},
+    {acl = "all", value = "normal"}
+  ]
+```
+
+It has the following logic:
+
+* if the access class is `admin`, the returned value is `"none"`,
+* otherwise, the returned value is `"normal"`.
+
+The `admin` access class can be defined in the `acl` section to specify admin users who will bypass the `normal` shaper.
+
+For this rule to take effect, it needs to be referenced in the options of a [C2S listener](listen.md#listenc2sshaper).
+
+### S2S Shaper
+
+The `s2s_shaper` rule is used to determine the shaper used to limit the incoming traffic on S2S connections:
+
+```toml
+  s2s_shaper = [
+    {acl = "all", value = "fast"}
+  ]
+```
+
+It assigns the `fast` shaper to all S2S connections.
+
+For this rule to take effect, it needs to be referenced in the options of an [S2S listener](listen.md#listens2sshaper).
+ +### MUC + +The following rules manage the permissions of MUC operations: + +```toml + muc_admin = [ + {acl = "admin", value = "allow"} + ] + + muc_create = [ + {acl = "local", value = "allow"} + ] + + muc = [ + {acl = "all", value = "allow"} + ] +``` + +They are referenced in the options of the [`mod_muc`](../modules/mod_muc.md) module. + +### Registration + +This rule manages the permissions to create new users with `mod_register`. + +```toml + register = [ + {acl = "all", value = "allow"} + ] +``` + +It needs to be referenced in the options of the [`mod_register`](../modules/mod_register.md) module. + +### MAM permissions + +These rules set the permissions for MAM operations triggered by IQ stanzas and handled by the [`mod_mam`](../modules/mod_mam.md) module. + +```toml + mam_set_prefs = [ + {acl = "all", value = "default"} + ] + + mam_get_prefs = [ + {acl = "all", value = "default"} + ] + + mam_lookup_messages = [ + {acl = "all", value = "default"} + ] +``` + +They can return `"allow"`, `"deny"` or `"default"`. +The last value uses the default setting for the operation, which is to allow the operation when the sender and recipient JID's are the same. + +### MAM shapers + +These rules limit the rate of MAM operations triggered by IQ stanzas. + +```toml + mam_set_prefs_shaper = [ + {acl = "all", value = "mam_shaper"} + ] + + mam_get_prefs_shaper = [ + {acl = "all", value = "mam_shaper"} + ] + + mam_lookup_messages_shaper = [ + {acl = "all", value = "mam_shaper"} + ] + + mam_set_prefs_global_shaper = [ + {acl = "all", value = "mam_global_shaper"} + ] + + mam_get_prefs_global_shaper = [ + {acl = "all", value = "mam_global_shaper"} + ] + + mam_lookup_messages_global_shaper = [ + {acl = "all", value = "mam_global_shaper"} + ] +``` + +For each operation there are two rules: + +- `*_shaper` - limits the number of operations per user connection per second, +- `*_global_shaper` - limits the number of operations per server node per second. 
+ +The values returned by the rules (`mam_shaper`, `mam_global_shaper`) are shaper names, which need to be defined in the [`shaper` section](shaper.md#mam-shapers). + +### Maximum number of sessions + +The `max_user_sessions` rule is used to determine the maximum number of sessions a user can open. + +```toml + max_user_sessions = [ + {acl = "all", value = 10} + ] +``` + +By default all users can open at most 10 concurrent sessions. + +### Maximum number of offline messages + +The `max_user_offline_messages` rule is used to determine the maximum number of messages that is stored for a user by the [`mod_offline` module](../modules/mod_offline.md). + +```toml + max_user_offline_messages = [ + {acl = "admin", value = 5000}, + {acl = "all", value = 100} + ] +``` + +It has the following logic: + +* if the access class is `admin`, the returned value is `5000`, +* otherwise, the returned value is `100`. + +This means that the admin users can have 5000 messages stored offline, while the others can have at most 100. +The `admin` access class can be defined in the [`acl` section](acl.md). + +## For developers + +To access the rule functionality, one has to use the `acl:match_rule/3` function. + +Given the following rule: + +```toml + register = [ + {acl = "all", value = "deny"} + ] +``` + +One can call: + +`acl:match_rule(<<"localhost">>, register, jid:make(<<"p">>, <<"localhost">>, <<>>)).` + +Which in our case will return `deny`. +If the rule is not host specific, one can use `global` instead of `<<"localhost">>`. diff --git a/doc/advanced-configuration/acl.md b/doc/advanced-configuration/acl.md index b9d046ff4eb..d8a16a44f15 100644 --- a/doc/advanced-configuration/acl.md +++ b/doc/advanced-configuration/acl.md @@ -1,138 +1,203 @@ -In MongooseIM access control is performed via Access Control Lists (ACL). -Initially, this functionality was supposed to answer the following question: "Is a given user allowed to access a particular resource?". 
-The answer was either `allow` or `deny`, but this functionality was extended to return any value. -For instance, we can ask how many concurrent session a given user can open, where the answer may vary depending on user affiliation - are they an admin or a normal user. +The `acl` section is used to define **access classes** to which the connecting users are assigned. These classes are used in [access rules](access.md). + +* **Scope:** global +* **Syntax:** each access class is a key-value pair, where: + * Key is the name of the access class, + * Value is a TOML array of patterns - TOML tables, whose format is described below. +* **Default:** no default - each access class needs to be specified explicitly. +* **Example:** the `local` access class is used for the regular users connecting to the [C2S listener](listen.md#client-to-server-c2s-listenc2s). + +```toml + local = [ + {user_regexp = ""} + ] +``` + +When there are multiple patterns listed, the resulting pattern will be the union of all of them. + +## Patterns + +The options listed below are used to assign the users to the access class. There are no default values for any of them. + +**Note:** the options can NOT be combined with each other unless the description says otherwise. + + +### `acl.*.match` -The functionality is defined via two top-level options: +* **Syntax:** string, one of: `"all"`, `"none"` +* **Example:** `match = "all"` -* `{acl, Name, Pattern}`: contrary to its name, it defines a user or the whole group. - For instance we can define admin users or banned users. -* `{access, Name, List}`: an ACL that has a name and list of rules and values returned by them in the case of a successfull match. +Matches either all users or none of them. The latter is useful for disabling access to some services. + +```toml + everyone = [ + {match = "all"} + ] +``` + +### `acl.*.user` + +* **Syntax:** string +* **Example:** `user = "admin"` + +Matches all JIDs with the specified user name. 
+The following class includes `alice@localhost`, but not `bob@localhost`: + +```toml + admin = [ + {user = "alice"}, + {user = "charlie"} + ] +``` -# ACL tuple +### `acl.*.server` - `{acl, Name, Pattern}`: the name could be any atom. - This name will be used in the access definitions. - Note that there might be many ACL entries with the same name, in this case the result will be the union of all patterns. +* **Syntax:** string +* **Example:** `server = "localhost"` -### Patterns +Matches all JIDs with the specified domain name. +The following class includes `alice@localhost`, but not `alice@xmpp.org`: -This sections describes all possible ACL patterns. -Some of them use the `re` syntax for regular expressions and some accept the `glob` syntax. +```toml + localhost_users = [ + {server = "localhost"} + ] +``` -* [`re` definitions](http://erlang.org/doc/man/re.html#id253353) -* [`glob` definitions](https://en.wikipedia.org/wiki/Glob_(programming)) +This option can be combined with `user` - only `alice@localhost` belongs to the following class: -Patterns can be defined in one of the following formats: +```toml + admin = [ + {user = "alice", server = "localhost"} + ] +``` -#### **all** -All users/JIDs match. +### `acl.*.resource` -#### **{user, username()}** +* **Syntax:** string +* **Example:** `resource = "mobile"` -All users with a given user name:
-`{user, "admin"}`: includes `admin@localhost`, `admin@xmpp.org`, etc. +Matches all JIDs with the specified resource name. +The following class includes `alice@localhost/mobile`, but not `alice@localhost/home`: -#### **{user, username(), server()}** -In this case the username and the domain have to match:
-`{user, "admin", "localhost"}`: `admin@localhost` matches, but`admin@xmpp.org` doesn't. +```toml + mobile_users = [ + {resource = "mobile"} + ] +``` -#### **{server, server()}** -All users from a given domain:
-`{server, "localhost"}`: `admin@localhost` and `pawel@localhost` match, but `pawel@xmpp.org` doesn't. +### `acl.*.user_regexp` -#### **{resource, resource()}** -All users with a matching resource:
-`{resource, "res1"}`: `admin@localhost/res1` matches, but `admin@localhost/res2` doesn't. +* **Syntax:** string, [regular expression](http://erlang.org/doc/man/re.html#regexp_syntax) +* **Example:** `user_regexp = "^user.*"` -#### **{user_regexp, username_regexp()}** -Similar to user, but the match is done against a regular expression:
-`{user_regexp, "^ad.*"}`: `admin@localhost` and `ads@localhost2` match, but `pad@localhost` doesn't. +Matches all JIDs with the user name matching the regular expression. +The following class includes `alice@localhost` and `albert@jabber.org`, but not `bob@localhost`: -#### **{user_regexp, username_regexp(), server()}** -Similar to user, the username is matched against regex, the server part has to be exactly the same:
-`{user_regexp, "^ad.*", "localhost"}`: `admin@localhost` matches, but `admin@xmpp.org` doesn't. +```toml + ae = [ + {user_regexp = "^a.*e"} + ] +``` -#### **{server_regexp, server_regexp()}** -Analogous to `user_regexp`, but regex matches on the server part of the JID:
-`{server_regexp, "^local.*"}`: `admin@localhost` matches, but `admin@relocal` doesn't. +This option can be combined with `server` - here `albert@jabber.org` is excluded: -#### **{resource_regexp, resource_regexp()}** -The same story as above, but for the resource part:
-`{resource_regexp, "res.*"}`: `admin@localhost/resource` matches, but `admin@localhost/ios` doesn't. +```toml + localhost_ae = [ + {user_regexp = "^a.*e", server = "localhost"} + ] +``` -#### **{node_regexp, username_regexp(), server_regexp()}** -Regexp matching on both the username and the domain:
-`{node_regexp, "ad.*", "loc.*"}`: `admin@localhost` matches, but `pawel@xmpp.org` doesn't. +### `acl.*.server_regexp` -#### **{user_glob, username_glob()}** -Match on the username part using glob style patterns:
-`{user_glob, "paw*"}`: `pawel@localhost` matches, but `admin@localhost` doesn't.
+* **Syntax:** string, [regular expression](http://erlang.org/doc/man/re.html#regexp_syntax)
+* **Example:** `server_regexp = "host"`
-#### **{user_glob, username_glob(), server()}**
-Match on the username part using glob patterns and on the server using exact match:
-`{user_glob, "paw*", "localhost"}`: `pawel@localhost` matches, but `pawel@xmpp.org` doesn't. +Matches all JIDs with the domain name matching the regular expression. +The following class includes `alice@host1`, but not `alice@xmpp.org`: -#### **{server_glob, server_glob()}** -Match on the server part using glob patterns:
-`{server_glob, "local*"}`: `pawel@localhost` matches, but `pawel@xmpp.org` doesn't. +```toml + host_users = [ + {server_regexp = "host"} + ] +``` -#### **{resource_glob, resource_glob()}** -Match on the resource part using glob patterns:
-`{resource_glob, "r*"}`: `pawel@localhost/res` matches, but `pawel@xmpp.org` doesn't. +This option can be combined with `user_regexp`, e.g. we can require the user name to contain 'a' and the domain name to start with 'a': -#### **{node_glob, username_glob(), server_glob()}** -Match on the username and the server part using glob patterns:
-`{node_glob, "paw*", "loc*"}`: `pawel@localhost/res` matches, but `pawel@xmpp.org` doesn't. +```toml + a = [ + {user_regexp = "a", server_regexp = "^a"} + ] +``` -# Access tuple +### `acl.*.resource_regexp` -Once we have our group of interest gathered in an ACL tuple, we can use them in an access control list. -To do so, we need to specify a tuple like the following: +* **Syntax:** string, [regular expression](http://erlang.org/doc/man/re.html#regexp_syntax) +* **Example:** `resource_regexp = "^res"` -`{access, name_of_access_rule, [{value(), acl_name()}]}` +Matches all JIDs with the resource name matching the regular expression. This class includes `bob@xmpp.org/res123`, but not `bob@xmpp.org/home`: -As an example we can discuss 2 rules which are present in the default config file: +```toml + digital_resources = [ + {resource_regexp = '^res\d+$'} + ] +``` -* `{access, register, [{allow, all}]}`.
-This rule is used while registering a new user. -The example above has no restrictions, but we might want to block certain JIDs, `admin` JID for instance. -To do so we need to set:
-`{acl, admin, {user, "admin"}}.`, then `{access, register, [{deny, admin}, {allow, all}]}.` -* `{access, max_user_offline_messages, [{5000, admin}, {100, all}]}`.
-This rule is used in `mod_offline`, it determines `mod_offline`'s storage limit.
-For users defined in the admin ACL (for example `{acl, admin, {user, "pawel", "localhost"}}`) the size is 5000 messages, while the size for a normal user is 10.
+Note the use of a literal string (single quotes) to prevent `\d` from being escaped.
+### `acl.*.user_glob`
-# Priority: global vs host access lists
+* **Syntax:** string, [glob pattern](https://en.wikipedia.org/wiki/Glob_(programming))
+* **Example:** `user_glob = "user*"`
-By default, both ACL and access elements are "global", so they apply to all domains available on the server.
-However, using the `host_config` option, we are able to override the rules for a particular domain.
+Matches all JIDs with the user name matching the pattern.
+The following class includes `alice@localhost` and `albert@jabber.org`, but not `bob@localhost`:
+```toml
+  ae_users = [
+    {user_glob = "a*e*"}
+  ]
 ```
-%% Define specific Access Rules in a virtual host.
-{host_config, "localhost",
- [
-  {access, c2s, [{allow, admin}, {deny, all}]},
-  {access, register, [{deny, all}]}
- ]
-}.
+
+This option can be combined with `server` - here `albert@jabber.org` is excluded:
+
+```toml
+  localhost_ae_users = [
+    {user_glob = "a*e*", server = "localhost"}
+  ]
 ```
-The global rule has the highest priority, however if the global rule ends with `{allow, all}` the host specific rule is taken into account.
+### `acl.*.server_glob`
-# For developers
+* **Syntax:** string, [glob pattern](https://en.wikipedia.org/wiki/Glob_(programming))
+* **Example:** `server_glob = "host*"`
-To access the ACL functionality, one has to use the `acl:match_rule/3` function.
+The following class includes `alice@host1`, but not `alice@xmpp.org`:
-Given the following ACL:
+```toml
+  localhost_users = [
+    {server_glob = "host*"}
+  ]
+```
-`{access, register, [{deny, all}]}`
+This option can be combined with `user_glob`, e.g. we can require the user name to contain 'a' and the domain name to start with 'a':
-One can call:
+```toml
+  a = [
+    {user_glob = "*a*", server_glob = "a*"}
+  ]
+```
+
+### `acl.*.resource_glob`
-`acl:match_rule(<<"localhost">>, register, jid:make(<<"p">>, <<"localhost">>, <<>>)).`
+* **Syntax:** string, [glob pattern](https://en.wikipedia.org/wiki/Glob_(programming))
+* **Example:** `resource_glob = "res*"`
-Which in our case will return deny.
-If the rule is not host specific, one can use `global` instead of `<<"localhost">>`.
+Matches all JIDs with the resource name matching the pattern. This class includes `bob@xmpp.org/res123`, but not `bob@xmpp.org/home`:
+```toml
+  limited_resources = [
+    {resource_glob = "res???"}
+  ]
+```
diff --git a/doc/advanced-configuration/auth.md b/doc/advanced-configuration/auth.md
new file mode 100644
index 00000000000..bacad2419ad
--- /dev/null
+++ b/doc/advanced-configuration/auth.md
@@ -0,0 +1,134 @@
+The `auth` section is used to choose and configure the **methods** which are used by MongooseIM to authenticate connecting users.
+The following methods are supported: + +* `internal` - stores the user accounts in an internal Mnesia database, +* [`rdbms`](../authentication-methods/rdbms.md) - stores the user accounts in a SQL database, +* [`external`](../authentication-methods/external.md) - uses an external program to authenticate the user, +* [`anonymous`](../authentication-methods/anonymous.md) - allows anonymous connections, +* [`ldap`](../authentication-methods/ldap.md) - checks the user credentials in LDAP, +* [`jwt`](../authentication-methods/jwt.md) - authenticates the users with JSON Web Tokens, +* [`riak`](../authentication-methods/riak.md) - stores the user accounts in a Riak database, +* [`http`](../authentication-methods/http.md) - uses an external HTTP service to authenticate the user, +* [`pki`](../authentication-methods/pki.md) - uses the certificate provided by the user to authenticate them, +* [`dummy`](../authentication-methods/dummy.md) - no authentication, only for development and testing. + +To enable user connections, you need to set up at least one of the methods listed above (see `auth.methods` below). +Some methods have more complex setup procedures - the method names above are links to their descriptions, +which list their specific configuration options. The general options are described below. + +# General Options + +The options listed here are used to configure the authentication methods. + +### `auth.methods` +* **Syntax:** array of strings. Allowed values: `"internal"`, `"rdbms"`, `"external"`, `"anonymous"`, `"ldap"`, `"jwt"`, `"riak"`, `"http"`, `"pki"`, `"dummy"` +* **Default:** not set +* **Example:** `["internal", "anonymous"]` + +Specifies the methods used to authenticate connecting users. Methods from the list are queried one after another until one of them replies positively. By default there are no methods, so nobody can authenticate. 
+ +**Warning:** Make sure that the compatible SASL mechanisms are enabled, see [capabilities](#authentication-method-capabilities). + +### `auth.sasl_mechanisms` +* **Syntax:** array of strings. Allowed values: `"scram_sha512_plus"`, `"scram_sha512"`, `"scram_sha384_plus"`, `"scram_sha384"`, `"scram_sha256_plus"`, `"scram_sha256"`, `"scram_sha224_plus"`, `"scram_sha224"`, `"scram_sha1_plus"`, `"scram_sha1"`, `"plain"`, `"anonymous"`, `"oauth"`, `"external"`, `"digest"` +* **Default:** `["scram_sha512_plus", "scram_sha512", "scram_sha384_plus", "scram_sha384", "scram_sha256_plus", "scram_sha256", "scram_sha224_plus", "scram_sha224", "scram_sha1_plus", "scram_sha1", "plain", "anonymous", "oauth"]` +* **Example:** `["external", "plain"]` + +Specifies the list of allowed SASL mechanisms, which are announced during stream negotiation and eventually enforced (users can't pick a mechanism not listed here). + +**Notes:** + +* This list is still filtered by [capabilities](#authentication-method-capabilities). +* Configuring the `sasl_mechanisms` replaces the default list entirely. +* The order in which the mechanisms are listed in the config will be taken as the order in which they are advertised. +* All `SCRAM-SHA-*` mechanisms (specified as `scram_sha*`) have their counterparts which support channel binding and are advertised as separate authentication mechanisms suffixed by `-PLUS` (specified as `scram_sha*_plus`). +* The `DIGEST-MD5` mechanism (specified as `digest`) is deprecated and will be removed in the next release. + +#### Authentication method capabilities + +The table below shows the supported SASL mechanisms (columns) for each authentication method (row). 
+ +| | plain | digest | scram_sha* | anonymous | external | +|-----------|:-----:|:------:|:----------:|:---------:|:--------:| +| internal | x | x | x | | | +| rdbms | x | x | x | | | +| external | x | | | | | +| anonymous | x | x | x | x | | +| ldap | x | | | | x | +| jwt | x | | | | | +| riak | x | x | x | | | +| http | x | x | x | | | +| pki | | | | | x | + +### `auth.sasl_external` +* **Syntax:** list of strings, allowed values: `"standard"`, `"common_name"`, `"auth_id"` +* **Default:** `["standard"]` +* **Example:** `sasl_external = ["standard", "common_name"]` + +There are three possible ways of using the `SASL EXTERNAL` mechanism: + +* `standard` - do not accept a certificate with no `xmpp_addrs` field (default), +* `common_name` - use the `common_name` field if it is provided in the certificate, +* `auth_id` - accept a certificate without `xmpp_addrs` and use the user identity from the authentication request. + +This option allows you to list the enabled ones in the order of preference (they are tried until one succeeds or the list is exhausted). + +## Password-related options + +These options are common to the `http`, `rdbms`, `internal` and `riak` methods. + +### `auth.password.format` +* **Syntax:** string, one of: `"plain"`, `"scram"` +* **Default:** `"scram"` +* **Example:** `password.format = "plain"` + +Decide whether user passwords will be kept plain or hashed in the database. +Currently, popular XMPP clients support the SCRAM method and it is strongly recommended to use the hashed version. +The older XMPP clients can still use the `PLAIN` mechanism. + +**Note:** The `DIGEST-MD5` mechanism is not available with the `scram` password format. 
+ +### `auth.password.hash` +* **Syntax:** list of strings, allowed values: `"sha"`, `"sha224"`, `"sha256"`, `"sha384"`, `"sha512"` +* **Default:** not set - all hash functions supported +* **Example:** `password.hash = ["sha384", "sha512"]` + +MongooseIM supports SHA-1, SHA-224, SHA-256, SHA-384 and SHA-512 for SCRAM hashing. +You can use this option to limit the supported hash functions by listing them explicitly. +The value `"sha"` stands for the SHA-1 algorithm. + +### `auth.scram_iterations` +* **Syntax:** positive integer +* **Default:** 10000, as recommended in this [XEP](https://xmpp.org/extensions/xep-0438.html#pbkdf2) and this [NIST Guidelines](https://pages.nist.gov/800-63-3/sp800-63b.html#sec5) +* **Example:** `scram_iterations = 20_000` + +Hash function round count. +This is a tradeoff between latency and security. +The higher the value, the more difficult breaking the hashes is: increasing the count increases the work it requires to compute a full derivation, which effectively slows down brute-force attacks. +But it adds load on both client and server, so this parameter should be tuned as high as the business-rules allow. +Note that increasing the security of a password has a higher impact over the security of the algorithm, without impacting its load. +See more information in this [NIST guide, Appendix A.2.2](https://csrc.nist.gov/publications/detail/sp/800-132/final) + +## Example + +This minimal authentication setup uses the internal Mnesia database to store users and their passwords: + +```toml +[auth] + methods = ["internal"] +``` + +According to the [capabilities](#authentication-method-capabilities) of the `internal` method, the `PLAIN`, `DIGEST-MD5` and `SCRAM-SHA-*` mechanisms will be supported. + +However, for production systems other methods like `rdbms` are recommended, as using an external database offers easier maintenance, flexibility, scalability and configurability in a typical setup. 
+ +# Method-specific options + +See the links below for options related to the particular methods: + +* [Anonymous method options](../authentication-methods/anonymous.md#configuration-options) +* [External method options](../authentication-methods/external.md#configuration-options) +* [LDAP method options](../authentication-methods/ldap.md#configuration-options) +* [JWT method options](../authentication-methods/jwt.md#configuration-options) +* [Riak method options](../authentication-methods/riak.md#configuration-options) +* [HTTP method options](../authentication-methods/http.md#configuration-options) diff --git a/doc/advanced-configuration/general.md b/doc/advanced-configuration/general.md new file mode 100644 index 00000000000..0caf2a03e39 --- /dev/null +++ b/doc/advanced-configuration/general.md @@ -0,0 +1,202 @@ +The `general` section contains basic settings as well as some miscellaneous options. +You can start with providing only the basic options, configuring the loglevel, a single host (XMPP domain) and setting the default server language: + +``` +[general] + loglevel = "warning" + hosts = ["my-xmpp-domain.com"] + language = "en" +``` + +All options are described below. + +# General options + +These are the basic settings that you should configure before running your MongooseIM server. + +## `general.loglevel` +* **Scope:** local +* **Syntax:** string, one of `"none"`, `"emergency"`, `"alert"`, `"critical"`, `"error"`, `"warning"`, `"notice"`, `"info"`, `"debug"`, `"all"`. +* **Default:** `"warning"` +* **Example:** `loglevel = "error"` + +Verbosity level of the logger. Values recommended for production systems are `"error"` and `"warning"`. The `"debug"` level is good for development. + +## `general.hosts` +* **Scope:** global +* **Syntax:** array of strings representing the domain names. +* **Default:** there is no default, you have to provide at least one host name. 
+* **Example:** `hosts = ["localhost", "domain2"]` + +Mandatory option, specifying the XMPP domains served by this cluster. + +**Warning:** Extension modules and database backends will be started separately for every domain. When increasing the number of domains, please make sure you have enough resources available (e.g. connection limit set in the DBMS). + +## `general.language` +* **Scope:** global +* **Syntax:** string representing the two-letter language code. +* **Default:** `"en"` +* **Example:** `language = "pl"` + +Default language for messages sent by the server to users. You can get a full list of supported codes by executing `cd [MongooseIM root] ; ls priv/*.msg | awk '{split($0,a,"/"); split(a[4],b,"."); print b[1]}'` (`en` is not listed there) + +# Database settings + +RDBMS connection pools are set using [outgoing connections configuration](./outgoing-connections.md). +There are some additional options that influence all database connections in the server: + +## `general.rdbms_server_type` +* **Scope:** local +* **Syntax:** string, `"mssql"` or `"pgsql"` +* **Default:** not set +* **Example:** `rdbms_server_type = "mssql"` + +When using MSSQL or PostgreSQL databases, this option allows MongooseIM to optimize some queries for these DBs (e.g. `mod_mam_rdbms_user` uses different queries for `mssql`). + +## `general.pgsql_users_number_estimate` +* **Scope:** local +* **Syntax:** boolean +* **Default:** false +* **Example:** `pgsql_users_number_estimate = true` + +PostgreSQL's internal structure can make row counting slow. +Enabling this option uses an alternative query instead of `SELECT COUNT`, that might be not as accurate, but is always fast. + +# Access management + +User access rules are configured mainly in the [`acl`](acl.md) and [`access`](access.md) sections. Here you can find some additional options. 
+ +## `general.mongooseimctl_access_commands` +* **Scope:** local +* **Syntax:** TOML table, whose **keys** are the names of the access rules defined in the [`access`](access.md) config section and **values** specify allowed administration commands. Each value is a table with the following nested options: + * `commands`: mandatory, a list of strings representing the allowed commands, or the string `"all"` + * `argument_restrictions`: optional, a table whose keys are the argument names and the values are strings representing the allowed values +* **Default:** not set + +By default all admin operations are permitted with the `mongooseimctl` command without authentication. You can change that by setting this option for a specific access rule. When the rule returns the value `"allow"`, the user is permitted to use the specified commands with the optional restrictions. + +**Example 1.** Allow administrators to execute all commands without any restrictions: + +``` + [general.mongooseimctl_access_commands.admin] + commands = "all" +``` + +The `admin` rule needs to be defined in the `access` section. + +**Example 2.** Allow local users to execute the `join_cluster` command, but only if the `node` argument is equal to `mongooseim@prime`: + +``` + [general.mongooseimctl_access_commands.local] + commands = ["join_cluster"] + argument_restrictions.node = "mongooseim@prime" +``` + +The `local` rule needs to be defined in the `access` section. + +# Security + +Here you can find some additional options related to system security. + +## `general.registration_timeout` +* **Scope:** local +* **Syntax:** the string `"infinity"` or a number of seconds (positive integer) +* **Default:** `600` +* **Example:** `registration_timeout = "infinity"` + +Limits the registration frequency from a single IP address. The special value `infinity` means no limit. 
+
+## `general.hide_service_name`
+* **Scope:** local
+* **Syntax:** boolean
+* **Default:** `false`
+* **Example:** `hide_service_name = true`
+
+According to RFC 6120, even when a client sends invalid data after opening a connection, the server must open an XML stream and return a stream error anyway. For extra security, this option may be enabled. It changes MIM behaviour to simply close the connection without any errors returned (effectively hiding the server's identity).
+
+# User session management
+
+These options can be used to configure the way MongooseIM manages user sessions.
+
+## `general.sm_backend`
+* **Scope:** global
+* **Syntax:** string, `"mnesia"` or `"redis"`
+* **Default:** `"mnesia"`
+* **Example:** `sm_backend = "redis"`
+
+Backend for storing user session data. All nodes in a cluster must have access to a complete session database.
+Mnesia is sufficient in most cases, use Redis only in large deployments when you notice issues with the mnesia backend. Requires a redis pool with the `default` tag defined in the `outgoing_pools` section.
+See the section about [redis connection setup](./outgoing-connections.md#redis-connection-setup) for more information.
+
+## `general.replaced_wait_timeout`
+* **Scope:** local
+* **Syntax:** positive integer, representing time in milliseconds
+* **Default:** `2000`
+* **Example:** `replaced_wait_timeout = 5000`
+
+When a user's session is replaced (due to a full JID conflict) by a new one, this parameter specifies the time MongooseIM waits for the old sessions to close. The default value is sufficient in most cases. If you observe `replaced_wait_timeout` warning in logs, then most probably the old sessions are frozen for some reason and it should be investigated.
+
+# Message routing
+
+The following options influence the way MongooseIM routes incoming messages to their recipients.
+ +## `general.route_subdomains` +* **Scope:** local +* **Syntax:** string, the only accepted value is `"s2s"` +* **Default:** not set +* **Example:** `route_subdomains = "s2s"` + +If a stanza is addressed to a subdomain of the served domain and this option is set to `s2s`, such a stanza will be transmitted over a server-to-server connection. Without it, MongooseIM will try to route the stanza to one of its internal services. + +## `general.routing_modules` +* **Scope:** local +* **Syntax:** a list of strings representing the routing module names. +* **Default:** `["mongoose_router_global", "mongoose_router_localdomain", "mongoose_router_external_localnode", "mongoose_router_external", "ejabberd_s2s"]` +* **Example:** `routing_modules = ["mongoose_router_global", "mongoose_router_localdomain"]` + +Provides an ordered list of modules used for routing messages. If one of the modules accepts packet for processing, the remaining ones are not called. + +Allowed module names: + +* `mongoose_router_global` - calls the `filter_packet` hook. +* `mongoose_router_localdomain` - routes packets addressed to a domain supported by the local cluster. +* `mongoose_router_external_localnode` - delivers packets to an XMPP component connected to the node, which processes the request. +* `mongoose_router_external` - delivers packets to an XMPP component connected to the local cluster. +* `ejabberd_s2s` - forwards packets to another XMPP cluster over XMPP Federation. + +# Miscellaneous + +The options listed below are used to configure more specific settings, that do not need to be changed in usual use cases. + +## `general.all_metrics_are_global` +* **Scope:** local +* **Syntax:** boolean +* **Default:** `false` +* **Example:** `all_metrics_are_global = true` + +When enabled, all per-host metrics are merged into global equivalents. It means it is no longer possible to view individual host1, host2, host3, ... metrics, only sums are available. 
This option significantly reduces CPU and (especially) memory footprint in setups with exceptionally many domains (thousands, tens of thousands). + +## `general.http_server_name` +* **Scope:** local +* **Syntax:** string +* **Default:** `"Cowboy"` +* **Example:** `http_server_name = "Apache"` + +Replaces [Cowboy](https://github.com/ninenines/cowboy)'s default name returned in the `server` HTTP response header. It may be used for extra security, as it makes it harder for the malicious user to learn what HTTP software is running under a specific port. This option applies to **all** configured HTTP listeners. + +## `general.override` +* **Scope:** local +* **Syntax:** array of strings: `"global"`, `"local"`, `"acls"` +* **Default:** not set +* **Example:** `override = ["global", "local"]` + +Will cause MongooseIM to erase all global/local/acl configuration options in database respectively. This ensures that ALL settings of a specific type will be reloaded on startup. + +## `general.max_fsm_queue` +* **Scope:** local +* **Syntax:** positive integer +* **Default:** not set +* **Example:** `max_fsm_queue = 5000` + +When specified, will terminate certain processes (e.g. client handlers) that have more messages accumulated in the queue than the specified limit, to prevent resource exhaustion. +This option is set for C2S, outgoing S2S and component connections and can be overridden for particular `s2s` or `service` listeners in their configurations. **Use with caution!** diff --git a/doc/advanced-configuration/host_config.md b/doc/advanced-configuration/host_config.md new file mode 100644 index 00000000000..14747bc373a --- /dev/null +++ b/doc/advanced-configuration/host_config.md @@ -0,0 +1,255 @@ +The `host_config` section is used to configure options for specific XMPP domains. 
+For each domain requiring such options, a `host_config` section needs to be created with the following format: + +* **Scope:** for each option the scope is the same as for the corresponding top-level option. +* **Syntax:** domain subsection starts with `[[host_config]]` and contains the options listed below. +* **Default:** none - all domain-level options need to be specified explicitly. +* **Example:** see the examples for each section below. + +**Note:** Each hosted domain needs to be included in the list of [`hosts`](general.md#generalhosts) in the `general` section. + +# General options + +### `host_config.host` + +* **Syntax:** string, domain name +* **Default:** no default, this option is mandatory +* **Example:** `host = "my-xmpp-server.com"` + +This option specifies the XMPP domain that this section refers to. + +# Configuration sections + +The following sections are accepted in `host_config`: + +## `host_config.general` + +The options defined here override the ones defined in the top-level [`general`](general.md) section. +The following options are allowed: + +* [`pgsql_users_number_estimate`](general.md#generalpgsql_users_number_estimate) +* [`route_subdomains`](general.md#generalroute_subdomains) +* [`replaced_wait_timeout`](general.md#generalreplaced_wait_timeout) +* [`hide_service_name`](general.md#generalhide_service_name) + +#### Example + +The `hide_service_name` option is set to `false` only for `domain2.com`. + +```toml +[general] + hosts = ["domain1.com", "domain2.com", "domain3.com"] + loglevel = "info" + hide_service_name = true + replaced_wait_timeout = 1000 + +[[host_config]] + host = "domain2.com" + + [host_config.general] + hide_service_name = false +``` + +## `host_config.auth` + +This section overrides the top-level [`auth`](auth.md) section, all options are allowed. 
+It is recommended to repeat all top-level options in the domain-specific section as the rule is quite complicated: + +- If you specify any of the following options, **all** of the following options will be overridden: + - [`sasl_external`](auth.md#authsasl_external) + - [`password.*`](auth.md#password-related-options) + - [`scram_iterations`](auth.md#authscram_iterations) + - [`external.program`](../../authentication-methods/external/#authexternalprogram) + - [`ldap.*`](../../authentication-methods/ldap) + - [`jwt.*`](../../authentication-methods/jwt) + - [`riak.*`](../../authentication-methods/riak) + - [`http.*`](../../authentication-methods/http) +- If you specify any of the following options, only these options will be overridden: + - [`methods`](auth.md#authmethods) + - [`sasl_mechanisms`](auth.md#authsasl_mechanisms) + - [`external.instances`](../../authentication-methods/external/#authexternalinstances) + - [`anonymous.*`](../../authentication-methods/anonymous) + +#### Example + +In the example below the number of `scram_iterations` is increased for `domain2`. +It is necessary to put the `password.hash` there as well, as otherwise it would be replaced with the default setting. +However, specifying `methods` is not necessary as this value will not be changed. + +```toml +[general] + hosts = ["domain1.com", "domain2.com", "domain3.com"] + +[auth] + methods = ["rdbms"] + password.hash = ["sha256"] + +[[host_config]] + host = "domain2.com" + + [host_config.auth] + methods = ["rdbms"] + password.hash = ["sha256"] + scram_iterations = 40_000 +``` + +The last section would work the same without `methods`: + +```toml + [host_config.auth] + password.hash = ["sha256"] + scram_iterations = 40_000 +``` + +## `host_config.modules` + +This section completely overrides the top-level [`modules`](../Modules) section. All options are allowed. + +#### Example + +The modules enabled for `domain2.com` will be `mod_disco` and `mod_stream_management`. 
+If we wanted to enable `mod_roster`, it would need to be repeated in `host_config`. + +```toml +[general] + hosts = ["domain1.com", "domain2.com", "domain3.com"] + +[modules.mod_disco] + users_can_see_hidden_services = false + +[modules.mod_roster] + backend = "rdbms" + +[[host_config]] + host = "domain2.com" + + [host_config.modules.mod_disco] + users_can_see_hidden_services = false + + [host_config.modules.mod_stream_management] +``` + +## `host_config.acl` + +The access classes defined here are merged with the ones defined in the top-level [`acl`](acl.md) section - when a class is defined in both places, the result is a union of both classes. + +#### Example + +The `blocked` access class is extended for `host_config` by adding `hacker2`. + +```toml +[general] + hosts = ["domain1.com", "domain2.com", "domain3.com"] + +[acl] + blocked = [ + {user = "spammer"}, + {user = "hacker1"} + ] + +[[host_config]] + host = "domain2.com" + + [host_config.acl] + blocked = [ + {user = "hacker2"} + ] +``` + +## `host_config.access` + +The access rules defined here are merged with the ones defined in the top-level [`access`](access.md) section: +When a rule is defined in both places: + +* If the top-level rule ends with a catch-all clause `{acl = "all", value = "allow"}`, the resulting domain-specific rule has the clauses from **both** rules with the domain-specific clauses inserted after the top-level ones, but before the catch-all clause. +* If the top-level rule does not end with a catch-all clause, the resulting domain-specific rule has the clauses from **both** rules with the domain-specific clauses inserted after the top-level ones. + +#### Example + +The `c2s` access rule defined at the top level allows anyone to connect. 
+However, the rule for `domain2.com` is extended to prevent the `blocked` users from connecting: + +```toml +[general] + hosts = ["domain1.com", "domain2.com", "domain3.com"] + +[access] + c2s = [ + {acl = "admin", value = "allow"}, + {acl = "all", value = "allow"} + ] + +[[host_config]] + host = "domain2.com" + + [host_config.access] + c2s = [ + {acl = "blocked", value = "deny"} + ] + + register = [ + {acl = "all", value = "deny"} + ] +``` + +The resulting rule for `domain2.com` could be written as: + +```toml +c2s = [ + {acl = "admin", value = "allow"}, + {acl = "blocked", value = "deny"}, + {acl = "all", value = "allow"} +] +``` + +The `register` rule is defined only for `domain2.com`. + +**Note:** some access rules are checked outside of the context of any domain, e.g. the [access rule for external components](listen.md#listenserviceaccess) - defining them in `host_config` would have no effect. + +## `host_config.s2s` + +The options defined here override the ones defined in the top-level [`s2s`](s2s.md) section. 
+The following options are allowed: + +* [`default_policy`](s2s.md#s2sdefault_policy) +* [`host_policy`](s2s.md#s2shost_policy) - overrides the top-level setting host by host +* [`shared`](s2s.md#s2sshared) +* [`max_retry_delay`](s2s.md#s2smax_retry_delay) + +#### Example + +The `host_policy` option is changed for `domain2.com`: + +```toml +[general] + hosts = ["domain1.com", "domain2.com", "domain3.com"] + +[s2s] + default_policy = "deny" + + host_policy = [ + {host = "good-xmpp.org", policy = "allow"}, + {host = "bad-xmpp.org", policy = "deny"} + ] + +[[host_config]] + host = "domain2.com" + + [host_config.s2s] + host_policy = [ + {host = "bad-xmpp.org", policy = "allow"}, + {host = "evil-xmpp.org", policy = "deny"} + ] +``` + +The resulting `host_policy` for `domain2.com` is the following: + +```toml +host_policy = [ + {host = "good-xmpp.org", policy = "allow"}, + {host = "bad-xmpp.org", policy = "allow"}, + {host = "evil-xmpp.org", policy = "deny"} +] +``` + +The `default_policy` is still `deny`. diff --git a/doc/advanced-configuration/listen.md b/doc/advanced-configuration/listen.md new file mode 100644 index 00000000000..60cbae2fd61 --- /dev/null +++ b/doc/advanced-configuration/listen.md @@ -0,0 +1,736 @@ +The `listen` section specifies how MongooseIM handles incoming connections. + +* **Scope:** local +* **Syntax:** Each listener is specified in a subsection starting with `[[listen.type]]` where `type` is one of the allowed listener types, handling different types of incoming connections: + + * `c2s` - client-to-server XMPP connections, + * `s2s` - server-to-server XMPP connections, + * `service` - XMPP connections from external components, + * `http` - HTTP connections from clients or other services. + +The double-bracket syntax is used because there can be multiple listeners of a given type, so for each listener type there is a TOML array of one or more tables (subsections). + +* **Default:** None - each listener needs to be enabled explicitly. 
Typical listeners are already specified in the example configuration file. +* **Example:** The simplest XMPP listener configuration, handling only incoming XMPP client connections: + +```toml +[[listen.c2s]] + port = 5222 +``` + +# General listener options + +The options listed below are the same for all listener types. They set the basic listening socket options. Only `port` is required, the rest can be used to change the default settings. + +#### `listen.*.port` +* **Syntax:** integer, port number +* **Default:** no default, this option is mandatory. +* **Example:** `port = 5222` + +The port number to which the listening socket is bound. + +#### `listen.*.ip_address` +* **Syntax:** string with the IP address +* **Default:** all-zeros address (e.g. `"0.0.0.0"` for IPv4) +* **Example:** `ip_address = "127.0.0.1"` + +The IP address to which the listening socket is bound. + +#### `listen.*.proto` +* **Syntax:** string, `"udp"` or `"tcp"` +* **Default:** `"tcp"` +* **Example:** `proto = "udp"` + +The protocol, which is TCP by default. There is no reason to change this for XMPP or HTTP listeners. + +#### `listen.*.ip_version` +* **Syntax:** integer, `4` or `6` +* **Default:** if `ip_address` is specified, the IP version is determined from that address, otherwise it is `4` +* **Example:** `ip_version = 6` + +Allows to set the IP version to IPv6. Does not need to be set if `ip_address` is defined. + +# XMPP listener options + +The options listed below can be set for the `c2s`, `s2s` and `service` listeners to adjust their parameters. + +#### `listen.*.backlog` +* **Syntax:** positive integer +* **Default:** `100` +* **Example:** `backlog = 1000` + +Overrides the default TCP backlog value. + +#### `listen.*.proxy_protocol` +* **Syntax:** boolean +* **Default:** `false` +* **Example:** `proxy_protocol = true` + +When set to `true`, [Proxy Protocol](https://www.haproxy.com/blog/haproxy/proxy-protocol/) is enabled and each connecting client has to provide a proxy header. 
Use only with a proxy (or a load balancer) to allow it to provide the connection details (including the source IP address) of the original client. Versions 1 and 2 of the protocol are supported. + +#### `listen.*.hibernate_after` +* **Syntax:** non-negative integer +* **Default:** `0` +* **Example:** `hibernate_after = 10` + +Time in milliseconds after which a client process spawned by this listener will hibernate. +Hibernation greatly reduces memory consumption of client processes, but *may* result in increased CPU consumption if a client is used *very* frequently. +The default, recommended value of 0 means that the client processes will hibernate at every opportunity. + +#### `listen.*.max_stanza_size` +* **Syntax:** positive integer +* **Default:** not set, unlimited size +* **Example:** `max_stanza_size = 10_000` + +Maximum allowed incoming stanza size in bytes. +**Warning:** this limit is checked **after** the input data parsing, so it does not apply to the input data size itself. + +#### `listen.*.num_acceptors` +* **Syntax:** positive integer +* **Default:** `100` +* **Example:** `num_acceptors = 200` + +The number of processes accepting new connections on the listening socket. + +#### `listen.*.max_fsm_queue` +* **Syntax:** positive integer +* **Default:** not set - no limit +* **Example:** `max_fsm_queue = 1000` + +Message queue limit to prevent resource exhaustion; overrides the value set in the `general` section. +This option does **not** work for `s2s` listeners - the `general` value is used for them. + +# Client-to-server (C2S): `[[listen.c2s]]` + +Handles XMPP client-to-server (C2S) connections. +The recommended port number for a C2S listener is 5222 [as registered in the XMPP protocol](https://tools.ietf.org/html/rfc6120#section-14.7). 
+The following options are supported for each C2S listener:
+
+#### `listen.c2s.access`
+* **Syntax:** string, rule name or `"all"`
+* **Default:** `"all"`
+* **Example:** `access = "c2s"`
+
+The rule that determines who is allowed to connect. By default the rule is `"all"`, which means that anyone can connect. The rule referenced here needs to be defined in the `access` configuration section.
+
+#### `listen.c2s.shaper`
+* **Syntax:** string, rule name
+* **Default:** `"none"` (no shaper)
+* **Example:** `shaper = "c2s_shaper"`
+
+The rule that determines what traffic shaper is used to limit the incoming XMPP traffic to prevent the server from being flooded with incoming data.
+The rule referenced here needs to be defined in the `access` configuration section.
+The value of the access rule needs to be either the shaper name or the string `"none"`, which means no shaper.
+
+#### `listen.c2s.zlib`
+* **Syntax:** positive integer
+* **Default:** not set, disabled
+* **Example:** `zlib = 1024`
+
+Enables ZLIB support, the integer value is a limit for a decompressed output size in bytes (to prevent a successful [ZLIB bomb attack](https://xmpp.org/community/security-notices/uncontrolled-resource-consumption-with-highly-compressed-xmpp-stanzas.html)).
+
+## TLS options for C2S
+
+The following options allow enabling and configuring TLS which makes the client-to-server connections secure.
+They all have the `tls.` prefix.
+
+#### `listen.c2s.tls.mode`
+* **Syntax:** string, one of `"tls"`, `"starttls"`, `"starttls_required"`
+* **Default:** not set
+* **Example:** `tls.mode = "starttls"`
+
+By default there is no encryption for the incoming connections. 
You can change this by setting the `tls.mode` option to one of the following modes: + +* `tls` - clients must initiate a TLS session immediately after connecting, before beginning the normal XML stream, +* `starttls` - enables StartTLS support; requires `certfile`, +* `starttls_required` - enables and enforces StartTLS usage. + +#### `listen.c2s.tls.verify_peer` +* **Syntax:** boolean +* **Default:** `false` +* **Example:** `verify_peer = true` + +Enforces verification of a client certificate. Requires a valid `cacertfile`. + +#### `listen.c2s.tls.module` +* **Syntax:** string, one of `"just_tls"`, `"fast_tls"` +* **Default:** `"fast_tls"` +* **Example:** `tls.module = "just_tls"` + +By default the TLS library used for C2S connections is `fast_tls`, which uses OpenSSL-based NIFs. It is possible to change it to `just_tls` - Erlang TLS implementation provided by OTP. Some TLS-related options described here have different formats for these two libraries. + +Requires setting `tls.verify_mode`. When set to `false`, it allows the client to connect even though the certificate verification failed. It is then up to the authentication layer to accept or reject the client connection. This behaviour mimics the FastTLS one. + +#### `listen.c2s.tls.certfile` +* **Syntax:** string, path in the file system +* **Default:** not set +* **Example:** `tls.certfile = "server.pem"` + +Path to the X509 PEM file with a certificate and a private key (not protected by a password). If the certificate is signed by an intermediate CA, you should specify here the whole CA chain by concatenating all public keys together and appending the private key after that. + +#### `listen.c2s.tls.cacertfile` +* **Syntax:** string, path in the file system +* **Default:** not set +* **Example:** `tls.cacertfile = "ca.pem"` + +Path to the X509 PEM file with a CA chain that will be used to verify clients. It won't have any effect if `verify_peer` is not enabled. 
+
+#### `listen.c2s.tls.dhfile`
+* **Syntax:** string, path in the file system
+* **Default:** not set
+* **Example:** `tls.dhfile = "dh.pem"`
+
+Path to the Diffie-Hellman parameter file.
+
+#### `listen.c2s.tls.protocol_options` - only for `fast_tls`
+* **Syntax:** array of strings
+* **Default:** `["no_sslv2", "no_sslv3", "no_tlsv1", "no_tlsv1_1"]`
+* **Example:** `tls.protocol_options = ["no_tlsv1", "no_tlsv1_1"]`
+
+A list of OpenSSL options for FastTLS. You can find the mappings between supported options and actual OpenSSL flags in the `fast_tls` [source code](https://github.com/processone/fast_tls/blob/master/c_src/options.h).
+
+#### `listen.c2s.tls.ciphers` - for `fast_tls`
+* **Syntax:** string with the OpenSSL cipher suite specification
+* **Default:** `"TLSv1.2:TLSv1.3"`
+* **Example:** `tls.ciphers = "ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES256-GCM-SHA384"`
+
+Cipher suites to use with StartTLS or TLS. Please refer to the [OpenSSL documentation](http://www.openssl.org/docs/man1.0.2/apps/ciphers.html) for the cipher string format.
+
+#### `listen.c2s.tls.ciphers` - for `just_tls`
+* **Syntax:** array of tables with the following keys: `cipher`, `key_exchange`, `mac`, `prf` and string values.
+* **Default:** not set, all supported cipher suites are accepted
+* **Example:** `tls.ciphers = [{cipher = "aes_256_gcm", key_exchange = "any", mac = "aead", prf = "sha384"}]`
+
+Cipher suites to use with StartTLS or TLS. 
For allowed values, see the [Erlang/OTP SSL documentation](https://erlang.org/doc/man/ssl.html#type-ciphers) + +#### `listen.c2s.tls.verify_mode` - only for `just_tls` +* **Syntax:** string, one of `"peer"`, `"selfsigned_peer"`, `"none"` +* **Default:** not set (equivalent to `"peer"` in the current version of Erlang/OTP) +* **Example:** `tls.verify_mode = "selfsigned_peer"` + +Specifies the way certificate verification works: + +* `peer` - makes sure the peer's certificate is valid and signed by a trusted CA, +* `selfsigned_peer` - makes sure the peer's certificate is valid, but allows self-signed certificates, +* `none` - any certificate is accepted. + +#### `listen.c2s.tls.disconnect_on_failure` - only for `just_tls` +* **Syntax:** boolean +* **Default:** `true` +* **Example:** `tls.disconnect_on_failure = false` + +#### `listen.c2s.tls.versions` - only for `just_tls` +* **Syntax:** array of strings +* **Default:** not set, all supported versions are accepted +* **Example:** `tls.versions = ["tlsv1.2", "tlsv1.3"]` + +TLS versions to use with StartTLS or TLS. For allowed values, see the [Erlang/OTP SSL documentation](https://erlang.org/doc/man/ssl.html#type-protocol_version) + +#### `listen.c2s.tls.crl_files` - only for `just_tls` +* **Syntax:** array of strings, paths in the file system +* **Default:** not set +* **Example:** `tls.crl_files = ["certs.crl"]` + +Specifies the paths to Certificate Revocation Lists. + +### C2S Example + +The following section configures two C2S listeners. + +```toml +[[listen.c2s]] + port = 5222 + zlib = 10000 + access = "c2s" + shaper = "c2s_shaper" + max_stanza_size = 65536 + tls.mode = "starttls" + tls.certfile = "server.pem" + tls.dhfile = "dh_server.pem" + +[[listen.c2s]] + port = 5223 + zlib = 4096 + access = "c2s" + shaper = "c2s_shaper" + max_stanza_size = 65536 +``` + +* One at port 5222, which accepts a plain TCP connection and allows to use StartTLS for upgrading it to an encrypted one. 
The files containing the certificate and the DH parameter are also provided. +* One at port 5223, which accepts only encrypted TLS connections - this is the legacy method as StartTLS is preferred. + +Both listeners use ZLIB and the `c2s` and `c2s_shaper` rules for access management and traffic shaping, respectively. + +## Server-to-server (S2S): `[[listen.s2s]]` + +Handles incoming server-to-server (S2S) connections (federation). +The recommended port number for an S2S listener is 5269 [as registered in the XMPP protocol](https://tools.ietf.org/html/rfc6120#section-14.7). + +**Note:** Many S2S options are configured in the `s2s` section of the configuration file and they apply to both incoming and outgoing connections. + +#### `listen.s2s.shaper` +* **Syntax:** string, name of the shaper rule or `"none"` +* **Default:** `"none"` - no shaper +* **Example:** `shaper = "s2s_shaper"` + +Name of the rule that determines what traffic shaper is used to limit the incoming XMPP traffic to prevent the server from being flooded with incoming data. The rule referenced here needs to be defined in the `access` config section and it should return the shaper name or the value `"none"`. + +### TLS options for S2S + +S2S connections do not use TLS encryption unless enabled with the `use_starttls` option in the `s2s` section. +Here you can specify some additional options of the TLS encryption. + +#### `listen.s2s.tls.cacertfile` +* **Syntax:** string, path in the file system +* **Default:** not set +* **Example:** `tls.cacertfile = "ca.pem"` + +Path to the X509 PEM file with a CA chain that will be used to verify the connecting XMPP servers (acting as clients here). It won't have any effect if `verify_peer` is not enabled. + +#### `listen.s2s.tls.dhfile` +* **Syntax:** string, path in the file system +* **Default:** not set +* **Example:** `tls.dhfile = "dh.pem"` + +Path to the Diffie-Hellman parameter file. 
+
+#### `listen.s2s.tls.ciphers`
+* **Syntax:** string with the OpenSSL cipher suite specification
+* **Default:** `"TLSv1.2:TLSv1.3"`
+* **Example:** `tls.ciphers = "ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES256-GCM-SHA384"`
+
+Cipher suites to use with StartTLS. Please refer to the [OpenSSL documentation](http://www.openssl.org/docs/man1.0.2/apps/ciphers.html) for the cipher string format.
+
+### S2S Example
+
+The following section configures an S2S listener with some basic settings set up.
+The `s2s_shaper` access rule is used, which requires a definition in the `access` section.
+
+```toml
+[[listen.s2s]]
+  port = 5269
+  shaper = "s2s_shaper"
+  max_stanza_size = 131072
+  tls.dhfile = "dh_server.pem"
+```
+
+## XMPP Components: `[[listen.service]]`
+
+Interface for external services acting as XMPP components ([XEP-0114: Jabber Component Protocol](http://xmpp.org/extensions/xep-0114.html)), enabling communication between MongooseIM and external services over the XMPP network. The recommended port number for a component listener is 8888.
+
+According to [XEP-0114: Jabber Component Protocol](http://xmpp.org/extensions/xep-0114.html), the component's hostname should be given in the opening `<stream:stream>` element.
+
+#### `listen.service.access`
+* **Syntax:** string, rule name or `"all"`
+* **Default:** `"all"`
+* **Example:** `access = "component"`
+
+Determines who is allowed to connect to the listener. By default the rule is `all`, which means that any external component can connect. The access rule referenced here needs to be defined in the `access` configuration section.
+
+#### `listen.service.password`
+* **Syntax:** string
+* **Default:** no default, this option is mandatory
+* **Example:** `password = "secret"`
+
+The external component needs to authenticate with this password to connect. 
+
+#### `listen.service.shaper_rule`
+* **Syntax:** string, name of the shaper
+* **Default:** `"none"`
+* **Example:** `shaper_rule = "component_shaper"`
+
+The traffic shaper used to limit the XMPP traffic to prevent the server from being flooded with incoming data.
+Contrary to the C2S and S2S shapers, here the shaper name directly references the shaper that needs to be defined in the [`shaper`](shaper.md) section.
+
+#### `listen.service.check_from`
+* **Syntax:** boolean
+* **Default:** `true`
+* **Example:** `check_from = false`
+
+Specifies whether the server should verify the "from" field in stanzas from the component.
+
+#### `listen.service.hidden_components`
+* **Syntax:** boolean
+* **Default:** `false`
+* **Example:** `hidden_components = true`
+
+All components connected to an endpoint with this option enabled will be considered "hidden".
+
+Hidden components have a special flag enabled in the internal component table.
+Alone, it doesn't change the server behaviour in any way, but it may be used by other modules and extensions to execute special logic.
+An example would be [`mod_disco`](../modules/mod_disco.md), which may be configured to filter out hidden components from disco results, so they won't be discoverable by clients.
+A reason to do so could be reduced traffic - systems with many components could return very long disco responses.
+Also, some deployments would like to avoid revealing some services; not because it is a security threat (this method does not prevent clients from communicating with hidden components), but rather because they are not meant to interact with clients directly (e.g. helper components for other components). 
+
+#### `listen.service.conflict_behaviour`
+* **Syntax:** string, one of: `"disconnect"`, `"kick_old"`
+* **Default:** `"disconnect"`
+* **Example:** `conflict_behaviour = "kick_old"`
+
+By default, when a component tries to connect and a registration conflict occurs, the connection is dropped with the following error:
+
+```xml
+<stream:error>
+  <conflict xmlns='urn:ietf:params:xml:ns:xmpp-streams'/>
+</stream:error>
+</stream:stream>
+```
+
+It makes implementing the reconnection logic difficult, because the old connection would not allow any other connections.
+By setting this option to `kick_old`, we drop any old connections registered at the same host before accepting new ones.
+
+### Custom extension to the protocol
+
+In order to register a component for all virtual hosts served by the server (see `hosts` in the `general` section), the component must add the attribute `is_subdomain="true"` to the opening stream element.
+This may be helpful if someone wants to have a single instance of a component serving multiple virtual hosts.
+The `is_subdomain` attribute is optional and the default behaviour is as described in [XEP-0114](http://xmpp.org/extensions/xep-0114.html).
+
+### Service listener example
+
+The following section configures a service listener, accepting connections from external components.
+The IP address is limited to loopback to prevent connections from different hosts.
+All components are allowed to connect, but they need to provide the password.
+The shaper named `fast` needs to be defined in the `shaper` section.
+
+```toml
+[[listen.service]]
+  port = 8888
+  access = "all"
+  shaper_rule = "fast"
+  ip_address = "127.0.0.1"
+  password = "secret"
+```
+
+## HTTP-based services: `[[listen.http]]`
+
+Manages all HTTP-based services, such as BOSH (HTTP long-polling), WebSocket and REST.
+It uses the [Cowboy](https://ninenines.eu/docs/en/cowboy/2.6/manual) web server.
+Recommended port number: 5280 for BOSH/WS. 
+
+There are the following options for each of the HTTP listeners:
+
+#### `listen.http.handlers`
+* **Syntax:** each handler is specified in a subsection starting with `[[listen.http.handlers.type]]` where `type` is one of the allowed handler types, handling different connection types, e.g.
+
+    * `mod_bosh` - for [BOSH](https://xmpp.org/extensions/xep-0124.html) connections,
+    * `mod_websockets` - for [WebSocket](https://tools.ietf.org/html/rfc6455) connections,
+    * `mongoose_api_*`, `mongoose_client_api_*`, ... - for REST API.
+
+    These types are described below in more detail.
+    The double-bracket syntax is used because there can be multiple handlers of a given type, so for each type there is a TOML array of one or more tables (subsections).
+
+* **Default:** there is no default, all handlers need to be specified explicitly.
+* **Example:** two handlers, one for BOSH and one for WebSockets
+```toml
+  [[listen.http.handlers.mod_bosh]]
+    host = "_"
+    path = "/http-bind"
+
+  [[listen.http.handlers.mod_websockets]]
+    host = "_"
+    path = "/ws-xmpp"
+```
+
+### Common handler options
+
+#### `listen.http.handlers.*.host`
+* **Syntax:** string
+* **Default:** no default, mandatory option
+* **Example:** `host = "localhost"`
+
+Host name for this handler or `"_"` for any host.
+
+#### `listen.http.handlers.*.path`
+* **Syntax:** string
+* **Default:** no default, mandatory option
+* **Example:** `path = "/ws-xmpp"`
+
+Path for this handler.
+
+### Handler types: BOSH - `mod_bosh`
+
+To handle incoming BOSH traffic you need to configure the `mod_bosh` module in the `modules` section as well.
+
+### Handler types: WebSockets - `mod_websockets`
+
+Websocket connections as defined in [RFC 7395](https://tools.ietf.org/html/rfc7395). 
+You can pass the following optional parameters: + +#### `listen.http.handlers.mod_websockets.timeout` +* **Syntax:** positive integer or the string `"infinity"` +* **Default:** `"infinity"` +* **Example:** `timeout = 60_000` + +The time (in milliseconds) after which an inactive user is disconnected. + +#### `listen.http.handlers.mod_websockets.ping_rate` +* **Syntax:** positive integer or the string `"none"` +* **Default:** `"none"` +* **Example:** `ping_rate = 10_000` + +The time between pings sent by server. By setting this option you enable server-side pinging. + +#### `listen.http.handlers.mod_websockets.max_stanza_size` +* **Syntax:** positive integer or the string `"infinity"` +* **Default:** `"infinity"` +* **Example:** `max_stanza_size = 10_000` + +Maximum allowed incoming stanza size. +**Warning:** this limit is checked **after** the input data parsing, so it does not apply to the input data size itself. + +#### `listen.http.handlers.mod_websockets.service` +* **Syntax:** an array of `listen.service.*` options +* **Default:** not set +* **Example:** + +```toml +[listen.http.handlers.mod_websockets.service] + access = "all" + shaper_rule = "fast" + password = "secret" +``` + +This subsection enables external component connections over WebSockets. +See the [service](#xmpp-components-listenservice) listener section for details. + +### Handler types: REST API - Admin - `mongoose_api_admin` + +For more information about the API, see the [REST interface](../rest-api/Administration-backend.md) documentation. +The following options are supported for this handler: + +#### `listen.http.handlers.mongoose_api_admin.username` +* **Syntax:** string +* **Default:** not set +* **Example:** `username = "admin"` + +When set, enables authentication for the admin API, otherwise it is disabled. Requires setting `password`. 
+ +#### `listen.http.handlers.mongoose_api_admin.password` +* **Syntax:** string +* **Default:** not set +* **Example:** `password = "secret"` + +Required to enable authentication for the admin API. + +### Handler types: REST API - Client + +To enable the REST API for clients, several handlers need to be added: + +* `mongoose_client_api_*` - handles individual API endpoints. You can add and remove these to enable particular functionality. +* `lasse_handler` - provides the [SSE handler](https://github.com/inaka/lasse) which is required for the client HTTP API, should not be changed. +* `cowboy_*` - hosts the Swagger web-based documentation, should not be changed, but can be removed to disable the API docs. + +The recommended configuration is shown in [Example 3](#example-3-client-api) below. +Please refer to [REST interface](../rest-api/Client-frontend.md) documentation for more information. + +### Handler types: Metrics API (obsolete) - `mongoose_api` + +REST API for accessing the internal MongooseIM metrics. +Please refer to the [REST interface to metrics](../rest-api/Metrics-backend.md) page for more information. +The following option is required: + +#### `listen.http.handlers.mongoose_api.handlers` +* **Syntax:** array of strings - Erlang modules +* **Default:** not set, this is a mandatory option for this handler +* **Example:** `handlers = ["mongoose_api_metrics"]` + +### Transport options + +The options listed below are used to modify the HTTP transport settings. + +#### `listen.http.transport.num_acceptors` +* **Syntax:** positive integer +* **Default:** `100` +* **Example:** `transport.num_acceptors = 10` + +Number of HTTP connection acceptors. + +#### `listen.http.transport.max_connections` +* **Syntax:** positive integer or the string `"infinity"` +* **Default:** `1024` +* **Example:** `transport.max_connections = "infinity"` + +Maximum number of open connections. 
The default value of 1024 is set by the [Ranch](https://ninenines.eu/docs/en/ranch/1.7/guide/) library. + +### TLS (HTTPS) options + +By default the HTTP listener does not use TLS. +To use TLS (HTTPS), you need to add a TOML table (subsection) called `tls` to the config file with the `certfile` and `keyfile` options that specify the location of the certificate and private key files, respectively. +If the keyfile is password-protected, `password` is required as well. +If the certificate is signed by an intermediate CA, one will probably want to specify the CA chain with the `cacertfile` option. +The library used for HTTP is the Erlang TLS implementation provided by OTP - see [ranch_ssl](https://github.com/ninenines/ranch/blob/master/doc/src/manual/ranch_ssl.asciidoc) for details. + +#### `listen.http.tls.verify_peer` +* **Syntax:** boolean +* **Default:** `false` +* **Example:** `tls.verify_peer = true` + +Enforces verification of a client certificate. Requires a valid `cacertfile`. + +#### `listen.http.tls.verify_mode` +* **Syntax:** string, one of `"peer"`, `"selfsigned_peer"`, `"none"` +* **Default:** not set (equivalent to `"peer"` in the current version of Erlang/OTP) +* **Example:** `tls.verify_mode = "selfsigned_peer"` + +Specifies the way certificate verification works: + +* `peer` - makes sure the peer's certificate is valid and signed by a trusted CA, +* `selfsigned_peer` - makes sure the peer's certificate is valid, but allows self-signed certificates, +* `none` - any certificate is accepted. + +#### `listen.http.tls.certfile` +* **Syntax:** string, path in the file system +* **Default:** not set +* **Example:** `tls.certfile = "server.pem"` + +Path to the X509 PEM file with a certificate and a private key (not protected by a password). If the certificate is signed by an intermediate CA, you should specify here the whole CA chain by concatenating all public keys together and appending the private key after that. 
+
+#### `listen.http.tls.cacertfile`
+* **Syntax:** string, path in the file system
+* **Default:** not set
+* **Example:** `tls.cacertfile = "ca.pem"`
+
+Path to the X509 PEM file with a CA chain that will be used to verify clients. It won't have any effect if `verify_peer` is not enabled.
+
+#### `listen.http.tls.dhfile`
+* **Syntax:** string, path in the file system
+* **Default:** not set
+* **Example:** `tls.dhfile = "dh.pem"`
+
+Path to the Diffie-Hellman parameter file.
+
+#### `listen.http.tls.ciphers`
+* **Syntax:** array of tables with the following keys: `cipher`, `key_exchange`, `mac`, `prf` and string values.
+* **Default:** not set, all supported cipher suites are accepted
+* **Example:** `tls.ciphers = [{cipher = "aes_256_gcm", key_exchange = "any", mac = "aead", prf = "sha384"}]`
+
+Cipher suites to use. For allowed values, see the [Erlang/OTP SSL documentation](https://erlang.org/doc/man/ssl.html#type-ciphers)
+
+#### `listen.http.tls.versions`
+* **Syntax:** array of strings
+* **Default:** not set, all supported versions are accepted
+* **Example:** `tls.versions = ["tlsv1.2", "tlsv1.3"]`
+
+TLS versions to use. For allowed values, see the [Erlang/OTP SSL documentation](https://erlang.org/doc/man/ssl.html#type-protocol_version)
+
+#### `listen.http.tls.keyfile`
+* **Syntax:** string, path in the file system
+* **Default:** not set
+* **Example:** `tls.keyfile = "key.pem"`
+
+Path to the X509 PEM file with the private key.
+
+#### `listen.http.tls.password`
+* **Syntax:** string
+* **Default:** not set
+* **Example:** `tls.password = "secret"`
+
+Password to the X509 PEM file with the private key.
+
+### Protocol options
+
+These are some additional options of the HTTP protocol.
+
+#### `listen.http.protocol.compress`
+* **Syntax:** boolean
+* **Default:** false
+* **Example:** `protocol.compress = true`
+
+Compresses response bodies automatically when the client supports it. 
+ +### HTTP listener examples + +The examples shown below are included in the provided default configuration file. + +#### Example 1. BOSH and WS + +The following listener accepts BOSH and WebSocket connections and has TLS configured. + +```toml +[[listen.http]] + port = 5285 + tls.certfile = "mycert.pem" + tls.keyfile = "mykey.pem" + tls.password = "secret" + + [[listen.http.handlers.mod_bosh]] + host = "_" + path = "/http-bind" + + [[listen.http.handlers.mod_websockets]] + host = "_" + path = "/ws-xmpp" +``` + +#### Example 2. Admin API + +REST API for administration, the listener is bound to `127.0.0.1` for increased security. +The number of acceptors and connections is specified (reduced). + +```toml +[[listen.http]] + ip_address = "127.0.0.1" + port = 8088 + transport.num_acceptors = 5 + transport.max_connections = 10 + + [[listen.http.handlers.mongoose_api_admin]] + host = "localhost" + path = "/api" +``` + +#### Example 3. Client API + +REST API for clients. + +```toml +[[listen.http]] + port = 8089 + transport.max_connections = 1024 + protocol.compress = true + + [[listen.http.handlers.lasse_handler]] + host = "_" + path = "/api/sse" + module = "mongoose_client_api_sse" + + [[listen.http.handlers.mongoose_client_api_messages]] + host = "_" + path = "/api/messages/[:with]" + + [[listen.http.handlers.mongoose_client_api_contacts]] + host = "_" + path = "/api/contacts/[:jid]" + + [[listen.http.handlers.mongoose_client_api_rooms]] + host = "_" + path = "/api/rooms/[:id]" + + [[listen.http.handlers.mongoose_client_api_rooms_config]] + host = "_" + path = "/api/rooms/[:id]/config" + + [[listen.http.handlers.mongoose_client_api_rooms_users]] + host = "_" + path = "/api/rooms/:id/users/[:user]" + + [[listen.http.handlers.mongoose_client_api_rooms_messages]] + host = "_" + path = "/api/rooms/[:id]/messages" + + [[listen.http.handlers.cowboy_swagger_redirect_handler]] + host = "_" + path = "/api-docs" + + [[listen.http.handlers.cowboy_swagger_json_handler]] + host = 
"_" + path = "/api-docs/swagger.json" + + [[listen.http.handlers.cowboy_static]] + host = "_" + path = "/api-docs/[...]" + type = "priv_dir" + app = "cowboy_swagger" + content_path = "swagger" +``` + diff --git a/doc/advanced-configuration/outgoing-connections.md b/doc/advanced-configuration/outgoing-connections.md index 1f6a2f0e49d..736dba8cce6 100644 --- a/doc/advanced-configuration/outgoing-connections.md +++ b/doc/advanced-configuration/outgoing-connections.md @@ -1,177 +1,103 @@ -# Outgoing connections - -MongooseIM can be configured to talk to external service like databases or HTTP servers in order to get or set the required data. +MongooseIM can be configured to talk to external services like databases or HTTP servers in order to get or set the required data. The interface for outgoing connections management was unified and is now available via the `outgoing_pools` config option for the following type of connections: * `cassandra` - pool of connections to Cassandra cluster * `riak` - pool of connections to Riak cluster * `redis` - pool of connections to Redis server -* `http` - pool of connections to various HTTP(S) servers MongooseIM can talk to, in example HTTP authentication backend or HTTP notifications +* `http` - pool of connections to an HTTP(S) server MongooseIM can talk to, for example HTTP authentication backend or HTTP notifications * `elastic` - pool of connections to ElasticSearch server * `rdbms` - pool of connections to an RDBMS database * `rabbit` - pool of connections to a RabbitMQ server * `ldap` - pool of connections to an LDAP server -* `generic` - pool of generic workers not associated directly with a particular connection (SNS, PushNotifications) +* `generic` - pool of generic workers not associated directly with a particular connection -All the above pools are managed by [inaka/worker_pool](https://github.com/inaka/worker_pool) library. 
+* **Syntax:** Each pool is specified in a subsection starting with `[outgoing_pools.type.tag]`, where `type` is one of available connection types and `tag` is an arbitrary value uniquely identifying the pool within its type. +This allows you to create multiple dedicated pools of the same type. -Every entry in the `outgoing_pools` is a 5-element tuple: +# General pool options -```erlang -{Type, Host, Tag, PoolOptions, ConnectionOptions} -``` +#### `outgoing_pools.*.*.scope` +* **Syntax:** string, one of:`"global"`, `"host"`, `"single_host"` +* **Default:** `"global"` +* **Example:** `scope = "host"` -Where: +#### `outgoing_pools.*.*.host` +* **Syntax:** string +* **Default:** no default; required if `"single_host"` scope is specified +* **Example:** `host = "anotherhost.com"` -* `Type` is one of the types listed above -* `Host` can be set to: - * `global` - meaning the pool will started once no matter how many XMPP hosts served by MongooseIM - * `host` - the pool will be started for all the XMPP hosts served by MongooseIM - * a binary representing a specific XMPP host like `<<"domain1.chat.im">>` -* `Tag` is a name to distinguish pools with the same `Type` and `Host` parameter. -* `PoolOptions` is a list of `{key, value}` pairs as defined in [worker_pool doc](https://github.com/inaka/worker_pool#starting-a-pool) - with the following exception: - * `strategy` - specifies the worker selection strategy for the given pool, default is `best_worker`, - more details on this can be found in [Choosing strategy in worker_pool doc](https://github.com/inaka/worker_pool#choosing-a-strategy) - *WARNING:* `redis` and `riak` backends are not compatible with `available_worker` strategy. 
- * `call_timeout` - specifies the timeout, in milliseconds, for a call operation to the pool -* `ConnectionOptions` - options list passed to the `start` function of the pool type +`scope` can be set to: +* `global` - meaning that the pool will be started once no matter how many XMPP hosts are served by MongooseIM +* `host` - the pool will be started for each XMPP host served by MongooseIM +* `single_host` - the pool will be started for the selected host only (you must provide a host name). +# Worker pool options -### Examples +All pools are managed by the [inaka/worker_pool](https://github.com/inaka/worker_pool) library. -Provided MongooseIM serves domains `<<"a.com">>`, `<<"b.com">>`, `<<"c.eu">>` and `<<"d.eu">>` -the following `outgoing_pools` configuration: +Available options are: +#### `outgoing_pools.*.*.strategy` +* **Syntax:** string, one of: `"best_worker"`, `"random_worker"`, `"next_worker"`, `"available_worker"`, `"next_available_worker"` +* **Default:** `"best_worker"` +* **Example:** `strategy = "available_worker"` -```erlang -{redis, <<"a.com">>, default, PoolOpts, ConnOptsForDomain1}, -{redis, host, default, PoolOpts, ConnOpts}, -{redis, <<"c.eu", default, PoolOpts, ConnOptsForDomain2} -``` +Defines worker selection strategy. Consult worker_pool documentation for details. -will be expanded to the following configuration: +#### `outgoing_pools.*.*.workers` +* **Syntax:** positive integer +* **Default:** 100 +* **Example:** `workers = 10` -```erlang -{redis, <<"a.com">>, default, PoolOpts, ConnOptsForDomain1}, -{redis, <<"b.com">>, default, PoolOpts, ConnOpts}, -{redis, <<"c.eu", default, PoolOpts, ConnOptsForDomain2}, -{redis, <<"d.eu">>, default, PoolOpts, ConnOpts} -``` +Number of workers to be started by the pool. 
-## RDBMS connection setup +#### `outgoing_pools.*.*.call_timeout` +* **Syntax:** positive integer +* **Default:** 5000 +* **Example:** `call_timeout = 5000` -An example RDBMS configuration inside `outgoing_pools` may look like this: +Number of milliseconds after which a call to the pool will time out. -```erlang -{outgoing_pools, [ - {rdbms, global, default, [{workers, 5}], - [{server, {mysql, "localhost", 3306, "mydb", "user", "passwd"}}]} -]}. -``` +# Connection options -This configuration will create a default, global pool of 5 connections to a mysql database. -We might also want to add a dedicated pool for a specific host: +Options specific to a pool connection are defined in a subsection starting with `[outgoing_pools.*.*.connection]`. +For example: -```erlang -{outgoing_pools, [ - {rdbms, global, default, [{workers, 5}], - [{server, {mysql, "localhost", 3306, "mydb", "user", "passwd"}}]}, - {rdbms, "myhost.com", default, [{workers, 3}], - [{server, {mysql, "localhost", 3306, "mydb", "user", "passwd"}}]} -]}. ``` +[outgoing_pools.rdbms.default] + scope = "global" + workers = 5 -Please remember that SQL databases require creating a schema. -See [Database backends configuration](./database-backends-configuration.md) for more information. -Also see [Advanced configuration](../Advanced-configuration.md) for additional options that influence RDBMS connections. -Currently all pools must use the same RDBMS type (e.g. `mysql`, `pgsql`). - -### Connection options - -* **server** - * **Description:** SQL DB connection configuration. Currently supported DB types are `mysql` and `pgsql`. - * **Syntax:** `{server, {Type, Host, Port, DBName, Username, Password}}.` **or** `{server, ""}` - * **Default:** `undefined` - -* **keepalive_interval** - * **Description:** When enabled, will send `SELECT 1` query through every DB connection at given interval to keep them open. 
- This option should be used to ensure that database connections are restarted after they became broken (e.g. due to a database restart or a load balancer dropping connections). - Currently, not every network related error returned from a database driver to a regular query will imply a connection restart. - * **Syntax:** `{keepalive_interval, IntervalSeconds}.` - * **Example:** `{keepalive_interval, 30}.` - * **Default:** `undefined` - -#### MySQL and PostgreSQL SSL connection setup - -In order to establish a secure connection with a database, additional options must be passed in the `server` tuple. -Here is the proper syntax: - -`{server, {Type, Host, Port, DBName, Username, Password, SSL}}.` - -##### MySQL - -SSL configuration options for MySQL: - -* **SSL** - * **Description:** Specifies SSL connection options. - * **Syntax:** `[Opt]` - * **Supported values:** The options are just a **list** of Erlang `ssl:ssl_option()`. More details can be found in [official Erlang ssl documentation](http://erlang.org/doc/man/ssl.html). - -###### Example configuration - -An example configuration can look as follows: - -```erlang -{outgoing_pools, [ - {rdbms, global, default, [{workers, 5}], - [{server, {mysql, "localhost", 3306, "mydb", "mim", "mimpass", - [{versions, ['tlsv1.2']}, - {verify, verify_peer}, - {cacertfile, "path/to/cacert.pem"}, - {server_name_indication, disable}]}}]} -]}. + [outgoing_pools.rdbms.default.connection] + ... ``` -##### PostgreSQL +## RDBMS options -SSL configuration options for PGSQL: +#### `outgoing_pools.rdbms.*.driver` +* **Syntax:** string, one of `"pgsql"`, `"mysql"` or `"odbc"` (a supported driver) +* **Example:** `driver = "pgsql"` -* **SSL** - * **Description:** Specifies general options for SSL connection. - * **Syntax:** `[SSLMode, SSLOpts]` +Selects driver for RDBMS connection. The choice of driver impacts the set of available options. -* **SSLMode** - * **Description:** Specifies a mode of SSL connection. 
Mode expresses how much the PostgreSQL driver carries about security of the connections. - For more information click [here](https://github.com/epgsql/epgsql). - * **Syntax:** `{ssl, Mode}` - * **Supported values:** `false`, `true`, `required` +#### `outgoing_pools.rdbms.*.call_timeout` +* **Syntax:** positive integer +* **Default:** 60000 (msec) +* **Example:** `call_timeout = 60000` -* **SSLOpts** - * **Description:** Specifies SSL connection options. - * **Syntax:** `{ssl_opts, [Opt]}` - * **Supported values:** The options are just a **list** of Erlang `ssl:ssl_option()`. More details can be found in [official Erlang ssl documentation](http://erlang.org/doc/man/ssl.html). +RDBMS pool sets its own default value of this option. -###### Example configuration +### ODBC options -An example configuration can look as follows: +#### `outgoing_pools.rdbms.*.settings` +* **Syntax:** string +* **Default:** no default; required if the `"odbc"` driver is specified +* **Example:** `settings = "DSN=mydb"` -```erlang -{outgoing_pools, [ - {rdbms, global, default, [{workers, 5}], - [{server, {pgsql, "localhost", 5432, "mydb", "mim", "mimpass", - [{ssl, required}, {ssl_opts, [{verify, verify_peer}, {cacertfile, "path/to/cacert.pem"}]}]}}]} -]}. -``` +ODBC - specific string defining connection parameters. ##### ODBC SSL connection setup -If you've configured MongooseIM to use an ODBC driver, i.e. you've provided an ODBC connection string in the `server` option, e.g. - -```erlang -{server, "DSN=mydb"}. -``` - -then the SSL options, along other connection options, should be present in the `~/.odbc.ini` file. +If you've configured MongooseIM to use an ODBC driver, then the SSL options, along other connection options, should be present in the `~/.odbc.ini` file. To enable SSL connection the `sslmode` option needs to be set to `verify-full`. Additionally, you can provide the path to the CA certificate using the `sslrootcert` option. 
@@ -188,278 +114,155 @@ sslmode = verify-full sslrootcert = /path/to/ca/cert ``` -## HTTP connections setup +### Other RDBMS backends -Some MongooseIM modules need an HTTP connection to an external service. -These pools need to be configured and started before the module that needs them. -Below is a sample configuration: +#### `outgoing_pools.rdbms.*.connection.host` +* **Syntax:** string +* **Example:** `host = "localhost"` -```erlang -{outgoing_pools, [ - {http, global, default, PoolOptions, ConnectionOptions} -]}. -``` -where `PoolOptions` is as previously described. -Recommended `PoolOptions` for `HTTP` pools are: +#### `outgoing_pools.rdbms.*.connection.database` +* **Syntax:** string +* **Example:** `database = "mim-db"` -* `strategy` - the recommended value is `available_worker` -* `call_timeout` - it should be equal or longer than the value set in `request_timeout` below. +#### `outgoing_pools.rdbms.*.connection.username` +* **Syntax:** string +* **Example:** `username = "mim-user"` -`ConnectionOptions` can take the following `{key, value}` pairs: +#### `outgoing_pools.rdbms.*.connection.password` +* **Syntax:** string +* **Example:** `password = "mim-password"` -* `{server, HostName}` - string, default: `"http://localhost"` - the URL of the destination HTTP server (including a port number if needed). -* `{path_prefix, Prefix}` - string, default: `"/"` - the part of the destination URL that is appended to the host name (`host` option). -* `{request_timeout, TimeoutValue}` - non-negative integer, default: `2000` - maximum number of milliseconds to wait for the HTTP response. -* `{http_opts, HTTPOptions}` - list, default: `[]` - can be used to pass extra parameters which are passed to [fusco], the library used for making the HTTP calls. - More details about the possible `http_opts` can be found in [fusco]'s documentation. 
+#### `outgoing_pools.rdbms.*.connection.keepalive_interval` +* **Syntax:** positive integer +* **Default:** undefined (keep-alive not activated) +* **Example:** `keepalive_interval = 30` -[fusco]: https://github.com/esl/fusco +When enabled, MongooseIM will send SELECT 1 query through every DB connection at given interval to keep them open. This option should be used to ensure that database connections are restarted after they became broken (e.g. due to a database restart or a load balancer dropping connections). Currently, not every network-related error returned from a database driver to a regular query will imply a connection restart. -##### Example configuration +## HTTP options -```Erlang -{outgoing_pools, [ - {http, global, http_auth, - [{strategy, available_worker}], [{server, "https://my_server:8080"}]} -]}. -``` +#### `outgoing_pools.http.*.connection.host` +* **Syntax:** `"http[s]://string[:integer]"` +* **Example:** `host = "https://server.com:879"` -If peer certificate verification is required, the pool can be configured in the following way: - -```Erlang -{outgoing_pools, [ - {http, global, mongoose_push_http, - [{workers, 50}], - [{server, "https://localhost:8443"}, - {http_opts, [ - {connect_options, [{verify, verify_peer}]} - ]} - ]} -]}. -``` +#### `outgoing_pools.http.*.connection.path_prefix` +* **Syntax:** string +* **Default:** `"/"` +* **Example:** `path_prefix = "/api/auth/"` -Please note the `connect_options` passed to [fusco] via the pool's `http_opts` parameter. +Initial part of path which will be common to all calls. Prefix will be automatically prepended to path specified by a call to the pool. +#### `outgoing_pools.http.*.connection.request_timeout` +* **Syntax:** positive integer +* **Default:** `2000` (milliseconds) +* **Example:** `request_timeout = 5000` -## Redis connection setup +Number of milliseconds after which http call to the server will time out. It should be lower than `call_timeout` set at the pool level. 
-Session manager backend or `mod_global_distrib` requires a redis pool defined in the `outgoing_pools` option. -They can be defined as follows: +HTTP also supports all TLS-specific options described in the TLS section. -```erlang -{ougtoing_pools, [ - {redis, global, Tag, WorkersOptions, ConnectionOptions} -]}. -``` +## Redis-specific options -*WARNING:* `redis` backend is not compatible with `available_worker` strategy. +Redis can be used as a session manager backend. +Global distribution (implemented in `mod_global_distrib`) requires Redis pool. -The `Tag` parameter can only be set to `default` for a session backend. -For `mod_global_distrib` module it can take any value (default is **global_distrib**) but the name needs to be passed as: +There are two important limitations: -```erlang -{redis, [{pool, Tag}]} -``` -in the `mod_global_distrib` options. See [mod_global_distrib doc](../modules/mod_global_distrib.md) for details and examples. +* for a session backend, the `Tag` parameter has to be equal to `default` +* `redis` backend is not compatible with `available_worker` strategy. -The `ConnectionOptions` list can take following parametrs as `{key, value`} pairs: +#### `outgoing_pools.redis.*.connection.host` +* **Syntax:** string +* **Default:** `"127.0.0.1"` +* **Example:** `host = "redis.local"` -* **host** (default: **"localhost"**) the hostname or IP address of the Redis server -* **port** (default: **6379**) the port of the Redis server -* **database** (default: **0**) number of the database to use by the pool -* **password** (default: **""**) the password to the database (if set). 
+#### `outgoing_pools.redis.*.connection.port` +* **Syntax:** integer, between 0 and 65535, non-inclusive +* **Default:** `6379` +* **Example:** `port = 9876` -### Example +#### `outgoing_pools.redis.*.connection.database` +* **Syntax:** non-negative integer +* **Default:** `0` +* **Example:** `database = 2` -```erlang -{ougtoing_pools, [ - {redis, global, default, [{strategy, random_worker}], - [{host, "198.172.15.12"}, - {port, 9923}]} -]}. -``` +Logical database index (zero-based). -## Riak connection setup +#### `outgoing_pools.redis.*.connection.password` +* **Syntax:** string +* **Default:** `""` +* **Example:** `password = "topsecret"` -Currently only one Riak connection pool can exist for each supported XMPP host. -It is configured with the following tuple inside the `outgoing_pools` config option. +## Riak options -```erlang -{outgoing_pools, [ - {riak, global, default, [{workers, 20}], [{address, "127.0.0.1"}, {port, 8087}]} -]}. -``` +Currently only one Riak connection pool can exist for each supported XMPP host (the default pool). *WARNING:* `riak` backend is not compatible with `available_worker` strategy. -#### Riak SSL connection setup - -Using SSL for Riak connection requires passing extra options in `ConnectionOptions` to the -aforementioned `riak` tuple. - -Here is the proper syntax: - -* **Credentials** - * **Description:** Specifies credentials to use to connect to the database. - * **Syntax:** `{credentials, User, Password}` - * **Supported values** `User` and `Password` are strings with a database username and password respectively. - -* **CACert** - * **Description:** Specifies a path to the CA certificate that was used to sign the database certificates. - * **Syntax:** `{cacertfile, Path}` - * **Supported values** `Path` is a string with a path to the CA certificate file. 
- -* **SSL_Opts** - * **Description**: list of SSL options as defined in [Erlang SSL module doc](http://erlang.org/doc/man/ssl.html) - They will be passed verbatim to the `ssl:connect` function. - * **Syntax** `{ssl_opts, ListOfSSLOpts}` - * **Example**: - -```erlang -{ssl_opts, [{ciphers, ["AES256-SHA", "DHE-RSA-AES128-SHA256"]}, - {server_name_indication, disable}]} -``` - -##### Example configuration - -An example configuration can look as follows: - -```erlang -{outgoing_pools, [ - {riak, global, default, [{workers, 20}, {strategy, next_worker}], - [{address, "127.0.0.1"}, {port, 8087}, - {credentials, "username", "pass"}, - {cacertfile, "path/to/cacert.pem"}]} -]}. -``` - -## Cassandra connection setup +#### `outgoing_pools.riak.*.connection.address` +* **Syntax:** string +* **Example:** `address = "127.0.0.1"` -The minimum pool definition for cassandra workers looks as follows: +#### `outgoing_pools.riak.*.connection.port` +* **Syntax:** integer +* **Example:** `port = 8087` -```erlang -{outgoing_pools, [ - {cassandra, global, default, [], []} -]}. -``` - -In this case MongooseIM will by default try to connect to Cassandra server on "localhost" and port 9042. -The keyspace used in queries will be `mongooseim`. - - -#### ConnectionOptions - -The `ConnectionOptions` list can take following parameters as `{key, Value}` pairs: - -* **servers** - A list of servers in Cassandra cluster in `{HostnameOrIP, Port}` format. -* **keyspace** - A name of keyspace to use in queries executed in this pool. -* You can find a full list in `cqerl` [documentation](https://github.com/matehat/cqerl#all-modes). - -#### Example - -``` -{cassandra, global, default, [], - [ - {servers, [{"cassandra_server1.example.com", 9042}, {"cassandra_server2.example.com", 9042}] }, - {keyspace, "big_mongooseim"} - ] - ]} -``` - -#### SSL connection setup - -In order to establish a secure connection to Cassandra you must make some changes in the MongooseIM and Cassandra configuration files. 
- -##### Create server keystore -Follow [this](https://docs.datastax.com/en/cassandra/3.0/cassandra/configuration/secureSSLCertWithCA.html) guide if you need to create certificate files. +#### `outgoing_pools.riak.*.connection.credentials` +* **Syntax:** `{user = "username", password = "pass"}` +* **Default:** none +* **Example:** `credentials = {user = "myuser", password = "tisismepasswd"}` -##### Change the Cassandra configuration file -Find `client_encryption_options` in `cassandra.yaml` and make these changes: +This is optional - setting this option forces connection over TLS. -```yaml -client_encryption_options: -    enabled: true -    keystore: /your_certificate_directory/server.keystore -    keystore_password: your_password -``` - -Save the changes and restart Cassandra. +Riak also supports all TLS-specific options described in the TLS section. -##### Enable MongooseIM to connect with SSL -An SSL connection can be established with both self-signed and CA-signed certificates. +## Cassandra options -###### Self-signed certificate +#### `outgoing_pools.cassandra.*.connection.servers` +* **Syntax:** a TOML array of tables containing keys `"ip_address"` and `"port"` +* **Default:** `[{ip_address = "localhost", port = 9042}]` +* **Example:** `servers = [{ip_address = "host_one", port = 9042}, {ip_address = "host_two", port = 9042}]` -Add the following to `ConnectionOptions` list: +#### `outgoing_pools.cassandra.*.connection.keyspace` +* **Syntax:** string +* **Default:** `"mongooseim"` +* **Example:** `keyspace = "big_mongooseim_database"` -```erlang -{ssl, [{verify, verify_none}]} -``` +To use plain text authentication (using cqerl_auth_plain_handler module): -Save the changes and restart MongooseIM. 
+#### `outgoing_pools.cassandra.*.connection.auth.plain.username` +* **Syntax:** string +* **Example:** `username = "auser"` -###### CA-signed certificate +#### `outgoing_pools.cassandra.*.connection.auth.plain.password` +* **Syntax:** string +* **Example:** `password = "somesecretpassword"` -Add the following to `ConnectionOptions` list: -``` -{ssl, [{cacertfile, - "/path/to/rootCA.pem"}, - {verify, verify_peer}]} -``` -Save the changes and restart MongooseIM. - -##### Testing the connection - -Make sure Cassandra is running and then run MongooseIM in live mode: - -```bash - $ ./mongooseim live - $ (mongooseim@localhost)1> cqerl:get_client(default). - {ok,{<0.474.0>,#Ref<0.160699839.1270874114.234457>}} - $ (mongooseim@localhost)2> sys:get_state(pid(0,474,0)). - {live,{client_state,cqerl_auth_plain_handler,undefined, -                    undefined, -                    {"localhost",9042}, -                    ssl, -                    {sslsocket,{gen_tcp,#Port<0.8458>,tls_connection,undefined}, -                               <0.475.0>}, -                    undefined,mongooseim,infinity,<<>>,undefined, -                    [...], -                    {[],[]}, -                    [0,1,2,3,4,5,6,7,8,9,10,11|...], -                    [],hash, -                    {{"localhost",9042}, -                     [...]}}} -``` +Support for other authentication modules may be added in the future. -If no errors occurred and your output is similar to the one above then your MongooseIM and Cassandra nodes can communicate over SSL. +Cassandra also supports all TLS-specific options described in the TLS section. -## ElasticSearch connection setup +## Elasticsearch options -A connection pool to ElasticSearch can be configured as follows: +Currently only one pool tagged `default` can be used. -```erlang -{outgoing_pools, [ - {elastic, global, default, [], [{host, "localhost"}]} -]}. 
-``` +#### `outgoing_pools.elastic.default.connection.host` +* **Syntax:** string +* **Default:** `"localhost"` +* **Example:** `host = "otherhost"` -Currently only one pool with tag `default` can be used. +#### `outgoing_pools.elastic.default.connection.port` +* **Syntax:** positive integer +* **Default:** `9200` +* **Example:** `port = 9211` MongooseIM uses [inaka/tirerl](https://github.com/inaka/tirerl) library to communicate with ElasticSearch. -This library uses `worker_pool` in a bit different way than MongooseIM does, so the following options are not configurable via `WPoolOpts`: +This library uses `worker_pool` in a bit different way than MongooseIM does, so the following options are not configurable: -* `call_timeout` (inifinity) +* `call_timeout` (infinity) * worker selection strategy (`available_worker` or what's set as `default_strategy` of `worker_pool` application) -* `overrun_warning` (infinity) -* `overrun_handler`, ({error_logger, warning_report}) - -Other `worker_pool` options are possible to set. - -In `ConnectionOpts` you can add (as `{key, value}` pairs): -* `host` (default: `"localhost"`) - hostname or IP address of ElasticSearch node -* `port` (default: `9200`) - port the ElasticSearch node's HTTP API is listening on +The only pool-related variable you can tweak is thus the number of workers. Run the following function in the MongooseIM shell to verify that the connection has been established: @@ -483,113 +286,155 @@ Run the following function in the MongooseIM shell to verify that the connection Note that the output might differ based on your ElasticSearch cluster configuration. -## RabbitMQ connection setup +## RabbitMQ options -RabbitMQ backend for [`mod_event_pusher`](../modules/mod_event_pusher.md) -requires a `rabbit` pool defined in the `outgoing_pools` option. 
-They can be defined as follows: +The `Tag` parameter must be set to `event_pusher` in order to be able to use +the pool for [`mod_event_pusher_rabbit`](../modules/mod_event_pusher_rabbit.md). +Any other `Tag` can be used for other purposes. -```erlang -{ougtoing_pools, [ - {rabbit, host, Tag, WorkersOptions, ConnectionOptions} -]}. -``` +#### `outgoing_pools.rabbit.*.connection.amqp_host` +* **Syntax:** string +* **Default:** `"localhost"` +* **Example:** `amqp_host = "anotherhost"` -Notice that `Host` parameter is set to atom `host`. This basically means that -MongooseIM will start as many `rabbit` pools as XMPP hosts are served by -the server. +#### `outgoing_pools.rabbit.*.connection.amqp_port` +* **Syntax:** integer +* **Default:** `5672` +* **Example:** `amqp_port = 4561` -The `Tag` parameter must be set to `event_pusher` in order to be able to use -the pool for [`mod_event_pusher_rabbit`](../modules/mod_event_pusher_rabbit.md). -Any other `Tag` can be used for any other RabbitMQ connection pool. 
+#### `outgoing_pools.rabbit.*.connection.amqp_username` +* **Syntax:** string +* **Default:** `"guest"` +* **Example:** `amqp_username = "corpop"` -The `ConnectionOptions` list can take following parameters as `{key, value`} pairs: +#### `outgoing_pools.rabbit.*.connection.amqp_password` +* **Syntax:** string +* **Default:** `"guest"` +* **Example:** `amqp_password = "guest"` -* **amqp_host** (default: `"localhost"`) - Defines RabbitMQ server host (domain or IP address; both as a string); -* **amqp_port** (default: `5672`) - Defines RabbitMQ server AMQP port; -* **amqp_username** (default: `"guest"`) - Defines RabbitMQ server username; -* **amqp_password** (default: `"guest"`) - Defines RabbitMQ server password; -* **confirms_enabled** (default: `false`) - Enables/disables one-to-one publishers confirms; -* **max_worker_queue_len** (default: `1000`; use `infinity` to disable it) - -Sets a limit of messages in a worker's mailbox above which the worker starts -dropping the messages. If a worker message queue length reaches the limit, -messages from the head of the queue are dropped until the queue length is again -below the limit. +#### `outgoing_pools.rabbit.*.connection.confirms_enabled` +* **Syntax:** boolean +* **Default:** `false` +* **Example:** `confirms_enabled = false` -### Example +Enables/disables one-to-one publishers confirms. -```erlang -{ougtoing_pools, [ - {rabbit, host, event_pusher, [{workers, 20}], - [{amqp_host, "localhost"}, - {amqp_port, 5672}, - {amqp_username, "guest"}, - {amqp_password, "guest"}, - {confirms_enabled, true}, - {max_worker_queue_len, 100}]} -]}. -``` +#### `outgoing_pools.rabbit.*.connection.max_worker_queue_len` +* **Syntax:** non-negative integer or `"infinity"` +* **Default:** `1000` +* **Example:** `max_worker_queue_len = "infinity"` -## LDAP connection setup +Sets a limit of messages in a worker's mailbox above which the worker starts dropping the messages. 
If a worker message queue length reaches the limit, messages from the head of the queue are dropped until the queue length is again below the limit. Use `infinity` to disable. -To configure a pool of connections to an LDAP server, use the following syntax: +## LDAP options -```erlang -{ldap, Host, Tag, PoolOptions, ConnectionOptions} -``` +#### `outgoing_pools.ldap.*.connection.servers` +* **Syntax:** an array of strings +* **Default:** `["localhost"]` +* **Example:** `servers = ["ldap_one", "ldap_two"]` -### Connection options +#### `outgoing_pools.ldap.*.connection.port` +* **Syntax:** integer +* **Default:** `389` (or `636` if encryption is enabled) +* **Example:** `port = 800` -The following options can be specified in the `ConnectionOptions` list: +#### `outgoing_pools.ldap.*.connection.rootdn` +* **Syntax:** string +* **Default:** empty string +* **Example:** `rootdn = "cn=admin,dc=example,dc=com"` -* **servers** - * **Description:** List of IP addresses or DNS names of your LDAP servers. They are tried sequentially until the connection succeeds. - * **Value:** A list of strings - * **Default:** `["localhost"]` - * **Example:** `["primary-ldap-server.example.com", "secondary-ldap-server.example.com"]` +Leaving out this option makes it an anonymous connection, which most likely is what you want. -* **encrypt** - * **Description:** Enable connection encryption with your LDAP server. - The value `tls` enables encryption by using LDAP over SSL. Note that STARTTLS encryption is not supported. - * **Values:** `none`, `tls` - * **Default:** `none` +#### `outgoing_pools.ldap.*.connection.password` +* **Syntax:** string +* **Default:** empty string +* **Example:** `password = "topsecret"` -* **tls_options** - * **Description:** Specifies TLS connection options. Requires `{encrypt, tls}` (see above). - * **Value:** List of `ssl:tls_client_option()`. More details can be found in the [official Erlang ssl documentation](http://erlang.org/doc/man/ssl.html). 
- * **Default:** no options - * **Example:** `[{verify, verify_peer}, {cacertfile, "path/to/cacert.pem"}]` +#### `outgoing_pools.ldap.*.connection.connect_interval` +* **Syntax:** integer +* **Default:** `10000` +* **Example:** `connect_interval = 20000` -* **port** - * **Description:** Port to connect to your LDAP server. - * **Values:** Integer - * **Default:** 389 if encryption is disabled. 636 if encryption is enabled. +Reconnect interval after a failed connection. -* **rootdn** - * **Description:** Bind DN - * **Values:** String - * **Default:** empty string which is `anonymous connection` +#### `outgoing_pools.ldap.*.connection.encrypt` +* **Syntax:** string, one of: `"none"` or `"tls"` +* **Default:** `"none"` +* **Example:** `encrypt = "tls"` -* **password** - * **Description:** Bind password - * **Values:** String - * **Default:** empty string +LDAP also supports all TLS-specific options described in the TLS section (provided `encrypt` is set to `tls`). -* **connect_interval** - * **Description:** Interval between consecutive connection attempts in case of connection failure - * **Value:** Integer (milliseconds) - * **Default:** 10000 +## TLS options -### Example +TLS options for a given pool type/tag pair are defined in a subsection starting with `[outgoing_pools.[pool_type].[pool_tag].connection.tls]`. -A pool started for each host with the `default` tag and 5 workers. The LDAP server is at `ldap-server.example.com:389`. MongooseIM will authenticate as `cn=admin,dc=example,dc=com` with the provided password. +#### `outgoing_pools.*.*.connection.tls.required` +* **Syntax:** boolean +* **Default:** `false` +* **Example:** `tls.required = true` -```erlang -{outgoing_pools, [ - {ldap, host, default, [{workers, 5}], - [{servers, ["ldap-server.example.com"]}, - {rootdn, "cn=admin,dc=example,dc=com"}, - {password, "ldap-admin-password"}] - } -]}. -``` +This option is Postgresql-specific, doesn't apply in other cases. 
+ +#### `outgoing_pools.*.*.connection.tls.verify_peer` +* **Syntax:** boolean +* **Default:** `false` +* **Example:** `tls.verify_peer = true` + +Enforces verification of a client certificate. Requires a valid `cacertfile`. + +#### `outgoing_pools.*.*.connection.tls.certfile` +* **Syntax:** string, path in the file system +* **Default:** not set +* **Example:** `tls.certfile = "server.pem"` + +Path to the X509 PEM file with a certificate and a private key (not protected by a password). +If the certificate is signed by an intermediate CA, you should specify here the whole CA chain by concatenating all public keys together and appending the private key after that. + +#### `outgoing_pools.*.*.connection.tls.cacertfile` +* **Syntax:** string, path in the file system +* **Default:** not set +* **Example:** `tls.cacertfile = "ca.pem"` + +Path to the X509 PEM file with a CA chain that will be used to verify clients. It won't have any effect if `verify_peer` is not enabled. + +#### `outgoing_pools.*.*.connection.tls.dhfile` +* **Syntax:** string, path in the file system +* **Default:** not set +* **Example:** `tls.dhfile = "dh.pem"` + +Path to the Diffie-Hellman parameter file. + +#### `outgoing_pools.*.*.connection.tls.keyfile` +* **Syntax:** string, path in the file system +* **Default:** not set +* **Example:** `tls.keyfile = "key.pem"` + +Path to the X509 PEM file with the private key. + +#### `outgoing_pools.*.*.connection.tls.password` +* **Syntax:** string +* **Default:** not set +* **Example:** `tls.password = "secret"` + +Password to the X509 PEM file with the private key. + +#### `outgoing_pools.*.*.connection.tls.ciphers` +* **Syntax:** array of tables with the following keys: `cipher`, `key_exchange`, `mac`, `prf` and string values. +* **Default:** not set, all supported cipher suites are accepted +* **Example:** `tls.ciphers = "[{cipher = "aes_25_gcm", key_exchange = "any", mac = "aead", "prf = sha384"}]"` + +Cipher suites to use. 
For allowed values, see the [Erlang/OTP SSL documentation](https://erlang.org/doc/man/ssl.html#type-ciphers) + +#### `outgoing_pools.*.*.connection.tls.versions` +* **Syntax:** list of strings +* **Default:** not set, all supported versions are accepted +* **Example:** `tls.versions = ["tlsv1.2", "tlsv1.3"]` + +Cipher suites to use. For allowed values, see the [Erlang/OTP SSL documentation](https://erlang.org/doc/man/ssl.html#type-ciphers) + +#### `outgoing_pools.*.*.connection.tls.server_name_indication` +* **Syntax:** boolean +* **Default:** `true` +* **Example:** `tls.server_name_indication = false` + +Enables SNI extension to TLS protocol. diff --git a/doc/advanced-configuration/release-options.md b/doc/advanced-configuration/release-options.md new file mode 100644 index 00000000000..e1b04d8226a --- /dev/null +++ b/doc/advanced-configuration/release-options.md @@ -0,0 +1,226 @@ +When building a MongooseIM release from source code, the initial configuration files are generated with options taken from the `vars-toml.config` file found in the `[MongooseIM root]/rel/` directory. +You can change the values in this file to affect the resulting `vm.args` and `mongooseim.toml` files. + +The file contains erlang tuples terminated with period ('.'). For users not familiar with Erlang syntax, here is a quick cheat sheet: + +* Each config option (key and value) is a tuple. Tuples are (Erlangers, forgive us the simplification) other Erlang terms separated with commas and enclosed in curly brackets ({}). +* Tuples (at least the top-level ones) in `vars.config` are always 2-element. +* The first element of each tuple is the name (Erlang atom). +* The second element is a quoted string. Any quotes (`"`) inside the string should be escaped with a backslash (`\`). + +There are two types of options: parameters and blocks: + +* a **parameter** is inserted into the value of an already defined option. +Parameters are mandatory - a valid value has to be provided. 
+* a **block** can be an empty string, one line or multiple lines, defining zero, one or more options. +Blocks are optional - the default is an empty string. + +# vm.args options + +These options are inserted into the `rel/files/vm.args` template. + +## node_name + +* **Type:** parameter +* **Option:** value of `-sname` in [vm.args](../../advanced-configuration#options) +* **Syntax:** Erlang node name: `name@host` +* **Example:** `{node_name, "mongooseim@localhost"}.` + +## highload_vm_args + +* **Type:** block +* **Option:** arguments in [vm.args](../../advanced-configuration#options): `+K`, `+A`, `+P`, `-env ERL_MAX_PORTS` +* **Syntax:** command-line arguments +* **Example:** `{highload_vm_args, "+P 10000000 -env ERL_MAX_PORTS 250000"}.` + +# TOML Options + +These options are inserted into the `rel/files/mongooseim.toml` template. + +## hosts + +* **Type:** parameter +* **Option:** [`general.hosts`](../../advanced-configuration/general#generalhosts) +* **Syntax:** comma-separated list of strings +* **Example:** `{hosts, "\"localhost\", \"domain2\""}.` + +## host_config + +* **Type:** block +* **Option:** [`host_config`](../../advanced-configuration/host_config) +* **Syntax:** TOML block, one or more `[[host_config]]` sections. +* **Example:** + +``` +{host_config, " +[[host_config]] + host = \"anonymous.localhost\" + + [host_config.auth] + methods = [\"anonymous\"] +"}. +``` + +## auth_ldap + +* **Type:** block +* **Option:** [`auth.ldap`](../../authentication-methods/ldap) +* **Syntax:** TOML block, the `[auth.ldap]` subsection +* **Example:** + +``` +{auth_ldap, " + [auth.ldap] + base = \"ou=Users,dc=esl,dc=com\" + filter = \"(objectClass=inetOrgPerson)\" +"}. 
+``` + +## all_metrics_are_global + +* **Type:** parameter +* **Option:** [`general.all_metrics_are_global`](../../advanced-configuration/general#generalall_metrics_are_global) +* **Syntax:** boolean +* **Example:** `{all_metrics_are_global, "false"}.` + +## s2s_addr + +* **Type:** block +* **Option:** [`auth.s2s.address`](../../advanced-configuration/s2s#s2saddress) +* **Syntax:** TOML key-value pair with the `address` option +* **Example:** + +``` +{s2s_addr, " + address = [ + {host = \"my.xmpp.org\", ip_address = \"192.0.100.1\"}, + {host = \"your.xmpp.org\", ip_address = \"192.0.1.100\", port = 5271} + ] +"}. +``` + +## s2s_default_policy + +* **Type:** parameter +* **Option:** [`s2s.default_policy`](../../advanced-configuration/s2s#s2sdefault_policy) +* **Syntax:** string +* **Example:** `{s2s_default_policy, "\"deny\""}.` + +## outgoing_s2s_port + +* **Type:** parameter +* **Option:** [`s2s.outgoing.port`](../../advanced-configuration/s2s#s2soutgoingport) +* **Syntax:** integer +* **Example:** `{outgoing_s2s_port, "5269"}.` + +## c2s_port + +* **Type:** parameter +* **Option:** [`listen.c2s.port`](../../advanced-configuration/listen#listenport) +* **Syntax:** integer +* **Example:** `{c2s_port, "5222"}.` + +## s2s_port + +* **Type:** parameter +* **Option:** [`listen.s2s.port`](../../advanced-configuration/listen#listenport) +* **Syntax:** integer +* **Example:** `{s2s_port, "5269"}.` + +## cowboy_port + +* **Type:** parameter +* **Option:** [`listen.http.port`](../../advanced-configuration/listen#listenport) +* **Syntax:** integer +* **Example:** `{http_port, "5280"}.` + +## mod_last + +* **Type:** block +* **Option:** [`modules.mod_last`](../../modules/mod_last) +* **Syntax:** TOML section: `[modules.mod_last]` +* **Example:** `{mod_last, "[modules.mod_last]"}.` + +## mod_offline + +* **Type:** block +* **Option:** [`modules.mod_offline`](../../modules/mod_offline) +* **Syntax:** TOML section: `[modules.mod_offline]` +* **Example:** + +``` +{mod_offline, " 
+[modules.mod_offline] + access_max_user_messages = \"max_user_offline_messages\" +"}. +``` + +## mod_privacy + +* **Type:** block +* **Option:** [`modules.mod_privacy`](../../modules/mod_privacy) +* **Syntax:** TOML section: `[modules.mod_privacy]` +* **Example:** `{mod_privacy, "[modules.mod_privacy]"}.` + +## mod_private + +* **Type:** block +* **Option:** [`modules.mod_private`](../../modules/mod_private) +* **Syntax:** TOML section: `[modules.mod_private]` +* **Example:** `{mod_private, "[modules.mod_private]"}.` + +## mod_roster + +* **Type:** block +* **Option:** [`modules.mod_roster`](../../modules/mod_roster) +* **Syntax:** TOML section: `[modules.mod_roster]` +* **Example:** `{mod_roster, "[modules.mod_roster]"}.` + +## mod_vcard + +* **Type:** block +* **Option:** [`modules.mod_vcard`](../../modules/mod_vcard) +* **Syntax:** TOML section: `[modules.mod_vcard]` +* **Example:** + +``` +{mod_vcard, " +[modules.mod_vcard] + host = \"vjud.@HOST@\" +"}. +``` + +## sm_backend + +* **Type:** parameter +* **Option:** [`general.sm_backend`](../../advanced-configuration/general#generalsm_backend) +* **Syntax:** string +* **Example:** `{sm_backend, \""redis\""}.` + +## tls_config + +* **Type:** block +* **Option:** [`listen.c2s.tls.*`](../../advanced-configuration/listen#tls-options-for-c2s) +* **Syntax:** TOML key-value pairs +* **Example:** + +``` +{tls_config, " + tls.certfile = \"priv/ssl/fake_server.pem\" + tls.mode = \"starttls\" +"}. 
+``` + +## auth_method + +* **Type:** parameter +* **Option:** [`auth.methods`](../../advanced-configuration/auth#authmethods) +* **Syntax:** comma-separated list of strings +* **Example:** `{auth_method, "\"internal\""}.` + +## zlib + +* **Type:** block +* **Option:** [`listen.c2s.zlib`](../../advanced-configuration/listen#listenc2szlib) +* **Syntax:** TOML key-value pair +* **Example:** `{zlib, "zlib = 10_000"}.` diff --git a/doc/advanced-configuration/s2s.md b/doc/advanced-configuration/s2s.md new file mode 100644 index 00000000000..941cb3d6b79 --- /dev/null +++ b/doc/advanced-configuration/s2s.md @@ -0,0 +1,160 @@ +The `s2s` section contains options configuring the server-to-server connections used to communicate with other federated XMPP servers. + +# General options + +These options affect both incoming and outgoing S2S connections. + +## `s2s.default_policy` +* **Scope:** local +* **Syntax:** string, `"allow"` or `"deny"` +* **Default:** `"allow"` +* **Example:** `default_policy = "deny"` + +Default policy for opening new S2S connections to/from remote servers. + +## `s2s.host_policy` +* **Scope:** local +* **Syntax:** array of TOML tables with the following mandatory content: + * `host` - string, host name + * `policy` - string, `"allow"` or `"deny"` +* **Default:** `"allow"` +* **Example:** + +```toml + host_policy = [ + {host = "good.xmpp.org", policy = "allow"}, + {host = "bad.xmpp.org", policy = "deny"} + ] +``` + +Policy for opening new connections to/from specific remote servers. 
+ +## `s2s.use_starttls` +* **Scope:** local +* **Syntax:** string, one of `"false"`, `"optional"`, `"required"`, `"required_trusted"` +* **Default:** `"false"` +* **Example:** `use_starttls = "required"` + +Allows to configure StartTLS for incoming and outgoing S2S connections: + +- `false` - StartTLS is disabled, +- `optional` - StartTLS is supported, +- `required` - StartTLS is supported and enforced, +- `required_trusted` - StartTLS is supported and enforced with certificate verification. + +## `s2s.certfile` +* **Scope:** local +* **Syntax:** string, path in the file system +* **Default:** not set +* **Example:** `certfile = "cert.pem"` + +Path to the X509 PEM file with a certificate and a private key inside (not protected by any password). Required if `use_starttls` is not `false`. + +## `s2s.domain_certfile` +* **Scope:** local +* **Syntax:** array of TOML tables with the following mandatory content: + * `domain` - string, XMPP domain name + * `certfile` - string, path in the file system +* **Default:** not set +* **Example:** + +```toml + domain_certfile = [ + {domain = "localhost1.com", certfile = "cert1.pem"}, + {domain = "localhost2.com", certfile = "cert2.pem"} + ] +``` + +This option overrides the configured certificate file for specific local XMPP domains. + +**Notes:** + +* This option applies to **S2S and C2S** connections. +* Each domain needs to be included in the list of [`hosts`](general.md#generalhosts) configured in the `general` section. + +## `s2s.shared` +* **Scope:** local +* **Syntax:** string +* **Default:** 10 strong random bytes, hex-encoded +* **Example:** `shared = "82gc8b23ct7824"` + +S2S shared secret used in the [Server Dialback](https://xmpp.org/extensions/xep-0220.html) extension. + +# Outgoing connections + +The options listed below affect only the outgoing S2S connections. 
+ +## `s2s.address` +* **Scope:** local +* **Syntax:** array of TOML tables with the following content: + * `host` - string, mandatory, host name + * `ip_address` - string, mandatory, IP address + * `port` - integer, optional, port number +* **Default:** `"allow"` +* **Example:** + +```toml + address = [ + {host = "my.xmpp.org", ip_address = "192.0.100.1"}, + {host = "your.xmpp.org", ip_address = "192.0.1.100", port = 5271} + ] +``` + +This option defines IP addresses and port numbers for specific non-local XMPP domains, allowing to override the DNS lookup for outgoing S2S connections. + +## `s2s.ciphers` +* **Scope:** local +* **Syntax:** string +* **Default:** `"TLSv1.2:TLSv1.3"` +* **Example:** `ciphers = "TLSv1.2"` + +Defines a list of accepted SSL ciphers for outgoing S2S connections. +Please refer to the [OpenSSL documentation](http://www.openssl.org/docs/apps/ciphers.html) for the cipher string format. + +## `s2s.max_retry_delay` +* **Scope:** local +* **Syntax:** positive integer +* **Default:** `300` +* **Example:** `max_retry_delay = 300` + +Specifies the maximum time in seconds that MongooseIM will wait until the next attempt to connect to a remote XMPP server. The delays between consecutive attempts will be doubled until this limit is reached. + +## `s2s.outgoing.port` +* **Scope:** local +* **Syntax:** integer, port number +* **Default:** `5269` +* **Example:** `outgoing.port = 5270` + +Defines the port to be used for outgoing S2S connections. + +## `s2s.outgoing.ip_versions` +* **Scope:** local +* **Syntax:** array of integers (IP versions): `4` or `6` +* **Default:** `[4, 6]` +* **Example:** `outgoing.ip_versions = [6]` + +Specifies the order of IP address families to try when establishing an outgoing S2S connection. 
+ +## `s2s.outgoing.connection_timeout` +* **Scope:** local +* **Syntax:** positive integer or the string `"infinity"` +* **Default:** `10_000` +* **Example:** `outgoing.connection_timeout = 5000` + +Timeout (in seconds) for establishing an outgoing S2S connection. + +## `s2s.dns.timeout` +* **Scope:** local +* **Syntax:** positive integer +* **Default:** `10` +* **Example:** `dns.timeout = 30` + +Timeout (in seconds) for DNS lookups when opening an outgoing S2S connection. + +## `s2s.dns.retries` +* **Scope:** local +* **Syntax:** positive integer +* **Default:** `2` +* **Example:** `dns.retries = 1` + +Number of DNS lookup attempts when opening an outgoing S2S connection. diff --git a/doc/advanced-configuration/shaper.md b/doc/advanced-configuration/shaper.md new file mode 100644 index 00000000000..da679e29042 --- /dev/null +++ b/doc/advanced-configuration/shaper.md @@ -0,0 +1,62 @@ +The `shaper` section specifies **traffic shapers** used to limit the incoming XMPP traffic, providing a safety valve to protect the server. It can be used to prevent DoS attacks or to calm down too noisy clients. + +* **Scope:** global +* **Syntax:** each shaper is specified in a subsection starting with `[shaper.name]` where `name` is used to uniquely identify the shaper. +* **Default:** no default - each shaper needs to be specified explicitly. +* **Example:** the `normal` shaper is used for the C2S connections. + +```toml +[shaper.normal] + max_rate = 1000 +``` + +# Traffic shaper options + +### `shaper.maxrate` +* **Syntax:** positive integer +* **Default:** no default, this option is mandatory +* **Example:** `maxrate = 1000` + +Defines the maximum accepted rate. For the shapers used by XMPP listeners this is the number of bytes per second, but there are shapers that use different units, e.g. [MAM shapers](#mam-shapers). + +# Examples + +The following examples show the typical shaper definitions. 
+ +## C2S Shaper + +This is the typical definition of an XMPP shaper, which accepts the maximum data rate of 1 kbps. When the rate is exceeded, the receiver pauses before processing the next packet. + +```toml +[shaper.normal] + max_rate = 1000 +``` + +To make use of it, the [corresponding rule](access.md#c2s-shaper) should be defined in the `access` section. +Finally, the C2S listener has to be configured to use the defined shaper - see the [C2S Example](listen.md#c2s-example). + +## S2S Shaper + +For S2S connections we need to increase the limit as they receive the accumulated traffic from multiple users - e.g. to 50 kbps: + +```toml +[shaper.fast] + max_rate = 50_000 +``` + +To make use of it, the [corresponding rule](access.md#s2s-shaper) should be defined in the `access` section. +Finally, the C2S listener has to be configured to use the defined shaper - see the [S2S Example](listen.md#s2s-example). + +## MAM Shapers + +These shapers limit the number of MAM operations per second (rather than bytes per second). + +```toml +[shaper.mam_shaper] + max_rate = 1 + +[shaper.mam_global_shaper] + max_rate = 1000 +``` + +To make use of them, the [corresponding rules](access.md#mam-shapers) should be defined in the `access` section. diff --git a/doc/authentication-backends/JWT-authentication-module.md b/doc/authentication-backends/JWT-authentication-module.md deleted file mode 100644 index e44715e325e..00000000000 --- a/doc/authentication-backends/JWT-authentication-module.md +++ /dev/null @@ -1,36 +0,0 @@ -## Overview - -JWT authentication backend can verify JSON Web Tokens provided by the clients. -A wide range of signature algorithms is supported, including those using public key cryptography. - -The module checks the signature and validity of the following parameters: - -* `exp` - an expired token is rejected, -* `iat` - a token must be issued in the past, -* `nbf` - a token might not be valid *yet*. - -Requires the SASL PLAIN method. 
- -## Configuration options - -* **jwt_secret_source** - * **Description:** A path to a file or an environment variable, which will be used as a JWT secret. - * **Warning:** Please note that while a direct path to a file is read only once during startup, a path in the environment variable is read on every auth request. - * **Value:** string, e.g. `/etc/secrets/jwt` or `{env, "env-variable-name"}` - * **Default:** none, either `jwt_secret_source` or `jwt_secret` must be set - -* **jwt_secret** - * **Description:** A binary with a JWT secret. This option is ignored and overwritten, if `jwt_secret_source` is defined. - * **Value:** binary - * **Default:** none (either `jwt_secret_source` or `jwt_secret` must be set) - -* **jwt_algorithm** - * **Description:** A name of the algorithm used to sign JWT. - * **Valid values:** `"HS256", "RS256", "ES256", "HS386", "RS386", "ES386", "HS512", "RS512", "ES512"` - * **Default:** none, it's a mandatory option - -* **jwt_username_key** - * **Description:** A JWT key that contains the username to verify. - * **Value:** atom - * **Default:** none, it's a mandatory option - diff --git a/doc/authentication-backends/LDAP-authentication-module.md b/doc/authentication-backends/LDAP-authentication-module.md deleted file mode 100644 index 15f7cbe54fd..00000000000 --- a/doc/authentication-backends/LDAP-authentication-module.md +++ /dev/null @@ -1,123 +0,0 @@ -## Overview - -An LDAP authentication module. -It provides a read-only abstraction over an LDAP directory. - -The following SASL methods are supported: - -### SASL EXTERNAL - -User credentials are verified by performing an LDAP search with the user name provided by the client. This can be used to verify that the user is allowed to log in after the provided certificate has been verified. - -This method requires one connection pool with the `default` tag (unless you change it with the `ldap_pool_tag` option). 
You need to provide the root DN and password unless your LDAP password allows anonymous searches. - -Example: - -```erlang -{outgoing_pools, [ - {ldap, host, default, [{workers, 5}], - [{servers, ["ldap-server.example.com"]}, - {rootdn, "cn=admin,dc=example,dc=com"}, - {password, "ldap-admin-password"}] - } -]}. -``` - -For more details see [outgoing connections](../advanced-configuration/outgoing-connections.md). - -### SASL PLAIN - -User credentials are verified by performing an LDAP search followed by a bind with the user name and password provided by the client. - -To use SASL PLAIN, you need to configure two connection pools: - -* one with the `default` tag (unless you change it with the `ldap_pool_tag` option) for the search operations (like for SASL EXTERNAL), -* one with the `bind` tag (unless you change it with the `ldap_bind_pool_tag` option) for the bind operations - for this one it is not necessary to provide the root DN and password as the bind operations will be performed with users' credentials. This pool has to be used exclusively for the bind operations as the authentication state of the connection changes with each request. - -Example: - -```erlang -{outgoing_pools, [ - {ldap, host, default, [{workers, 5}], - [{servers, ["ldap-server.example.com"]}, - {rootdn, "cn=admin,dc=example,dc=com"}, - {password, "ldap-admin-password"}] - }, - {ldap, host, bind, [{workers, 5}], - [{servers, ["ldap-server.example.com"]}] - } -]}. -``` - -For more details see [outgoing connections](../advanced-configuration/outgoing-connections.md). - -## Configuration options - -The following options can be set in the `auth_opts` tuple in `mongooseim.cfg`. - -* **ldap_pool_tag:** - * **Description:** Worker pool tag for the search operations. - * **Value:** Atom - * **Default:** `default` - -* **ldap_bind_pool_tag:** - * **Description:** Worker pool tag for the search operations. 
- * **Value:** Atom - * **Default:** `bind` - -* **ldap_base:** - * **Description:** LDAP base directory which stores user accounts. - * **Value:** String - * **Default:** This option is required - -* **ldap_uids:** - * **Description:** An LDAP attribute holding a list of attributes to use as alternatives for getting the JID. - The attributes take the following form: `[{ldap_uidattr}]` or `[{ldap_uidattr, ldap_uidattr_format}]`. - You can use as many comma separated attributes as needed. - * **Value:** `[ ldap_uidattr | {ldap_uidattr: ldap_uidattr_format} ]` - * **ldap_uidattr:** An LDAP attribute holding the user’s part of a JID. The default value is `uid`. - * **ldap_uidattr_format:** The format of the`ldap_uidattr` variable. - It must contain one and only one pattern variable `%u` which will be replaced by the user’s part of a JID (example: `%u@example.org`). - The default value is `%u`. - * **Default** `[{uid, %u}]` - -* **ldap_filter:** - * **Description:** An LDAP filter. - Please, do not forget to close the brackets and do not use superfluous whitespaces. - Also do not use the `ldap_uidattr` attribute in the filter because it will be substituted in the LDAP filter automatically. - * **Value:** String. For example: `"(&(objectClass=shadowAccount)(memberOf=Jabber Users))"` - * **Default:** `undefined` - -* **ldap_dn_filter:** - * **Description:** This filter is applied to the results returned by the main filter. - It performs an additional LDAP lookup to provide the complete result. - This is useful when you are unable to define all filter rules in the `ldap_filter`. - You can define `%u`, `%d`, `%s` and `%D` pattern variables in the filter: `%u` is replaced by a user’s part of a JID, `%d` is replaced by the corresponding domain (virtual host), all `%s` variables are consecutively replaced by values of `FilterAttrs` attributes and `%D` is replaced by the Distinguished Name. 
- Since this filter makes additional LDAP lookups, use it only as the last resort; try to define all filter rules in ldap_filter if possible. - * **Value:** `{Filter, [FilterAttributes]}`. For example: `{"(&(name=%s)(owner=%D)(user=%u@%d))": ["sn"]}` - * **Default:** `undefined` - -* **ldap_local_filter:** - * **Description:** If you can’t use the `ldap_filter` due to performance reasons (the LDAP server has many users registered), you can use this local filter. - The local filter checks an attribute in MongooseIM, not in LDAP, so this limits the load on the LDAP directory. - * **Value:** `Filter`. Example values: - ``` - {ldap_local_filter, {notequal, {"accountStatus",["disabled"]}}}. - {ldap_local_filter, {equal, {"accountStatus",["enabled"]}}}. - {ldap_local_filter, undefined}. - ``` - * **Default:** `undefined` - -* **ldap_deref** - * **Description:** Whether or not to dereference aliases - * **Values:** `never`, `always`, `finding`, `searching` - * **Default:** `never` - -Example: - -```erlang -{auth_opts, [ - {ldap_base, "ou=Users,dc=example,dc=com"}, - {ldap_filter, "(objectClass=inetOrgPerson)"} -]}. -``` diff --git a/doc/authentication-backends/Riak-authentication-module.md b/doc/authentication-backends/Riak-authentication-module.md deleted file mode 100644 index 3dc39a3be27..00000000000 --- a/doc/authentication-backends/Riak-authentication-module.md +++ /dev/null @@ -1,21 +0,0 @@ -## Overview - -A Riak authentication module `ejabberd_auth_riak`. -It stores users in riak. - -## Configuration options - -The following options can be set in the `auth_opts` tuple in `mongooseim.cfg`. - -* **bucket_type:** - * **Description:** Riak bucket type. - * **Value:** Binary - * **Default:** `<<"users">>` - -Example: - -```erlang -{auth_opts, [ - {bucket_type, <<"users">>} -]}. 
-``` diff --git a/doc/authentication-methods/anonymous.md b/doc/authentication-methods/anonymous.md new file mode 100644 index 00000000000..2c089b5d1c5 --- /dev/null +++ b/doc/authentication-methods/anonymous.md @@ -0,0 +1,34 @@ +## Overview + +This authentication method allows the users to connect anonymously. + +## Configuration options + +### `auth.anonymous.allow_multiple_connections` +* **Syntax:** boolean +* **Default:** `false` +* **Example:** `allow_multiple_connections = true` + +When set to true, allows multiple connections from the same JID using the `anonymous` authentication method. + +### `auth.anonymous.protocol` +* **Syntax:** string, one of `"sasl_anon"`, `"login_anon"`, `"both"` +* **Default:** `sasl_anon` +* **Example:** `anonymous_protocol = "both"` + +Specifies the SASL mechanisms supported by the `anonymous` authentication method: + +* `sasl_anon` - support only the the `ANONYMOUS` mechanism, +* `login_anon` - support the non-anonymous mechanisms (`PLAIN`, `DIGEST-MD5`, `SCRAM-*`), +* `both` - support both types of mechanisms. + +### Example + +```toml +[auth] + methods = ["anonymous"] + + [auth.anonymous] + allow_multiple_connections = true + anonymous_protocol = "both" +``` diff --git a/doc/authentication-backends/Dummy-authentication-module.md b/doc/authentication-methods/dummy.md similarity index 87% rename from doc/authentication-backends/Dummy-authentication-module.md rename to doc/authentication-methods/dummy.md index 4a6db881cbc..15a96a824a1 100644 --- a/doc/authentication-backends/Dummy-authentication-module.md +++ b/doc/authentication-methods/dummy.md @@ -1,6 +1,6 @@ ## Overview -The purpose of this module is to make it possible to authenticate a user without +The purpose of this method is to make it possible to authenticate a user without the need for real authentication. In other words, using this module allows to connect any user to the server without providing any password, certificate, etc. 
diff --git a/doc/authentication-backends/External-authentication-module.md b/doc/authentication-methods/external.md similarity index 52% rename from doc/authentication-backends/External-authentication-module.md rename to doc/authentication-methods/external.md index 342920ba902..2e1dba18d1f 100644 --- a/doc/authentication-backends/External-authentication-module.md +++ b/doc/authentication-methods/external.md @@ -1,8 +1,8 @@ ## Overview -This backend delegates the authentication to an external script. +This authentication method delegates the authentication to an external script. -Requires the SASL PLAIN method. +It uses the `SASL PLAIN` mechanism. ## Script API specification @@ -24,6 +24,27 @@ The following list describes packets that the script should support. ## Configuration options -* **extauth_program** - * **Description:** Path to the authentication script used by the `external` auth module. +### `auth.external.program` +* **Syntax:** string +* **Default:** no default, this option is mandatory for the `external` authentication method +* **Example:** `program = "/usr/bin/auth-script.sh"` +Path to the external authentication program. + +### `auth.external.instances` +* **Syntax:** positive integer +* **Default:** `1` +* **Example:** `instances = 2` + +Specifies the number of workers serving external authentication requests. 
+ +### Example + +```toml +[auth] + methods = ["external"] + + [auth.external] + program = "/home/user/authenticator" + instances = 5 +``` diff --git a/doc/authentication-backends/HTTP-authentication-module.md b/doc/authentication-methods/http.md similarity index 68% rename from doc/authentication-backends/HTTP-authentication-module.md rename to doc/authentication-methods/http.md index d542fea2183..bed0c078ee4 100644 --- a/doc/authentication-backends/HTTP-authentication-module.md +++ b/doc/authentication-methods/http.md @@ -1,39 +1,45 @@ ## Overview -The purpose of this module is to connect with an external REST API and delegate the authentication operations to it whenever possible. -The component must implement the API described in one of the next sections for `ejabberd_auth_http` to work out of the box. +The purpose of this method is to connect to an external REST API and delegate the authentication operations to it. +The component must implement the [API described below](#authentication-service-api). -The module can be especially useful for users maintaining their own central user database which is shared with other services. It fits perfectly when the client application uses a custom authentication token and MongooseIM has to validate it externally. +This method can be especially useful when the user database is shared with other services. It fits perfectly when the client application uses a custom authentication token and MongooseIM has to validate it externally. -## Configuration +### Configuration options -### How to enable +The `auth` method uses an outgoing HTTP connection pool called `auth`, which has to be defined in the `outgoing_pools` section. -For a full reference please check [Advanced-configuration#authentication](../Advanced-configuration.md#authentication). -The simplest way is to just replace the default `auth_method` option in `rel/files/mongooseim.cfg` with `{auth_method, http}`. 
+For additional configuration, the following options can be provided in the `auth` section: -Enabling the module **is not enough!** -Please follow instructions below. +### `auth.http.basic_auth` +* **Syntax:** string +* **Default:** not set +* **Example:** `basic_auth = "admin:secret"` -### Configuration options +Optional HTTP Basic Authentication in format `"username:password"` - used to authenticate MongooseIM in the HTTP service. -`ejabberd_auth_http` uses an outgoing http connection pool called `auth`. -The pool has to be defined in outgoing_pools section (see [Outgoing-connections#/http-connections](../advanced-configuration/outgoing-connections#http-connections-setup)). -The following options can be set in the `auth_opts` tuple in `rel/files/mongooseim.cfg`: +### Example -* `basic_auth` (default: `""`) - HTTP Basic Authentication in format `"username:password"`; auth service doesn't have to require authentication for HTTP auth to work +Authentication: -#### Example +```toml +[auth] + methods = ["http"] + [auth.http] + basic_auth = "mongooseim:DzviNQw3qyGJDrJDu+ClyA" ``` -{auth_opts, [ - {basic_auth, "mongooseim:DzviNQw3qyGJDrJDu+ClyA"}, - ]}. + +Outgoing pools: + +```toml +[outgoing_pools.http.auth] + connection.host = "https://auth-service:8000" ``` ## SCRAM support -`ejabberd_auth_http` can use the SCRAM method. +The `http` method can use the `SASL SCRAM-*` mechanisms. When SCRAM is enabled, the passwords sent to the auth service are serialised and the same serialised format is expected when fetching a password from the component. It is transparent when MongooseIM is responsible for all DB operations such as password setting, account creation etc. @@ -151,23 +157,23 @@ Below you can find some examples of the auth service APIs and MongooseIM-side co An Auth token is provided as a password. 
* **Service implements:** `check_password`, `user_exists` -* **MongooseIM config:** `password_format`: `plain`, `mod_register` disabled -* **Client side:** MUST NOT use `DIGEST-MD5` mechanism; use `PLAIN` +* **MongooseIM config:** [`password.format`](../advanced-configuration/auth.md#authpasswordformat): `plain`, `mod_register` disabled +* **Client side:** Must NOT use the `DIGEST-MD5` mechanism; use `PLAIN` instead #### Central database of plaintext passwords * **Service implements:** `check_password`, `get_password`, `user_exists` -* **MongooseIM config:** `password_format`: `plain`, `mod_register` disabled -* **Client side:** May use any available auth method +* **MongooseIM config:** [`password.format`](../advanced-configuration/auth.md#authpasswordformat): `plain`, `mod_register` disabled +* **Client side:** May use any available SASL mechanism #### Central database able to process SCRAM * **Service implements:** `get_password`, `user_exists` -* **MongooseIM config:** `password_format`: `scram`, `mod_register` disabled -* **Client side:** May use any available auth method +* **MongooseIM config:** [`password.format`](../advanced-configuration/auth.md#authpasswordformat): `scram`, `mod_register` disabled +* **Client side:** May use any available SASL mechanism #### Godlike MongooseIM * **Service implements:** all methods -* **MongooseIM config:** `password_format`: `scram` (recommended) or `plain`, `mod_register` enabled -* **Client side:** May use any available auth method +* **MongooseIM config:** [`password.format`](../advanced-configuration/auth.md#authpasswordformat): `scram` (recommended) or `plain`, `mod_register` enabled +* **Client side:** May use any available SASL mechanism diff --git a/doc/authentication-methods/jwt.md b/doc/authentication-methods/jwt.md new file mode 100644 index 00000000000..0ab496cc1f8 --- /dev/null +++ b/doc/authentication-methods/jwt.md @@ -0,0 +1,50 @@ +## Overview + +This authentication method can verify [JSON Web 
Tokens](https://jwt.io) provided by the clients. +A wide range of signature algorithms is supported, including those using public key cryptography. + +The module checks the signature and validity of the following parameters: + +* `exp` - an expired token is rejected, +* `iat` - a token must be issued in the past, +* `nbf` - a token might not be valid *yet*. + +It requires the `SASL PLAIN` mechanism listed in `sasl_mechanisms`. + +## Configuration options + +### `auth.jwt.secret` +* **Syntax:** TOML table with exactly one of the possible items listed below: + * `file` - string, path to the file with the JWT secret, + * `env`- string, environment variable name with the JWT secret, + * `value` - string, the JWT secret value. +* **Default:** no default, this option is mandatory +* **Example:** `secret.env = "JWT_SECRET"` + +This is the JWT secret used for the authentication. You can store it in a file, as an environment variable or specify it directly. + +### `auth.jwt.algorithm` +* **Syntax:** string, one of: `"HS256"`, `"RS256"`, `"ES256"`, `"HS386"`, `"RS386"`, `"ES386"`, `"HS512"`, `"RS512"`, `"ES512"` +* **Default:** no default, this option is mandatory +* **Example:** `algorithm = "HS512"` + +Name of the algorithm used to sign the JWT. + +### `auth.jwt.username_key` +* **Syntax:** string +* **Default:** no default, this option is mandatory +* **Example:** `username_key = "user_name"` + +Name of the JWT key that contains the user name to verify. + +### Example + +```toml +[auth] + methods = ["jwt"] + + [auth.jwt] + secret.value = "top-secret123" + algorithm = "HS256" + username_key = "user" +``` diff --git a/doc/authentication-methods/ldap.md b/doc/authentication-methods/ldap.md new file mode 100644 index 00000000000..987184e0073 --- /dev/null +++ b/doc/authentication-methods/ldap.md @@ -0,0 +1,149 @@ +## Overview + +This authentication method provides a read-only abstraction over an LDAP directory. 
+
+The following SASL mechanisms are supported:
+
+### SASL EXTERNAL
+
+User credentials are verified by performing an LDAP search with the user name provided by the client. This can be used to verify that the user is allowed to log in after the provided certificate has been verified.
+
+This method requires one connection pool with the `default` tag (unless you change it with the `pool_tag` option). You need to provide the root DN and password unless your LDAP server allows anonymous searches.
+
+Example:
+
+```toml
+[outgoing_pools.ldap.default]
+  workers = 5
+  connection.servers = ["ldap-server.example.com"]
+  connection.rootdn = "cn=admin,dc=example,dc=com"
+  connection.password = "ldap-admin-password"
+```
+
+For more details see [outgoing connections](../advanced-configuration/outgoing-connections.md).
+
+### SASL PLAIN
+
+User credentials are verified by performing an LDAP search followed by a bind with the user name and password provided by the client.
+
+To use SASL PLAIN, you need to configure two connection pools:
+
+* one with the `default` tag (unless you change it with the `pool_tag` option) for the search operations (like for SASL EXTERNAL),
+* one with the `bind` tag (unless you change it with the `bind_pool_tag` option) for the bind operations - for this one it is not necessary to provide the root DN and password as the bind operations will be performed with users' credentials. This pool has to be used exclusively for the bind operations as the authentication state of the connection changes with each request.
+
+Example:
+
+```toml
+[outgoing_pools.ldap.default]
+  workers = 5
+  connection.servers = ["ldap-server.example.com"]
+  connection.rootdn = "cn=admin,dc=example,dc=com"
+  connection.password = "ldap-admin-password"
+
+[outgoing_pools.ldap.bind]
+  connection.servers = ["ldap-server.example.com"]
+```
+
+For more details see [outgoing connections](../advanced-configuration/outgoing-connections.md).
+
+## Configuration options
+
+### `auth.ldap.pool_tag`
+* **Syntax:** string
+* **Default:** `"default"`
+* **Example:** `pool_tag = "my_pool"`
+
+Specifies the tag for the primary outgoing connection pool for LDAP authentication.
+
+### `auth.ldap.bind_pool_tag`
+* **Syntax:** string
+* **Default:** `"bind"`
+* **Example:** `bind_pool_tag = "my_bind_pool"`
+
+Specifies the tag for the secondary outgoing connection pool for LDAP authentication, used for operations that require the `bind` operation, such as checking passwords.
+
+### `auth.ldap.base`
+* **Syntax:** string
+* **Default:** no default, this option is mandatory
+* **Example:** `base = "ou=Users,dc=example,dc=com"`
+
+LDAP base directory which stores user accounts.
+
+### `auth.ldap.uids`
+* **Syntax:** array of TOML tables with the following content:
+    * `attr` - string, mandatory, name of the attribute
+    * `format` - pattern, default: `"%u"`, requires `attr`
+* **Default:** `[{attr = "my_uid"}]`
+* **Example:** `uids = [{attr = "my_uid", format = "%u@example.org"}, {attr = "another_uid"}]`
+
+List of LDAP attributes that contain the user name (user's part of the JID), used to search for user accounts.
+They are used as alternatives - it is enough if one of them contains the name.
+By default the whole value of the attribute is expected to be the user name.
+If this is not the case, use the `format` option.
+It must contain one and only one pattern variable `%u` which will be replaced by the user name.
+
+### `auth.ldap.filter`
+* **Syntax:** string
+* **Default:** not set
+* **Example:** `filter = "(&(objectClass=shadowAccount)(memberOf=Jabber Users))"`
+
+An additional LDAP filter used to narrow down the search for user accounts.
+Do not forget to close the brackets and do not use superfluous whitespaces as this expression is processed before sending to LDAP - the match for user name (see `ldap.uids`) is added automatically.
+
+### `auth.ldap.dn_filter`
+* **Syntax:** TOML table with the following content:
+    * `filter` - string (LDAP filter), mandatory
+    * `attributes` - array of strings (attribute names)
+* **Default:** not set
+* **Example:** `dn_filter = {filter = "(&(name=%s)(owner=%D)(user=%u@%d))", attributes = ["sn"]}`
+
+This filter is applied to the results returned by the main filter.
+It performs an additional LDAP lookup to provide the complete result.
+This is useful when you are unable to define all filter rules in `ldap.filter`.
+You can define `%u`, `%d`, `%s` and `%D` pattern variables in the filter:
+
+* `%u` is replaced by the user’s part of a JID,
+* `%d` is replaced by the corresponding domain (virtual host),
+* `%s` variables are consecutively replaced by values of the attributes listed as `attributes`,
+* `%D` is replaced by the Distinguished Name.
+
+Since this filter makes additional LDAP lookups, use it only as the last resort; try to define all filter rules in `ldap.filter` if possible.
+
+### `auth.ldap.local_filter`
+* **Syntax:** TOML table with the following content:
+    * `operation` - string, mandatory, `"equal"` or `"notequal"`
+    * `attribute` - string, mandatory, LDAP attribute
+    * `values` - array of strings (attribute values)
+* **Default:** not set
+* **Example:** `local_filter = {operation = "equal", attribute = "accountStatus", values = ["enabled"]}`
+
+If you can’t use the `ldap.filter` due to performance reasons (the LDAP server has many users registered), you can use this local filter.
+The local filter checks an attribute in MongooseIM, not in LDAP, so this limits the load on the LDAP directory.
+
+The example above shows a filter which matches accounts with the "enabled" status.
+Another example is shown below - it matches any account that is neither "disabled" nor "blacklisted".
+It also shows the usage of TOML dotted keys, which is recommended when the inline table grows too big.
+ +```toml + local_filter.operation = "notequal" + local_filter.attribute = "accountStatus" + local_filter.values = ["disabled", "blacklisted"] +``` + +### `auth.ldap.deref` +* **Syntax:** string, one of: `"never"`, `"always"`, `"finding"`, `"searching"` +* **Default:** `"never"` +* **Example:** `deref = "always"` + +Specifies whether or not to dereference aliases: `finding` means to dereference only when finding the base and `searching` - only when performing the LDAP search. See the documentation on [LDAP search operation](https://ldap.com/the-ldap-search-operation/) for more information. + +### Example + +```toml +[auth] + methods = ["ldap"] + + [auth.ldap] + base = "ou=Users,dc=example,dc=com" + filter = "(objectClass=inetOrgPerson)" +``` diff --git a/doc/authentication-backends/PKI-authentication-module.md b/doc/authentication-methods/pki.md similarity index 88% rename from doc/authentication-backends/PKI-authentication-module.md rename to doc/authentication-methods/pki.md index a62876769b0..638158150cb 100644 --- a/doc/authentication-backends/PKI-authentication-module.md +++ b/doc/authentication-methods/pki.md @@ -1,7 +1,7 @@ ## Overview -It is a simple auth backend, meant to be used with SASL EXTERNAL authentication mechanism. -It simply accepts all usernames as long as they are validated by SASL logic. +This is a simple authentication method, meant to be used with the `SASL EXTERNAL` mechanism. +It simply accepts all usernames as long as they are validated by the SASL logic. ## WARNING @@ -15,5 +15,4 @@ These include: ## Configuration options -None. - +None diff --git a/doc/authentication-methods/rdbms.md b/doc/authentication-methods/rdbms.md new file mode 100644 index 00000000000..3c3dc89dcb5 --- /dev/null +++ b/doc/authentication-methods/rdbms.md @@ -0,0 +1,27 @@ +## Overview + +This authentication method stores user accounts in a relational database, e.g. MySQL or PostgreSQL. 
+ +## Configuration + +The `rdbms` method uses an outgoing connection pool of type `rdbms` with the `default` tag - it has to be defined in the `outgoing_pools` section. + +### Example + +Authentication: + +``` +[auth] + methods = ["rdbms"] +``` + +Outgoing pools: + +``` +[outgoing_pools.rdbms.default.connection] + driver = "pgsql" + host = "localhost" + database = "mongooseim" + username = "mongooseim" + password = "mongooseim_secret" +``` diff --git a/doc/authentication-methods/riak.md b/doc/authentication-methods/riak.md new file mode 100644 index 00000000000..0e21dadb08b --- /dev/null +++ b/doc/authentication-methods/riak.md @@ -0,0 +1,36 @@ +## Overview + +This authentication method stores user accounts in Riak. + +## Configuration options + +The `riak` method uses an outgoing connection pool of type `riak` with the `default` tag - it has to be defined in the `outgoing_pools` section. + +There is one additional option: + +### `auth.riak.bucket_type` +* **Syntax:** string +* **Default:** `"users"` +* **Example:** `bucket_type = "user_bucket"` + +Bucket type for storing users in Riak. 
+ +### Example + +Authentication: + +```toml +[auth] + methods = ["riak"] + + [auth.riak] + bucket_type = "user" +``` + +Outgoing pools: + +```toml +[outgoing_pools.riak.default] + connection.address = "127.0.0.1" + connection.port = 8087 +``` diff --git a/doc/developers-guide/Basic-iq-handler.md b/doc/developers-guide/Basic-iq-handler.md index 178d42d6827..01dcb4590ae 100644 --- a/doc/developers-guide/Basic-iq-handler.md +++ b/doc/developers-guide/Basic-iq-handler.md @@ -62,20 +62,6 @@ process_iq(_From, _To, Acc, IQ) -> ``` -### IQ processing policies - -The server may use one of the following strategies to handle incoming stanzas: - -* `no_queue` registers a new IQ handler, which will be called in the - context of a process serving the connection on which the IQ arrives -* `one_queue` spawns a new process by which the incoming IQ stanzas will - be handled -* `{queues, N}` spawns **N** processes. Every incoming stanza will be then - handled by one of those processes -* `parallel` registers the handler without spawning a new process, a new process - will be spawned for each incoming stanza - - ## Test your handler Go to `big_tests/tests` and create a test suite for your handler. diff --git a/doc/developers-guide/logging.md b/doc/developers-guide/logging.md index f2fd5229222..1873b615ca9 100644 --- a/doc/developers-guide/logging.md +++ b/doc/developers-guide/logging.md @@ -28,17 +28,15 @@ Please be mindful of what is logged and which log level is used for it. # Logging levels -A system operator can choose the global log level by setting `loglevel` in `mongooseim.cfg`. - -- level 8 – all -- level 7 – debug -- level 6 – info -- level 5 - notice -- level 4 - warning -- level 3 - error -- level 2 - critical -- level 1 - alert -- level 0 - emergency +A system operator can choose the global log level by setting `loglevel` in `mongooseim.toml`. 
+ +Possible values are the standard syslog severity levels, plus all or none: +`"all"`, `"debug"`, `"info"`, `"notice"`, `"warning"`, `"error"`, `"critical"`, `"alert"`, `"emergency"`, and `"none"`. + +```toml +[general] + loglevel = "notice" + ``` If a user sets the log level to `all`, then they would see all messages in logs. diff --git a/doc/migrations/3.7.0_3.7.x.md b/doc/migrations/3.7.0_3.7.x.md index fdb89e67a7f..5076683b895 100644 --- a/doc/migrations/3.7.0_3.7.x.md +++ b/doc/migrations/3.7.0_3.7.x.md @@ -1,3 +1,13 @@ +## TOML configuration file + +There is a new [TOML configuration file](../../Advanced-configuration): `mongooseim.toml`. +The legacy `mongooseim.cfg` file is still supported as an alternative, but deprecated. + +You are advised to rewrite your configuration file in the TOML format. +Until then, you can still make MongooseIM use the old format by setting the `MONGOOSEIM_CONFIG_FORMAT` environment variable to `cfg`: + +`MONGOOSEIM_CONFIG_FORMAT=cfg mongooseimctl start` + ## Changes in hooks If modified the code, e.g. by adding a custom extension module, you might want to update your handlers to the following hooks. You can find them in the `mongoose_hooks` module. diff --git a/doc/modules/mod_adhoc.md b/doc/modules/mod_adhoc.md index a1e4b384831..55edeb9a31a 100644 --- a/doc/modules/mod_adhoc.md +++ b/doc/modules/mod_adhoc.md @@ -3,8 +3,22 @@ This module implements [XEP-0050: Ad-Hoc Commands](http://xmpp.org/extensions/xe ### Options -* **iqdisc** (default: `one_queue`) -* **report_commands_node** (boolean, default: `false`): determines whether the Ad-Hoc Commands should be announced upon Service Discovery +#### `modules.mod_adhoc.iqdisc.type` +* **Syntax:** string, one of `"one_queue"`, `"no_queue"`, `"queues"`, `"parallel"` +* **Default:** `"one_queue"` + +Strategy to handle incoming stanzas. For details, please refer to +[IQ processing policies](../../advanced-configuration/Modules/#iq-processing-policies). 
+ +#### `modules.mod_adhoc.report_commands_node` +* **Syntax:** boolean +* **Default:** `false` +* **Example:** `report_commands_node = true` + +Determines whether the Ad-Hoc Commands should be announced upon Service Discovery. ### Example configuration -` {mod_adhoc, [{report_commands_node, true}]} ` +``` +[modules.mod_adhoc] + report_commands_node = true +``` diff --git a/doc/modules/mod_amp.md b/doc/modules/mod_amp.md index 44586d50011..af27ee7eae7 100644 --- a/doc/modules/mod_amp.md +++ b/doc/modules/mod_amp.md @@ -16,7 +16,7 @@ none ### Example Configuration ``` - {mod_amp, []}, +[modules.mod_amp] ``` ### XEP Support diff --git a/doc/modules/mod_auth_token.md b/doc/modules/mod_auth_token.md index 684f529bfa6..f3d7d23d9fb 100644 --- a/doc/modules/mod_auth_token.md +++ b/doc/modules/mod_auth_token.md @@ -15,55 +15,17 @@ Generation of keys necessary to sign binary tokens is delegated to module `mod_k ### Options -#### Validity periods +#### `modules.mod_auth_token.validity_period` +* **Syntax:** Array of TOML tables with the following keys: `token`, `value`, `unit` and following values: {token = `values: "access", "refresh", "provision"`, value = `non-negative integer`, unit = `values: "days", "hours", "minutes", "seconds"`}. +* **Default:** `[{token = "access", value = 1, unit = "hours"}, {token = "refresh", value = 25, unit = "days"}]` +* **Example:** `[{token = "access", value = 13, unit = "minutes"}, {token = "refresh", value = 13, unit = "days"}]` Validity periods of access and refresh tokens can be defined independently. - -Allowed units are: - -* days -* hours -* minutes -* seconds - -The default values for tokens are: - -* 1 hour for an access token -* 25 days for a refresh token - -Example configuration from `mongooseim.cfg`, inside `modules` section: - -```erlang -{modules, [ - {mod_auth_token, [{{validity_period, access}, {13, minutes}}, - {{validity_period, refresh}, {13, days}}] -]}. 
-``` - Validity period configuration for provision tokens happens outside the module since the server does not generate provision tokens - it only validates them. #### Required keys -Keys are used for signing binary tokens using an HMAC with SHA-2 family function SHA-384. -Therefore, `mod_auth_token` requires `mod_keystore` to provide some predefined keys. - -The required keys are (example from `mongooseim.cfg`): - -```erlang -{mod_keystore, [{keys, [{token_secret, ram}, - {provision_pre_shared, {file, "priv/provision_pre_shared.key"}}]}]} -``` - -`token_secret` is a RAM-only (i.e. generated on cluster startup, never written to disk) key used for signing and verifying access and refresh tokens. - -`provision_pre_shared` is a key read from a file. -As its name suggests, it's shared with a service issuing provision tokens. -Clients then use these provision tokens to authenticate with MongooseIM. - -While it's not enforced by the server and left completely to the operator, `provision_pre_shared` keys probably should not be shared between virtual XMPP domains hosted by the server. -That is, make sure the module configuration specifying a `provision_pre_shared` key is specific to an XMPP domain. - -MongooseIM can't generate provision tokens on its own (neither can it distribute them to clients), so while configuring a `provision_pre_shared` key to be RAM-only is technically possible, it would in practice disable the provision token support (as no external service could generate a valid token with this particular RAM key). +To read more about the keys MongooseIM makes use of, please refer to [mod_keystore](mod_keystore.md) documentation. ### Token types @@ -193,9 +155,10 @@ Access token validity can't be sidestepped right now. ### Example configuration -```erlang -{modules, [ - {mod_auth_token, [{{validity_period, access}, {13, minutes}}, - {{validity_period, refresh}, {13, days}}] -]}. 
+``` +[modules.mod_auth_token] + validity_period = [ + {token = "access", value = 13, unit = "minutes"}, + {token = "refresh", value = 13, unit = "days"} + ] ``` diff --git a/doc/modules/mod_blocking.md b/doc/modules/mod_blocking.md index 8f465de7491..00b467454f5 100644 --- a/doc/modules/mod_blocking.md +++ b/doc/modules/mod_blocking.md @@ -7,7 +7,7 @@ The protocol is much simpler than privacy lists. ### Example Configuration ``` -{mod_blocking, []}, +[modules.mod_blocking] ``` The module is not configurable because internally it is an interface to privacy lists, so settings like storage backend apply to it automatically. diff --git a/doc/modules/mod_bosh.md b/doc/modules/mod_bosh.md index 11024b71266..1ce39ba93b5 100644 --- a/doc/modules/mod_bosh.md +++ b/doc/modules/mod_bosh.md @@ -1,30 +1,67 @@ ### Module Description -This module implements [XEP-0206: XMPP Over BOSH](http://xmpp.org/extensions/xep-0206.html) (using [XEP-0124: Bidirectional-streams Over Synchronous HTTP (BOSH)](http://xmpp.org/extensions/xep-0124.html)), allowing clients to connect to MongooseIM over regular HTTP long-lived connections. +This module implements [XEP-0206: XMPP Over BOSH](http://xmpp.org/extensions/xep-0206.html) (using [XEP-0124: Bidirectional-streams Over Synchronous HTTP (BOSH)](http://xmpp.org/extensions/xep-0124.html)), + allowing clients to connect to MongooseIM over regular HTTP long-lived connections. -If you want to use BOSH, you must enable it both in the `listen` section of `mongooseim.cfg` ([Listener Modules](../advanced-configuration/Listener-modules.md)) and as a module. +If you want to use BOSH, you must enable it both in the `listen` section of +`mongooseim.toml` ([Listener Modules](../advanced-configuration/listen.md)) + and as a module. ### Options -* `inactivity` (positive integer or `infinity`, default: 30): Maximum allowed inactivity time for a BOSH connection. Please note that a long-polling request is not considered to be an inactivity. 
-* `max_wait` (positive integer or `infinity`, default: `infinity`): This is the longest time (in seconds) that the connection manager will wait before responding to any request during the session. -* `server_acks` (boolean, default: `false`): Enables/disables [acks](http://xmpp.org/extensions/xep-0124.html#ack-request) sent by server. -* `backend` (atom, default: `mnesia`): Backend used for storing BOSH session data. `mnesia` is the only supported value. -* `maxpause` (positive integer, default: 120): Maximum allowed pause in seconds (e.g. to switch between pages and then resume connection) to request by client-side. +#### `modules.mod_bosh.inactivity` + * **Syntax:** positive integer or the string `"infinity"` + * **Default:** `30` + * **Example:** `inactivity = 30` + +Maximum allowed inactivity time for a BOSH connection. +Please note that a long-polling request is not considered to be an inactivity. + +#### `modules.mod_bosh.max_wait` + * **Syntax:** positive integer or the string `"infinity"` + * **Default:** `"infinity"` + * **Example:** `max_wait = 30` + + This is the longest time (in seconds) that the connection manager will wait before responding to any request during the session. + +#### `modules.mod_bosh.server_acks` + * **Syntax:** boolean + * **Default:** `false` + * **Example:** `server_acks = true` + +Enables/disables [acks](http://xmpp.org/extensions/xep-0124.html#ack-request) sent by server. + +#### `modules.mod_bosh.backend` + * **Syntax:** `"mnesia"` + * **Default:** `"mnesia"` + * **Example:** `backend = "mnesia"` + +Backend used for storing BOSH session data. `"mnesia"` is the only supported value. +#### `modules.mod_bosh.maxpause` + * **Syntax:** positive integer + * **Default:** `120` + * **Example:** `maxpause = 30` + +Maximum allowed pause in seconds (e.g. to switch between pages and then resume connection) to request by client-side. 
### Example Configuration In the listener section: ``` -{listen, - [ - { 5280, ejabberd_cowboy, [ - {num_acceptors, 10}, - {max_connections, 1024}, - {modules, [ - {"_", "/http-bind", mod_bosh} - ]} - ]} -``` +[[listen.http]] + port = 5280 + transport.num_acceptors = 10 + transport.max_connections = 1024 + [[listen.http.handlers.mod_bosh]] + host = "_" + path = "/http-bind" +``` In the module section: -``` {mod_bosh, []} ``` +``` +[modules.mod_bosh] + inactivity = 20 + max_wait = "infinity" + server_acks = true + backend = "mnesia" + maxpause = 120 +``` diff --git a/doc/modules/mod_caps.md b/doc/modules/mod_caps.md index 6bcc38539db..c061218e5d2 100644 --- a/doc/modules/mod_caps.md +++ b/doc/modules/mod_caps.md @@ -6,5 +6,25 @@ It is not this module's responsibility to intercept and answer disco requests ro ### Options This module expects two optional arguments that apply to [cache tab](https://github.com/processone/cache_tab): -* `cache_size` (default: 1000) - the size of a cache_tab (the amount of entries) holding the information about capabilities of each user. -* `cache_life_time` (default: 86) - time (in seconds) after which entries will be removed + +#### `modules.mod_caps.cache_size` +* **Syntax:** non-negative integer +* **Default:** `1000` +* **Example:** `cache_size = 2000` + +The size of a cache_tab (the amount of entries) holding the information about capabilities of each user. + +#### `modules.mod_caps.cache_life_time` +* **Syntax:** non-negative integer or the string `"infinity"` +* **Default:** `86` +* **Example:** `cache_life_time = 30` + +Time (in seconds) after which entries will be removed. 
+ +### Example Configuration + +``` +[modules.mod_caps] + cache_size = 2000 + cache_life_time = 86 +``` diff --git a/doc/modules/mod_carboncopy.md b/doc/modules/mod_carboncopy.md index 13b03ff398d..8b0719bf684 100644 --- a/doc/modules/mod_carboncopy.md +++ b/doc/modules/mod_carboncopy.md @@ -56,7 +56,16 @@ For an Erlang-based test suite, please see [/esl/ejabberd_tests/blob/master/test ### Options -* **iqdisc** (default: no_queue) +#### `modules.mod_carboncopy.iqdisc.type` +* **Syntax:** string, one of `"one_queue"`, `"no_queue"`, `"queues"`, `"parallel"` +* **Default:** `no_queue` + +Strategy to handle incoming stanzas. For details, please refer to +[IQ processing policies](../../advanced-configuration/Modules/#iq-processing-policies). ### Example Configuration -` {mod_carboncopy, []} ` + +``` +[modules.mod_carboncopy] + iqdisc.type = "no_queue" +``` diff --git a/doc/modules/mod_commands.md b/doc/modules/mod_commands.md index 46d96f65b96..93693d6a96d 100644 --- a/doc/modules/mod_commands.md +++ b/doc/modules/mod_commands.md @@ -15,7 +15,7 @@ This module contains command definitions loaded when the module is activated. There are no more configuration parameters, so the following entry in the config file is sufficient: ``` -{mod_commands, []]}, +[modules.mod_commands] ``` ## Command definition diff --git a/doc/modules/mod_csi.md b/doc/modules/mod_csi.md index 5ca0a2cec31..15fa04466d3 100644 --- a/doc/modules/mod_csi.md +++ b/doc/modules/mod_csi.md @@ -9,12 +9,18 @@ The implementation in MongooseIM will simply buffer all packets (up to a configu ### Options -* `buffer_max` (default: 20): Buffer size for messages queued when session was `inactive` +#### `modules.mod_csi.buffer_max` +* **Syntax:** non-negative integer or the string `"infinity"` +* **Default:** `20` +* **Example:** `buffer_max = 40` + +Buffer size for messages queued when session was `inactive`. 
### Example Configuration -```Erlang - {mod_csi, [{buffer_max, 40}]}, +``` +[modules.mod_csi] + buffer_max = 40 ``` ### Metrics diff --git a/doc/modules/mod_disco.md b/doc/modules/mod_disco.md index 4437c5cf079..d60146de49e 100644 --- a/doc/modules/mod_disco.md +++ b/doc/modules/mod_disco.md @@ -2,14 +2,57 @@ Implements [XEP-0030: Service Discovery](http://xmpp.org/extensions/xep-0030.html). The module itself provides only the essential disco interface, the actual capabilities announced by Service Discovery are gathered via executing a fold-type hook. ### Options -* **iqdisc** (default: `one_queue`) -* **extra_domains** (list of binaries, default: `[]`): Adds domains that are not registered with other means to a local item announcement (response to `http://jabber.org/protocol/disco#items` IQ get). - Please note that `mod_disco` doesn't verify these domains, so if no handlers are registered later for them, a client will receive a `service-unavailable` error for every stanza sent to one of these hosts. -* **server_info** (list of tuples `{[Module] | all, Name, [URL]}`, default: `[]`): Adds extra disco information to all or chosen modules. - Example: `{server_info, [{all, "abuse-address", ["admin@example.com"]}, {[mod_muc, mod_disco], "friendly-spirits", ["spirit1@localhost", "spirit2@localhost"]}]}`. - New fields will be added in a manner compliant with XEP-0157. -* **users_can_see_hidden_services** (boolean, default: `true`): MongooseIM node with this option set to `false` will exclude ["hidden services"](../advanced-configuration/Listener-modules.md#xmpp-components-ejabberd_service) from disco results sent to clients (identified by bare or full JID). -Other entities, with empty username part in their JIDs (e.g. `component.example.com`), will still receive full disco results. +#### `modules.mod_disco.iqdisc.type` +* **Syntax:** string, one of `"one_queue"`, `"no_queue"`, `"queues"`, `"parallel"` +* **Default:** `"no_queue"` + +Strategy to handle incoming stanzas. 
For details, please refer to +[IQ processing policies](../../advanced-configuration/Modules/#iq-processing-policies). + +#### `modules.mod_disco.extra_domains` +* **Syntax:** array of strings, valid domain names +* **Default:** no extra domains +* **Example:** `extra_domains = ["custom_domain"]` + +Adds domains that are not registered with other means to a local item announcement (response to `http://jabber.org/protocol/disco#items` IQ get). +Please note that `mod_disco` doesn't verify these domains, so if no handlers are registered later for them, a client will receive a `service-unavailable` error for every stanza sent to one of these hosts. + +#### `modules.mod_disco.server_info` +* **Syntax:** array of tables described below +* **Default:** no additional server info +* **Example:** +``` +server_info = [ + {module = "all", name = "abuse-address", urls = ["admin@example.com"]} + ] +``` +Adds extra disco information to all or chosen modules. +New fields will be added in a manner compliant with [XEP-0157](https://xmpp.org/extensions/xep-0157.html). + +Required keys and their values for each entry: + +* `module` - the string `"all"` or an array of module names for which the additional server information is to be returned +* `name` - a non-empty string with the name of the field +* `urls` - an array of valid addresses + +#### `modules.mod_disco.users_can_see_hidden_services` +* **Syntax:** boolean +* **Default:** `true` +* **Example:** `users_can_see_hidden_services = false` + +MongooseIM node with this option set to `false` will exclude ["hidden services"](../advanced-configuration/Listener-modules.md#xmpp-components-ejabberd_service) +from disco results sent to clients (identified by bare or full JID). +Other entities, with empty username part in their JIDs (e.g. `component.example.com`), +will still receive full disco results. 
### Example Configuration -` {mod_disco, []} ` +``` +[modules.mod_disco] + iqdisc.type = "one_queue" + extra_domains = ["some_domain", "another_domain"] + server_info = [ + {module = "all", name = "abuse-address", urls = ["admin@example.com"]}, + {module = ["mod_muc", "mod_disco"], name = "friendly-spirits", urls = ["spirit1@localhost", "spirit2@localhost"]} + ] + users_can_see_hidden_services = true +``` diff --git a/doc/modules/mod_event_pusher.md b/doc/modules/mod_event_pusher.md index e36113eb76b..cdd580c8ffc 100644 --- a/doc/modules/mod_event_pusher.md +++ b/doc/modules/mod_event_pusher.md @@ -12,30 +12,37 @@ handler. ### Options -* **backends** (required, list) - Specifies backends to register with the frontend, -along with arguments that will be passed to the backend. -Currently supported backends include [sns], [push] and [http_notification]. +#### `modules.mod_event_pusher.backend` +* **Syntax:** Array of TOML tables. See description. +* **Default:** see description +* **Example:** see description + +Specifies backends to register with the frontend, along with arguments that will be passed to the backend. +Currently supported backends include [sns], [push], [http_notification] and [rabbit]. Refer to their specific documentation to learn more about their functions and configuration options. ### Example configuration -```Erlang -{mod_event_pusher, [ - {backends, [ - {sns, [ - {access_key_id, "AKIAIOSFODNN7EXAMPLE"}, - {secret_access_key, "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY"}, - % ... - ]}, - {push, [ - {backend, mnesia}, - {wpool, [{workers, 200}]}, - {plugin_module, mod_event_pusher_push_plugin_defaults} - ]} - ]} -]} +``` +[modules.mod_event_pusher] + backend.sns.access_key_id = "AKIAIOSFODNN7EXAMPLE" + backend.sns.secret_access_key = "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY" +# ... + + backend.push.backend = "mnesia" + backend.push.wpool.workers = 200 +# ... + + backend.http.pool_name = "http_pool" + backend.http.path = "/notifications" +# ... 
+ + backend.rabbit.presence_exchange.name ="presence" + backend.rabbit.presence_exchange.type = "topic" +# ... ``` [sns]: ./mod_event_pusher_sns.md [push]: ./mod_event_pusher_push.md [http_notification]: ./mod_event_pusher_http.md +[rabbit]: ./mod_event_pusher_rabbit.md \ No newline at end of file diff --git a/doc/modules/mod_event_pusher_http.md b/doc/modules/mod_event_pusher_http.md index 03e4ca0b919..dbea550f6ea 100644 --- a/doc/modules/mod_event_pusher_http.md +++ b/doc/modules/mod_event_pusher_http.md @@ -33,48 +33,56 @@ It must be defined in the [`outgoing_pools` settings](../advanced-configuration/ ## Options -* `pool_name`: name of the pool to use (as defined in outgoing_pools) -* `path`: path part of an URL to which a request should be sent (will be appended to the pool's prefix path). -* `callback_module`: name of a module which should be used to check whether a notification should be sent. +#### `modules.mod_event_pusher_http.pool_name` +* **Syntax:** non-empty string +* **Default:** `"http_pool"` +* **Example:** `pool_name = "http_pool"` + +Name of the pool to use (as defined in outgoing_pools). + +#### `modules.mod_event_pusher_http.path` +* **Syntax:** string +* **Default:** `""` +* **Example:** `path = "/notifications"` + +Path part of an URL to which a request should be sent (will be appended to the pool's prefix path). + +#### `modules.mod_event_pusher_http.callback_module` +* **Syntax:** string +* **Default:** `"mod_event_pusher_http_defaults"` +* **Example:** `callback_module = "mod_event_pusher_http_notifications"` + +Name of a module which should be used to check whether a notification should be sent. ## Example configuration -```erlang -{outgoing_pools, [ - {http, global, http_pool, [{workers, 50}], - [{server, "http://localhost:8000"}, - {path_prefix, "/webservice"}]} -]}. 
``` - -```erlang -{mod_event_pusher, [ - {backends, [ - {http, [ - {pool_name, http_pool}, - {path, "/notifications"} - ]} - ]} -]} +[outgoing_pools.http.http_pool] + scope = "global" + workers = 50 + + [outgoing_pools.http.http_pool.connection] + host = "https://localhost:8000" + path_prefix = "/webservice" + request_timeout = 2000 + +[modules.mod_event_pusher.backend.http] + pool_name = "http_pool" + path = "/notifications" ``` Notifications will be POSTed to `http://localhost:8000/webservice/notifications`. -```erlang -{mod_event_pusher, [ - {backends, [ - {http, [ - {pool_name, http_pool}, - {path, "/notifications"}, - {callback_module, mod_event_pusher_http_notifications} - ]}, - {http, [ - {pool_name, http_pool}, - {path, "/alerts"}, - {callback_module, mod_event_pusher_http_alerts} - ]} - ]} -]} +``` +[[modules.mod_event_pusher.backend.http]] + pool_name = "http_pool" + path = "/notifications" + callback_module = "mod_event_pusher_http_notifications" + +[[modules.mod_event_pusher.backend.http]] + pool_name = "http_pool" + path = "/alerts" + callback_module = "mod_event_pusher_http_alerts" ``` Here, some notifications will be POSTed to `http://localhost:8000/webservice/notifications` and some to `http://localhost:8000/webservice/alerts`, depending on implementation of `should_make_req/6` in the two callback modules. diff --git a/doc/modules/mod_event_pusher_push.md b/doc/modules/mod_event_pusher_push.md index 524a9496471..84a5d883d99 100644 --- a/doc/modules/mod_event_pusher_push.md +++ b/doc/modules/mod_event_pusher_push.md @@ -15,11 +15,8 @@ attempts to enable them again. This module is very easy to enable, just paste the following to your MongooseIM configuration file: ``` -{mod_event_pusher, [ - {backends, [ - {push, [{wpool, [{workers, 100}]}]} - ]} -]}. +[modules.mod_event_pusher] + backend.push.wpool.workers = 100 ``` And that's basically it. 
 You have just enabled the push notification support
@@ -28,17 +25,37 @@ with 100 asynchronous workers that will handle all push notification related wor
 
 ## Options
 
-* **backend** (atom, default: `mnesia`) - Backend to use for storing the registrations.
-    Possible options are `mnesia` and `rdbms`.
-* **wpool** (list, default: `[]`) - List of options that will be passed to the `worker_pool` library that handles all the requests.
-    Please refer to the [Project Site](https://github.com/inaka/worker_pool) for more details.
-* **plugin_module** (atom, default: `mod_event_pusher_push_plugin_defaults`) - module implementing
-    `mod_event_pusher_push_plugin` behaviour, used for dynamic configuration of push notifications.
-    See the [relevant section](#plugin-module) for more details.
-* **virtual_pubsub_hosts** (list of strings, default: `[]`) - a list of "simulated"
-    Publish-Subscribe domains. You may use the `@HOSTS@` pattern in the domain name. It will
-    automatically be replaced by a respective XMPP domain (e.g. `localhost`).
-    See the [relevant section](#virtual-pubsub-hosts) for more details.
+#### `modules.mod_event_pusher_push.backend`
+* **Syntax:** string, one of `"mnesia"`, `"rdbms"`
+* **Default:** `"mnesia"`
+* **Example:** `backend = "rdbms"`
+
+Backend to use for storing the registrations.
+
+#### `modules.mod_event_pusher_push.wpool`
+* **Syntax:** TOML table. See description.
+* **Default:** `{}`
+* **Example:** `wpool.workers = 200`
+
+List of options that will be passed to the `worker_pool` library that handles all the requests.
+Please refer to the [Project Site](https://github.com/inaka/worker_pool) for more details.
+ +#### `modules.mod_event_pusher_push.plugin_module` +* **Syntax:** non-empty string +* **Default:** `"mod_event_pusher_push_plugin_defaults"` +* **Example:** `plugin_module = "mod_event_pusher_push_plugin_defaults"` + +The module implementing `mod_event_pusher_push_plugin` behaviour, used for dynamic configuration of push notifications. +See the [relevant section](#plugin-module) for more details. + +#### `modules.mod_event_pusher_push.virtual_pubsub_hosts` +* **Syntax:** array of strings +* **Default:** `["pubsub.@HOSTS@"]` +* **Example:** `virtual_pubsub_hosts = ["host1", "host2"]` + +The list of "simulated" Publish-Subscribe domains. You may use the `@HOSTS@` pattern in the domain name. +It will automatically be replaced by a respective XMPP domain (e.g. `localhost`). +See the [relevant section](#virtual-pubsub-hosts) for more details. ## Virtual PubSub hosts @@ -54,21 +71,17 @@ PubSub-less variants. This is an example of how you can migrate the existing setup to the new model. PubSub service still exists, just for the case of a user attempting to create a node. However, its domain is overridden for the purpose of sending push notifications. Please note the value of `virtual_pubsub_hosts` -option. `pubsub.@HOSTS@` is the default domain for `mod_pubsub`. - -```Erlang -{mod_pubsub, [{plugins, [<<"push">>]}]}, % mandatory minimal config - -{mod_event_pusher, [ - {backends, [ - {push, [ - {backend, mnesia}, % optional - {wpool, [{workers, 200}]}, % optional - {plugin_module, mod_event_pusher_push_plugin_defaults}, % optional - {virtual_pubsub_hosts, ["pubsub.@HOSTS@"]} - ]} - ]} -]} +option. `"pubsub.@HOSTS@"` is the default domain for `mod_pubsub`. 
+ +``` +[modules.mod_pubsub] + plugins = ["push"] # mandatory minimal config + +[modules.mod_event_pusher] + backend.push.backend = "mnesia" # optional + backend.push.wpool.workers = 200 # optional + backend.push.plugin_module = "mod_event_pusher_push_plugin_defaults" # optional + backend.push.virtual_pubsub_hosts = ["pubsub.@HOSTS@"] ``` #### Advantages diff --git a/doc/modules/mod_event_pusher_rabbit.md b/doc/modules/mod_event_pusher_rabbit.md index de883c891d6..28c544f3249 100644 --- a/doc/modules/mod_event_pusher_rabbit.md +++ b/doc/modules/mod_event_pusher_rabbit.md @@ -27,39 +27,94 @@ to make the module work. It's well advised to read through [*Advanced configuration/Outgoing connections*](../advanced-configuration/outgoing-connections.md) section before enabling the module. -### Options - -* **presence_exchange** - Defines presence exchange options, such as: - * `name` - (string, default: `<<"presence">>`) - Defines RabbitMQ presence exchange name; - * `type` (string, default: `<<"topic">>`) - Defines RabbitMQ presence exchange type; -* **chat_msg_exchange** - Defines chat message exchange options, such as: - * `name` - (string, default: `<<"chat_msg">>`) - Defines RabbitMQ chat message exchange name; - * `type` (string, default: `<<"topic">>`) - Defines RabbitMQ chat message exchange type; - * `sent_topic` - (string, default: `<<"chat_msg_sent">>`) - Defines RabbitMQ chat message sent topic name; - * `recv_topic` - (string, default: `<<"chat_msg_recv">>`) - Defines RabbitMQ chat message received topic name; -* **groupchat_msg_exchange** - Defines group chat message exchange options, such as: - * `name` - (string, default: `<<"groupchat_msg">>`) - Defines RabbitMQ group chat message exchange name; - * `type` (string, default: `<<"topic">>`) - Defines RabbitMQ group chat message exchange type; - * `sent_topic` (string, default: `<<"groupchat_msg_sent">>`) - Defines RabbitMQ group chat message sent topic name; - * `recv_topic` (string, default: 
`<<"groupchat_msg_recv">>`) - Defines RabbitMQ group chat message received topic name; +### Presence exchange options + +#### `modules.mod_event_pusher.backend.rabbit.presence_exchange.name` +* **Syntax:** non-empty string +* **Default:** `"presence"` +* **Example:** `name = "custom_presence_name"` + +Defines RabbitMQ presence exchange name. + +#### `modules.mod_event_pusher.backend.rabbit.presence_exchange.type` +* **Syntax:** non-empty string +* **Default:** `"topic"` +* **Example:** `type = "custom_presence_topic"` + +Defines RabbitMQ presence exchange type. + +### Chat message options + +#### `modules.mod_event_pusher.backend.rabbit.chat_msg_exchange.name` +* **Syntax:** non-empty string +* **Default:** `"chat_msg"` +* **Example:** `name = "custom_msg_name"` + +Defines RabbitMQ chat message exchange name. + +#### `modules.mod_event_pusher.backend.rabbit.chat_msg_exchange.type` +* **Syntax:** non-empty string +* **Default:** `"topic"` +* **Example:** `type = "custom_msg_topic"` + +Defines RabbitMQ chat message exchange type. + +#### `modules.mod_event_pusher.backend.rabbit.chat_msg_exchange.sent_topic` +* **Syntax:** non-empty string +* **Default:** `"chat_msg_sent"` +* **Example:** `sent_topic = "custom_sent_topic"` + +Defines RabbitMQ chat message sent topic name. + +#### `modules.mod_event_pusher.backend.rabbit.chat_msg_exchange.recv_topic` +* **Syntax:** non-empty string +* **Default:** `"chat_msg_recv"` +* **Example:** `recv_topic = "custom_recv_topic"` + +Defines RabbitMQ chat message received topic name. + +### Group chat message options + +#### `modules.mod_event_pusher.backend.rabbit.groupchat_msg_exchange.name` +* **Syntax:** non-empty string +* **Default:** `"groupchat_msg"` +* **Example:** `name = "custom_group_msg_name"` + +Defines RabbitMQ group chat message exchange name. 
+ +#### `modules.mod_event_pusher.backend.rabbit.groupchat_msg_exchange.type` +* **Syntax:** non-empty string +* **Default:** `"topic"` +* **Example:** `type = "custom_group_msg_topic"` + +Defines RabbitMQ group chat message exchange type. + +#### `modules.mod_event_pusher.backend.rabbit.groupchat_msg_exchange.sent_topic` +* **Syntax:** non-empty string +* **Default:** `"groupchat_msg_sent"` +* **Example:** `sent_topic = "custom_group_sent_topic"` + +Defines RabbitMQ group chat message sent topic name. + +#### `modules.mod_event_pusher.backend.rabbit.groupchat_msg_exchange.recv_topic` +* **Syntax:** non-empty string +* **Default:** `"groupchat_msg_recv"` +* **Example:** `recv_topic = "custom_group_recv_topic"` + +Defines RabbitMQ group chat message received topic name. ### Example configuration -```Erlang -{mod_event_pusher, [ - {backends, [ - {rabbit, [ - {presence_exchange, [{name, <<"presence">>}, - {type, <<"topic">>}]}, - {chat_msg_exchange, [{name, <<"chat_msg">>}, - {sent_topic, <<"chat_msg_sent">>}, - {recv_topic, <<"chat_msg_recv">>}]}, - {groupchat_msg_exchange, [{name, <<"groupchat_msg">>}, - {sent_topic, <<"groupchat_msg_sent">>}, - {recv_topic, <<"groupchat_msg_recv">>}]} - ]} - ]} -]} +``` +[modules.mod_event_pusher] + backend.rabbit.presence_exchange.name ="presence" + backend.rabbit.presence_exchange.type = "topic" + backend.rabbit.chat_msg_exchange.name = "chat_msg" + backend.rabbit.chat_msg_exchange.sent_topic = "chat_msg_sent" + backend.rabbit.chat_msg_exchange.recv_topic = "chat_msg_recv" + backend.rabbit.groupchat_msg_exchange.name = "groupchat_msg" + backend.rabbit.groupchat_msg_exchange.sent_topic = "groupchat_msg_sent" + backend.rabbit.groupchat_msg_exchange.recv_topic = "groupchat_msg_recv" ``` ### JSON Schema examples diff --git a/doc/modules/mod_event_pusher_sns.md b/doc/modules/mod_event_pusher_sns.md index 54c46e9fce4..0f136bb8505 100644 --- a/doc/modules/mod_event_pusher_sns.md +++ b/doc/modules/mod_event_pusher_sns.md @@ -13,19 +13,97 
@@ Full topics for notifications (ARN as defined in [Amazon Resource Names][aws-arn ### Options -* **presence_updates_topic** (string, default: unset) - Defines Amazon SNS Topic for presence change notifications. Remove this option to disable these notifications. -* **pm_messages_topic** (string, default: unset) - Defines Amazon SNS Topic for private message notifications. Remove this option to disable these notifications. -* **muc_messages_topic** (string, default: unset) - Defines Amazon SNS Topic for group message notifications. Remove this option to disable these notifications. -* **plugin_module** (atom, default: 'mod_event_pusher_sns_defaults') - Sets a callback module used for creating user's GUID used in notifications (from user's JID) and for defining custom attributes attached to a published SNS message. -* **muc_host** (string, default: "conference.@HOST@") - Messages from this MUC host will be sent to the set SNS topic for MUCs. -* **sns_host** (string, default: unset) - URL to the Amazon SNS service. The URL may be in [virtual host form][aws-virtual-host], and for AWS needs to point at a specific regional endpoint. The scheme, port and path specified in the URL will be used to publish notifications via HTTP POST method. -* **region** (string, default: unset) - The [AWS region][aws-region] to use for requests. -* **access_key_id** (string, default: unset) - [ID of the access key][aws-keys] to use for authorization. -* **secret_access_key** (string, default: unset) - [Secret access key][aws-keys] to use for authorization. -* **account_id** (string, default: unset) - 12 digit number as defined in [AWS Account Identifiers][aws-acct-identifier] to use for creating TopicArn for publishing notifications. 
-* **pool_size** (integer, default: 100) - Worker pool size for publishing notifications
-* **publish_retry_count** (integer, default: 2) - Retry count in case of a publish error
-* **publish_retry_time_ms** (integer, default: 50) - Base exponential backoff time (in ms) for publish errors
+#### `modules.mod_event_pusher_sns.presence_updates_topic`
+* **Syntax:** string
+* **Default:** no default is given
+* **Example:** `presence_updates_topic = "user_presence_updated"`
+
+Defines Amazon SNS Topic for presence change notifications. Remove this option to disable these notifications.
+
+#### `modules.mod_event_pusher_sns.pm_messages_topic`
+* **Syntax:** string
+* **Default:** no default is given
+* **Example:** `pm_messages_topic = "user_message_sent"`
+
+Defines Amazon SNS Topic for private message notifications. Remove this option to disable these notifications.
+
+#### `modules.mod_event_pusher_sns.muc_messages_topic`
+* **Syntax:** string
+* **Default:** no default is given
+* **Example:** `muc_messages_topic = "user_messagegroup_sent"`
+
+Defines Amazon SNS Topic for group message notifications. Remove this option to disable these notifications.
+
+#### `modules.mod_event_pusher_sns.plugin_module`
+* **Syntax:** string
+* **Default:** `"mod_event_pusher_sns_defaults"`
+* **Example:** `plugin_module = "mod_event_pusher_sns_defaults"`
+
+Sets a callback module used for creating user's GUID used in notifications (from user's JID) and for defining custom attributes attached to a published SNS message.
+
+#### `modules.mod_event_pusher_sns.muc_host`
+* **Syntax:** string
+* **Default:** `"conference.@HOST@"`
+* **Example:** `muc_host = "conference.@HOST@"`
+
+Messages from this MUC host will be sent to the set SNS topic for MUCs.
+
+#### `modules.mod_event_pusher_sns.sns_host`
+* **Syntax:** string
+* **Default:** none, this option is mandatory
+* **Example:** `sns_host = "sns.eu-west-1.amazonaws.com"`
+
+URL to the Amazon SNS service. 
The URL may be in [virtual host form][aws-virtual-host], and for AWS needs to point at a specific regional endpoint. The scheme, port and path specified in the URL will be used to publish notifications via HTTP POST method. + + +#### `modules.mod_event_pusher_sns.region` +* **Syntax:** string +* **Default:** none, this option is mandatory +* **Example:** `region = "eu-west-1"` + +The [AWS region][aws-region] to use for requests. + +#### `modules.mod_event_pusher_sns.access_key_id` +* **Syntax:** string +* **Default:** none, this option is mandatory +* **Example:** `access_key_id = "AKIAIOSFODNN7EXAMPLE"` + +[ID of the access key][aws-keys] to use for authorization. + +#### `modules.mod_event_pusher_sns.secret_access_key` +* **Syntax:** string +* **Default:** none, this option is mandatory +* **Example:** `secret_access_key = "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY"` + +[Secret access key][aws-keys] to use for authorization. + +#### `modules.mod_event_pusher_sns.account_id` +* **Syntax:** string +* **Default:** none, this option is mandatory +* **Example:** `account_id = "123456789012"` + +12 digit number as defined in [AWS Account Identifiers][aws-acct-identifier] to use for creating TopicArn for publishing notifications. + +#### `modules.mod_event_pusher_sns.pool_size` +* **Syntax:** non-negative integer +* **Default:** `100` +* **Example:** `pool_size = 100` + +Worker pool size for publishing notifications + +#### `modules.mod_event_pusher_sns.publish_retry_count` +* **Syntax:** non-negative integer +* **Default:** `2` +* **Example:** `publish_retry_count = 2` + +Retry count in case of a publish error. + +#### `modules.mod_event_pusher_sns.publish_retry_time_ms` +* **Syntax:** non-negative integer +* **Default:** `50` +* **Example:** `publish_retry_time_ms = 50` + +Base exponential backoff time (in ms) for publish errors. 
[aws-acct-identifier]: http://docs.aws.amazon.com/general/latest/gr/acct-identifiers.html [aws-virtual-host]: https://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html @@ -35,25 +113,21 @@ Full topics for notifications (ARN as defined in [Amazon Resource Names][aws-arn ### Example configuration -```Erlang -{mod_event_pusher, [ - {backends, [ - {sns, [ - {access_key_id, "AKIAIOSFODNN7EXAMPLE"}, - {secret_access_key, "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY"}, - {region, "eu-west-1"}, - {account_id, "123456789012"}, - {sns_host, "sns.eu-west-1.amazonaws.com"}, - {plugin_module, mod_event_pusher_sns_defaults}, - {presence_updates_topic, "user_presence_updated"}, - {pm_messages_topic, "user_message_sent"}, - {muc_messages_topic, "user_messagegroup_sent"}, - {pool_size, 100}, - {publish_retry_count, 2}, - {publish_retry_time_ms, 50} - ]} - ]} -]} +``` +[modules.mod_event_pusher] + backend.sns.access_key_id = "AKIAIOSFODNN7EXAMPLE" + backend.sns.secret_access_key = "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY" + backend.sns.region = "eu-west-1" + backend.sns.account_id = "123456789012" + backend.sns.sns_host = "sns.eu-west-1.amazonaws.com" + backend.sns.muc_host = "conference.HOST" + backend.sns.plugin_module = "mod_event_pusher_sns_defaults" + backend.sns.presence_updates_topic = "user_presence_updated" + backend.sns.pm_messages_topic = "user_message_sent" + backend.sns.muc_messages_topic = "user_messagegroup_sent" + backend.sns.pool_size = 100 + backend.sns.publish_retry_count = 2 + backend.sns.publish_retry_time_ms = 50 ``` ## JSON Schema examples diff --git a/doc/modules/mod_extdisco.md b/doc/modules/mod_extdisco.md index f12df7a26c4..d25f92b720d 100644 --- a/doc/modules/mod_extdisco.md +++ b/doc/modules/mod_extdisco.md @@ -3,32 +3,68 @@ Implements [XEP-0215: External Service Discovery](http://xmpp.org/extensions/xep The main use-case is to help discover STUN/TURN servers to allow for negotiating media exchanges. 
 ### Options
-* **type** (atom, default: unset) - service type
-#### Service Options
-* **host** (string, required, default: unset): Hostname or an IP address where the service is hosted.
-* **port** (string, recommended, default: unset): The communications port to be used at the host.
-* **transport** (string, optional, default: unset): The underlying transport protocol to be used when communicating with the service.
- * **Valid values:** `udp, tcp`
-* **username** (string, optional, default: unset): A service-generated username for use at the service.
-* **password** (string, optional, default: unset): A service-generated password for use at the service.
+#### `modules.mod_extdisco.service.type`
+* **Syntax:** string
+* **Default:** none, this option is required
+* **Example:** `type = "stun"`
+
+Service type, common values are `"stun"`, `"turn"`, `"ftp"`.
+
+#### `modules.mod_extdisco.service.host`
+* **Syntax:** string
+* **Default:** none, this option is required
+* **Example:** `host = "192.168.0.2"`
+
+Hostname or an IP address where the service is hosted.
+
+#### `modules.mod_extdisco.service.port`
+* **Syntax:** integer
+* **Default:** none, this option is recommended
+* **Example:** `port = 3478`
+
+The communications port to be used at the host.
+
+#### `modules.mod_extdisco.service.transport`
+* **Syntax:** string, one of `"udp"`, `"tcp"`
+* **Default:** none, this option is optional
+* **Example:** `transport = "udp"`
+
+The underlying transport protocol to be used when communicating with the service.
+
+#### `modules.mod_extdisco.service.username`
+* **Syntax:** string
+* **Default:** none, this option is optional
+* **Example:** `username = "username"`
+
+A service-generated username for use at the service.
+
+#### `modules.mod_extdisco.service.password`
+* **Syntax:** string
+* **Default:** none, this option is optional
+* **Example:** `password = "password"`
+
+A service-generated password for use at the service. 
### Example Configuration -```Erlang -{mod_extdisco, [ - {stun, [ - {host, "127.0.0.1"}, - {port, "3478"}, - {transport, "udp"}, - {username, "username"}, - {password, "secret"} - ]}, - {turn, [ - {host, "hostname"}, - {port, "3478"}, - {transport, "tcp"}, - ]} -]}. +```toml +[modules.mod_extdisco] + [[modules.mod_extdisco.service]] + type = "stun" + host = "127.0.0.1" + port = 3478 + transport = "udp" + username = "username" + password = "password" + [[modules.mod_extdisco.service]] + type = "stun" + host = "stun.host.com" + port = 3478 + transport = "tcp" + username = "username2" + password = "password2" + [[modules.mod_extdisco.service]] + type = "turn" + host = "turn.host.com" ``` diff --git a/doc/modules/mod_global_distrib.md b/doc/modules/mod_global_distrib.md index 83998ac9f7c..f3ef547460a 100644 --- a/doc/modules/mod_global_distrib.md +++ b/doc/modules/mod_global_distrib.md @@ -58,9 +58,9 @@ Note that in the edge case of multi-datacenter routing, the messages may be rece #### Bounce -Consider the following edge case: user **U1** logged into datacenter **DC2** quickly reconnects to datacenter **DC3**. +Consider the following edge case: user **U1** logged into datacenter **DC2** and then quickly reconnected to datacenter **DC3**. Because session table has not yet been replicated, **DC2** does not see **U1** in the session table, while a different datacenter **DC1** still sees **U1** logged into **DC2**. -When **U2**, logged into **DC1**, sends a message to **U1**, it will now be rerouted to **DC2** even though the user is now available at **DC3**. +When **U2** logged into **DC1** and sent a message to **U1**, it will now be rerouted to **DC2** even though the user is now available at **DC3**. 
![State after U1's reconnection](mod_global_distrib_bounce_example.svg) @@ -99,58 +99,201 @@ Global distribution modules expose several per-datacenter metrics that can be us ### Notes -* You should only start `mod_global_distrib` by configuring it under `modules` option in `mongooseim.cfg`. Do not add it as host-specific module via `host_config`. +* You should only start `mod_global_distrib` by configuring it under `modules` option in `mongooseim.toml`. Do not add it as host-specific module via `host_config`. * Do not use `mod_offline` on domains given via `global_host` or `local_host` options, as it will decrease messaging robustness; the users logged in other datacenters will not be registered as available by `mod_offline`, and so the messages will not be flushed. ### Options -* **global_host** (string, required): The XMPP domain that will be shared between datacenters. - *Note:* this needs to be one of the domains given in `host` option in `mongooseim.cfg`. -* **local_host** (string, required): XMPP domain that maps uniquely to the local datacenter; it will be used for inter-center routing. - *Note:* this needs to be one of the domains given in `host` option in `mongooseim.cfg`. -* **message_ttl** (integer, default: `4`): Number of times a message can be rerouted between datacenters. -* **connections** (list, default: `[]`): Options for connections maintained by the module; see *Connections' options* section. -* **cache** (list, default: `[]`): Options for caching database lookups; see *Database cache options* section. -* **bounce** (list | `false`, default: `[]`): Options for message bouncing; if `false`, message bouncing is disabled. - See *Message bouncing options* section. -* **redis** (list, default: `[]`): Options for Redis session storage backend. -* **hosts_refresh_interval** (integer, default: 3000) - The interval (in milliseconds) telling how often Redis should be asked if new hosts appeared. 
+#### `modules.mod_global_distrib.global_host` +* **Syntax:** string +* **Default:** none, this option is mandatory +* **Example:** `global_host = "example.com"` + +The XMPP domain that will be shared between datacenters. +*Note:* this needs to be one of the domains given in `general.hosts` option in `mongooseim.toml`. + +#### `modules.mod_global_distrib.local_host` +* **Syntax:** string +* **Default:** none, this option is mandatory +* **Example:** `local_host = "datacenter1.example.com"` + +XMPP domain that maps uniquely to the local datacenter; it will be used for inter-center routing. +*Note:* this needs to be one of the domains given in `general.hosts` option in `mongooseim.toml`. + +#### `modules.mod_global_distrib.message_ttl` +* **Syntax:** non-negative integer +* **Default:** `4` +* **Example:** `message_ttl = 5` + +Number of times a message can be rerouted between datacenters. + +#### `modules.mod_global_distrib.bounce` +* **Syntax:** boolean with only `false` being a valid option +* **Default:** not set and `bounce` is enabled. +* **Example:** `bounce = false` + +If this option is present and set to false, message bouncing will be disabled. Refer [here](#message-bouncing-options) for more details. + +#### `modules.mod_global_distrib.hosts_refresh_interval` +* **Syntax:** non-negative integer, value given in milliseconds +* **Default:** `3000` +* **Example:** `hosts_refresh_interval = 3000` + +The interval telling how often Redis should be asked if new hosts appeared. #### Connections' options -* **endpoints** (list, default: `[{LocalHost, 5555}]`): A list of `{Host, Port}` tuples on which the server will listen for connections. - `Host` can be given as a hostname, in which case it will be resolved to an IP address before first on module start. - The endpoint list will be shared with other datacenters via the replicated backend. 
-* **advertised_endpoints** (list | false, default: false): A list of `{Host, Port}` tuples which will be advertised in Redis and therefore used to establish connection with this node by other nodes. If not specified, `endpoints` value (after resolution) is considered `advertised_endpoints`. The host may be either IP or domain, just like in case of endpoints. The difference is, the domain name won't be resolved but inserted directly to the mappings backend instead. -* **connections_per_endpoint** (integer, default: `1`): Number of outgoing connections that will be established from the current node to each endpoint assigned to a remote domain. -* **endpoint_refresh_interval** (seconds, default: `60`): An interval between remote endpoint list refresh (and connection rebalancing). - A separate timer is maintained for every remote domain. -* **endpoint_refresh_interval_when_empty** (seconds, default: `3`): Endpoint refresh interval, when list of endpoints is empty. -* **disabled_gc_interval** (seconds, default: `60`): An interval between disabled endpoints "garbage collection". - It means that disabled endpoints are periodically verified and if Global Distribution detects that connections is no longer alive, the connection pool is closed completely. -* **tls_opts** (list, required): Options for TLS connections passed to the `fast_tls` driver. - May be set to `false`, in which case all data will be sent via standard TCP connections. - Otherwise, they should at least include `certfile` and `cafile` options. +#### `modules.mod_global_distrib.connections.endpoints` + * **Syntax:** Array of TOML tables with the following keys: `host` and `port`, and the following values: {host = `string`, port = `non_negative_integer`} + * **Default:** `[{host = "LocalHost", port = 5555}]` + * **Example:** `endpoints = [{host = "172.16.0.2", port = 5555}]` + +A list of endpoints on which the server will listen for connections. 
+`host` can be given as a hostname, in which case it will be resolved to an IP address on module start. +The endpoint list will be shared with other datacenters via the replicated backend. + +#### `modules.mod_global_distrib.connections.advertised_endpoints` +* **Syntax:** Array of TOML tables with the following keys: `host` and `port`, and the following values: {host = `string`, port = `non_negative_integer`} **or** `false` +* **Default:** `false` +* **Example:** `advertised_endpoints = [{host = "172.16.0.2", port = 5555}]` + +A list of endpoints which will be advertised in Redis and therefore used to establish connection with this node by other nodes. If not specified, `endpoints` value (after resolution) is considered `advertised_endpoints`. The host may be either IP or domain, just like in case of endpoints. The difference is, the domain name won't be resolved but inserted directly to the mappings backend instead. + +#### `modules.mod_global_distrib.connections.connections_per_endpoint` +* **Syntax:** non-negative integer +* **Default:** `1` +* **Example:** `connections_per_endpoint = 30` + +Number of outgoing connections that will be established from the current node to each endpoint assigned to a remote domain. + +#### `modules.mod_global_distrib.connections.endpoint_refresh_interval` +* **Syntax:** positive integer, value given in seconds +* **Default:** `60` +* **Example:** `endpoint_refresh_interval = 30` + +An interval between remote endpoint list refresh (and connection rebalancing). +A separate timer is maintained for every remote domain. + +#### `modules.mod_global_distrib.connections.endpoint_refresh_interval_when_empty` +* **Syntax:** positive integer, value given in seconds +* **Default:** `3` +* **Example:** `endpoint_refresh_interval_when_empty = 3` + +Endpoint refresh interval, when array of endpoints is empty. 
+
+#### `modules.mod_global_distrib.connections.disabled_gc_interval`
+* **Syntax:** non-negative integer, value given in seconds
+* **Default:** `60`
+* **Example:** `disabled_gc_interval = 60`
+
+An interval between disabled endpoints "garbage collection".
+It means that disabled endpoints are periodically verified and if Global Distribution detects that connections are no longer alive, the connection pool is closed completely.
+
+#### `modules.mod_global_distrib.connections.tls`
+* **Syntax:** boolean with only `false` being a valid option
+* **Default:** none, this option is mandatory. Details in the description
+* **Example:** `tls = false`
+
+If this option is present, all data will be sent via standard TCP connections.
+To enable TLS support, refer to [TLS](#tls-options) options.
+
+#### TLS options
+To enable TLS support at least the `cacertfile` and `certfile` options have to be present. These options will be passed to the `fast_tls` driver.
+
+#### `modules.mod_global_distrib.connections.tls.certfile`
+* **Syntax:** string, path in the file system
+* **Default:** none, this option is mandatory to enable TLS support
+* **Example:** `certfile = "priv/dc1.pem"`
+
+#### `modules.mod_global_distrib.connections.tls.cacertfile`
+* **Syntax:** string, path in the file system
+* **Default:** none, this option is mandatory to enable TLS support
+* **Example:** `cacertfile = "priv/ca.pem"`
+
+#### `modules.mod_global_distrib.connections.tls.ciphers`
+* **Syntax:** string
+* **Default:** not set
+* **Example:** `ciphers = "ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES256-GCM-SHA384"`
+
+Cipher suites to use with StartTLS or TLS. Please refer to the [OpenSSL documentation](http://www.openssl.org/docs/man1.0.2/apps/ciphers.html) for the cipher string format. 
+ +#### `modules.mod_global_distrib.connections.tls.dhfile` +* **Syntax:** string, path in the file system +* **Default:** not set +* **Example:** `dhfile = "dh.pem"` #### Redis session storage options -* **pool** (atom, default: `global_distrib`): Name of the redis pool defined in [outgoing pools](../advanced-configuration/outgoing-connections.md). -* **expire_after** (integer, default: `120`): Number of seconds after which a session entry written by this cluster will expire. -* **refresh_after** (integer, default: `60`): Number of seconds after which session's expiration timer will be refreshed. +#### `modules.mod_global_distrib.redis.pool` +* **Syntax:** string +* **Default:** `"global_distrib"` +* **Example:** `pool = "global_distrib"` + +Name of the redis pool defined in [outgoing pools](../advanced-configuration/outgoing-connections.md). + +#### `modules.mod_global_distrib.redis.expire_after` +* **Syntax:** non-negative integer +* **Default:** `120` +* **Example:** `expire_after = 120` + +Number of seconds after which a session entry written by this cluster will expire. + +#### `modules.mod_global_distrib.redis.refresh_after` +* **Syntax:** non-negative integer +* **Default:** `60` +* **Example:** `refresh_after = 60` + +Number of seconds after which session's expiration timer will be refreshed. #### Database cache options +Options for caching database lookups, by default no options are passed. -* **cache_missed** (boolean, default: `true`): Determines whether an internal session cache should cache lookup failures. - When `false`, only successful database lookups will result in the value being cached. - Changing this option has great negative impact on performance. -* **domain_lifetime_seconds** (integer, default: `600`): How long should subdomain mappings be cached (e.g. `muc.example.com -> datacenter1.test`). -* **jid_lifetime_seconds** (integer, default: `5`): How long should full and bare JID mappings be cached (e.g. 
`user1@example.com/res1 -> datacenter1.test`). -* **max_jids** (integer, default: `10000`): The maximum number of JID entries that can be stored in cache at any point in time. +#### `modules.mod_global_distrib.cache.cache_missed` +* **Syntax:** boolean +* **Default:** `true` +* **Example:** `cache_missed = true` + +Determines whether an internal session cache should cache lookup failures. +When `false`, only successful database lookups will result in the value being cached. +Changing this option has great negative impact on performance. + +#### `modules.mod_global_distrib.cache.domain_lifetime_seconds` +* **Syntax:** non-negative integer, value given in seconds +* **Default:** `600` +* **Example:** `domain_lifetime_seconds = 600` + +How long should subdomain mappings be cached (e.g. `muc.example.com -> datacenter1.test`). + +#### `modules.mod_global_distrib.cache.jid_lifetime_seconds` +* **Syntax:** non-negative integer, value given in seconds +* **Default:** `5` +* **Example:** `jid_lifetime_seconds = 5` + +How long should full and bare JID mappings be cached (e.g. `user1@example.com/res1 -> datacenter1.test`). + +#### `modules.mod_global_distrib.cache.max_jids` +* **Syntax:** non-negative integer +* **Default:** `10000` +* **Example:** `max_jids = 10000` + +The maximum number of JID entries that can be stored in cache at any point in time. #### Message bouncing options +Options for message bouncing. + +#### `modules.mod_global_distrib.bounce.resend_after_ms` +* **Syntax:** non-negative integer +* **Default:** `200` +* **Example:** `resend_after_ms = 200` + +Time after which message will be resent in case of delivery error. -* **resend_after_ms** (integer, default: `200`): Time after which message will be resent in case of delivery error. -* **max_retries** (integer, default: `4`): Number of times message delivery will be retried in case of errors. 
+#### `modules.mod_global_distrib.bounce.max_retries` +* **Syntax:** non-negative integer +* **Default:** `4` +* **Example:** `max_retries = 4` + +Number of times message delivery will be retried in case of errors. #### Global Distribution and Service Discovery @@ -165,29 +308,19 @@ The endpoints used for connection to a remote datacenter may be overridden by gl #### Configuring mod_global_distrib -```Erlang -{mod_global_distrib, [ - {global_host, "example.com"}, - {local_host, "datacenter1.example.com"}, - {connections, [ - {endpoints, [{"172.16.0.2", 5555}]}, - {num_of_connections, 22}, - {tls_opts, [ - {certfile, "/home/user/dc1.pem"}, - {cafile, "/home/user/ca.pem"} - ]} - ]}, - {cache, [ - {domain_lifetime_seconds, 60} - ]}, - {bounce, [ - {resend_after_ms, 300}, - {max_retries, 3} - ]}, - {redis, [ - {pool, global_distrib} - ]} - ]} +``` +[modules.mod_global_distrib] + global_host = "example.com" + local_host = "datacenter1.example.com" + connections.endpoints = [{host = "172.16.0.2", port = 5555}] + connections.advertised_endpoints = [{host = "172.16.0.2", port = 5555}] + connections.tls.certfile = "priv/dc1.pem" + connections.tls.cacertfile = "priv/ca.pem" + connections.connections_per_endpoint = 30 + cache.domain_lifetime_seconds = 60 + bounce.resend_after_ms = 300 + bounce.max_retries = 3 + redis.pool = "global_distrib" ``` #### Overriding endpoints to a remote datacenter diff --git a/doc/modules/mod_http_upload.md b/doc/modules/mod_http_upload.md index 46ccf2c3f1f..f4e2224d170 100644 --- a/doc/modules/mod_http_upload.md +++ b/doc/modules/mod_http_upload.md @@ -7,23 +7,93 @@ Currently, the module supports only the [S3][s3] backend using [AWS Signature Ve ### Options -* **iqdisc** (default: `one_queue`) -* **host** (string, default: `"upload.@HOST@"`): Subdomain for the upload service to reside under. `@HOST@` is replaced with each served domain. -* **backend** (atom, default: `s3`) - Backend to use for generating slots. Currently only `s3` can be used. 
-* **expiration_time** (integer, default: `60`) - Duration (in seconds) after which the generated `PUT` URL will become invalid. -* **token_bytes** (integer, default: `32`) - Number of random bytes of a token that will be used in a generated URL. - The text representation of the token will be twice as long as the number of bytes, e.g. for the default value the token in the URL will be 64 characters long. -* **max_file_size** (integer, default: 10485760 (10 MB)) - Maximum file size (in bytes) accepted by the module. Disabled if set to `undefined`. -* **s3** (list, default: unset) - Options specific to [S3][s3] backend. +#### `modules.mod_http_upload.iqdisc.type` +* **Syntax:** string, one of `"one_queue"`, `"no_queue"`, `"queues"`, `"parallel"` +* **Default:** `"one_queue"` + +Strategy to handle incoming stanzas. For details, please refer to +[IQ processing policies](../../advanced-configuration/Modules/#iq-processing-policies). + +#### `modules.mod_http_upload.host` +* **Syntax:** string +* **Default:** `"upload.@HOST@"` +* **Example:** `host = "upload.@HOST@"` + +Subdomain for the upload service to reside under. `@HOST@` is replaced with each served domain. + +#### `modules.mod_http_upload.backend` +* **Syntax:** non-empty string +* **Default:** `"s3"` +* **Example:** `backend = "s3"` + +Backend to use for generating slots. Currently only `"s3"` can be used. + +#### `modules.mod_http_upload.expiration_time` +* **Syntax:** non-negative integer +* **Default:** `60` +* **Example:** `expiration_time = 120` + +Duration (in seconds) after which the generated `PUT` URL will become invalid. + +#### `modules.mod_http_upload.token_bytes` +* **Syntax:** positive integer +* **Default:** `32` +* **Example:** `token_bytes = 32` + +Number of random bytes of a token that will be used in a generated URL. +The text representation of the token will be twice as long as the number of bytes, e.g. for the default value the token in the URL will be 64 characters long. 
+ +#### `modules.mod_http_upload.max_file_size` +* **Syntax:** non-negative integer +* **Default:** not set - no size limit +* **Example:** `max_file_size = 10485760` + +Maximum file size (in bytes) accepted by the module. Disabled if set to `"undefined"`. + +#### `modules.mod_http_upload.s3` +* **Syntax:** Array of TOML tables. See description. +* **Default:** see description +* **Example:** see description + +Options specific to [S3][s3] backend. #### [S3][s3] backend options -* **bucket_url** (string, default: unset) - A complete URL pointing at the used bucket. The URL may be in [virtual host form][aws-virtual-host], and for AWS it needs to point to a specific regional endpoint for the bucket. The scheme, port and path specified in the URL will be used to create `PUT` URLs for slots, e.g. specifying a value of `"https://s3-eu-west-1.amazonaws.com/mybucket/custom/prefix"` will result in `PUT` URLs of form `"https://s3-eu-west-1.amazonaws.com/mybucket/custom/prefix//?"`. -* **add_acl** (boolean, default: `false`) - If `true`, adds `x-amz-acl: public-read` header to the PUT URL. +##### `s3.bucket_url` +* **Syntax:** non-empty string +* **Default:** none, this option is mandatory +* **Example:** `s3.bucket_url = "https://s3-eu-west-1.amazonaws.com/mybucket"` + +A complete URL pointing at the used bucket. The URL may be in [virtual host form][aws-virtual-host], and for AWS it needs to point to a specific regional endpoint for the bucket. The scheme, port and path specified in the URL will be used to create `PUT` URLs for slots, e.g. specifying a value of `"https://s3-eu-west-1.amazonaws.com/mybucket/custom/prefix"` will result in `PUT` URLs of form `"https://s3-eu-west-1.amazonaws.com/mybucket/custom/prefix//?"`. + +##### `s3.add_acl` +* **Syntax:** boolean +* **Default:** `false` +* **Example:** `s3.add_acl = true` + +If `true`, adds `x-amz-acl: public-read` header to the PUT URL. This allows users to read the uploaded files even if the bucket is private. 
The same header must be added to the PUT request.
-* **region** (string, default: unset) - The [AWS region][aws-region] to use for requests.
-* **access_key_id** (string, default: unset) - [ID of the access key][aws-keys] to use for authorization.
-* **secret_access_key** (string, default: unset) - [Secret access key][aws-keys] to use for authorization.
+
+##### `s3.region`
+* **Syntax:** string
+* **Default:** `""`
+* **Example:** `s3.region = "eu-west-1"`
+
+The [AWS region][aws-region] to use for requests.
+
+##### `s3.access_key_id`
+* **Syntax:** string
+* **Default:** `""`
+* **Example:** `s3.access_key_id = "AKIAIOSFODNN7EXAMPLE"`
+
+[ID of the access key][aws-keys] to use for authorization.
+
+##### `s3.secret_access_key`
+* **Syntax:** string
+* **Default:** `""`
+* **Example:** `s3.secret_access_key = "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY"`
+
+[Secret access key][aws-keys] to use for authorization.

[s3]: https://aws.amazon.com/s3/
[aws-virtual-host]: https://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html
@@ -32,18 +102,16 @@ This allows users to read the uploaded files even if the bucket is private. The

### Example configuration

-```Erlang
-{mod_http_upload, [
-    {host, "upload.@HOST@"},
-    {backend, s3},
-    {expiration_time, 120},
-    {s3, [
-        {bucket_url, "https://s3-eu-west-1.amazonaws.com/mybucket"},
-        {region, "eu-west-1"},
-        {access_key_id, "AKIAIOSFODNN7EXAMPLE"},
-        {secret_access_key, "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY"}
-    ]}
- ]}.
+``` +[modules.mod_http_upload] + host = "upload.@HOST@" + backend = "s3" + expiration_time = 120 + s3.bucket_url = "https://s3-eu-west-1.amazonaws.com/mybucket" + s3.region = "eu-west-1" + s3.add_acl = true + s3.access_key_id = "AKIAIOSFODNN7EXAMPLE" + s3.secret_access_key = "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY" ``` ### Testing [S3][s3] configuration diff --git a/doc/modules/mod_inbox.md b/doc/modules/mod_inbox.md index 62950cd8fb1..bd0f2926657 100644 --- a/doc/modules/mod_inbox.md +++ b/doc/modules/mod_inbox.md @@ -5,18 +5,53 @@ To use it, enable mod\_inbox in the config file. ### Options -* **backend** (atom, default: `rdbms`) - Database backend to use. For now, only `rdbms` is supported. -* **reset_markers** (list, default: `[displayed]`) - List of atom chat markers that when sent, will reset the unread message counter for a conversation. +#### `modules.mod_inbox.backend` +* **Syntax:** string +* **Default:** `"rdbms"` +* **Example:** `backend = "rdbms"` + +Database backend to use. For now, only `rdbms` is supported. + +#### `modules.mod_inbox.reset_markers` +* **Syntax:** array of strings, out of `"displayed"`, `"received"`, `"acknowledged"` +* **Default:** `["displayed"]` +* **Example:** `reset_markers = ["received"]` + +List of chat markers that when sent, will reset the unread message counter for a conversation. This works when [Chat Markers](https://xmpp.org/extensions/xep-0333.html) are enabled on the client side. -Possible values are from the set: `displayed`, `received`, `acknowledged`. Setting as empty list (not recommended) means that no chat marker can decrease the counter value. -* **groupchat** (list, default: `[muclight]`) - The list indicating which groupchats will be included in inbox. +Setting as empty list (not recommended) means that no chat marker can decrease the counter value. 
+ +#### `modules.mod_inbox.groupchat` +* **Syntax:** array of strings +* **Default:** `["muclight"]` +* **Example:** `groupchat = ["muclight"]` + +The list indicating which groupchats will be included in inbox. Possible values are `muclight` [Multi-User Chat Light](https://xmpp.org/extensions/inbox/muc-light.html) or `muc` [Multi-User Chat](https://xmpp.org/extensions/xep-0045.html). -* **aff_changes** (boolean, default: `true`) - use this option when `muclight` is enabled. + +#### `modules.mod_inbox.aff_changes` +* **Syntax:** boolean +* **Default:** `true` +* **Example:** `aff_changes = true` + +Use this option when `muclight` is enabled. Indicates if MUC Light affiliation change messages should be included in the conversation inbox. Only changes that affect the user directly will be stored in their inbox. -* **remove_on_kicked** (boolean, default: `true`) - use this option when `muclight` is enabled. + +#### `modules.mod_inbox.remove_on_kicked` +* **Syntax:** boolean +* **Default:** `true` +* **Example:** `remove_on_kicked = true` + +Use this option when `muclight` is enabled. If true, the inbox conversation is removed for a user when they are removed from the groupchat. -* **iqdisc** (atom, default: `no_queue`) + +#### `modules.mod_inbox.iqdisc.type` +* **Syntax:** string, one of `"one_queue"`, `"no_queue"`, `"queues"`, `"parallel"` +* **Default:** `"no_queue"` + +Strategy to handle incoming stanzas. For details, please refer to +[IQ processing policies](../../advanced-configuration/Modules/#iq-processing-policies). 
### Note about supported RDBMS

@@ -194,11 +229,10 @@ value:

### Example Configuration

```
-{mod_inbox, [{backend, rdbms},
-             {reset_markers, [displayed]},
-             {aff_changes, true},
-             {remove_on_kicked, true},
-             {groupchat, [muclight]}
-            ]},
+[modules.mod_inbox]
+  backend = "rdbms"
+  reset_markers = ["displayed"]
+  aff_changes = true
+  remove_on_kicked = true
+  groupchat = ["muclight"]
```
-
diff --git a/doc/modules/mod_jingle_sip.md b/doc/modules/mod_jingle_sip.md
index 203e098666c..1d03cd86141 100644
--- a/doc/modules/mod_jingle_sip.md
+++ b/doc/modules/mod_jingle_sip.md
@@ -100,19 +100,48 @@ MongooseIM packages are built with Jingle/SIP support.

### Options

-* `proxy_host` (default: "localhost") name or IP address of the SIP Proxy to which MongooseIM will send SIP messages
-* `proxy_port` (default: 5600) port of the SIP Proxy
-* `listen_port` (default: 5600) the port on which MongooseIM will listen for incomming SIP messages
-* `local_host` (default: "localhost") value used to create SIP URIs (including VIA headers)
-* `sdp_origin` (default: "127.0.0.1") value of the `c=` SDP attribute
+#### `modules.mod_jingle_sip.proxy_host`
+* **Syntax:** string
+* **Default:** `"localhost"`
+* **Example:** `proxy_host = "localhost"`
+
+The name or IP address of the SIP Proxy to which MongooseIM will send SIP messages.
+
+#### `modules.mod_jingle_sip.proxy_port`
+* **Syntax:** non-negative integer
+* **Default:** `5600`
+* **Example:** `proxy_port = 5600`
+
+The port of the SIP Proxy.
+
+#### `modules.mod_jingle_sip.listen_port`
+* **Syntax:** non-negative integer
+* **Default:** `5600`
+* **Example:** `listen_port = 5600`
+
+The port on which MongooseIM will listen for incoming SIP messages.
+
+#### `modules.mod_jingle_sip.local_host`
+* **Syntax:** string
+* **Default:** `"localhost"`
+* **Example:** `local_host = "localhost"`
+
+The value used to create SIP URIs (including VIA headers).
+ +#### `modules.mod_jingle_sip.sdp_origin` +* **Syntax:** string +* **Default:** `"127.0.0.1"` +* **Example:** `sdp_origin = "127.0.0.1"` + +The value of the `c=` SDP attribute. The simplest configuration is the following: -```erlang -{mod_jingle_sip, []} +``` +[modules.mod_jingle_sip] ``` -With this configuration MongooseIM will try sending SIP messages to a SIP proxy listening on localhost and port 5060. +With this configuration MongooseIM will try sending SIP messages to a SIP proxy listening on localhost and port 5600. ### Use cases covered by tests diff --git a/doc/modules/mod_keystore.md b/doc/modules/mod_keystore.md index ff92d36075f..7a1dcb37fef 100644 --- a/doc/modules/mod_keystore.md +++ b/doc/modules/mod_keystore.md @@ -10,7 +10,7 @@ and pre-shared keys which can be read from a file. RAM-only keys provide better security since they are never written to persistent storage, at the cost of loss in case of a cluster-global failure or restart. -As of now [`mod_auth_token`](mod_auth_token) is the only module +As of now [`mod_auth_token`](mod_auth_token.md) is the only module dependent on `mod_keystore`. It's crucial to understand the distinction between single-tenant and @@ -20,18 +20,19 @@ for each virtual XMPP domain to avoid sharing keys between domains!** ### Options -* `ram_key_size`: size to use when generating RAM-only keys (designated - by type `ram`) -* `keys`: list of _specifiers_ of keys which will be provided by the - module at runtime +#### `modules.mod_keystore.ram_key_size` +* **Syntax:** non-negative integer +* **Default:** `2048` +* **Example:** `ram_key_size = 10000` -Each _key specifier_ is a pair of `{KeyName, KeyType}`, where: +Size to use when generating RAM-only keys (designated by type `ram`). -* `KeyName`: any Erlang term. For simplicity's sake atoms are advised. - Names have to be unique in the context of one virtual domain. -* `KeyType`: one of `ram` or `{file, "path/to/file"}`. 
-  The file is read and its contents are provided
-  as the key (whitespace is trimmed).
+#### `modules.mod_keystore.keys`
+* **Syntax:** Array of TOML tables with the following keys: `"name"`, `"type"`, `"path"`, and following values: {name = `string`, type = `values: "file", "ram"`, path = `string`}.
+* **Default:** `[]`
+* **Example:** `modules.mod_keystore.keys = [{name = "access_psk", type = "file", path = "priv/access_psk"}]`
+
+Names, types, and optional filepaths of the keys.

### API

The module public API is hook-based:

```erlang
ejabberd_hooks:run_fold(get_key, Domain, [], [{KeyName, Domain}]).
```
-An example of usage can be found in [mod_auth_token:get_key_for_user/2](https://github.com/esl/MongooseIM/blob/26a23a260b14176c103339d745037cf4e3c1c188/apps/ejabberd/src/mod_auth_token.erl#L367)
+An example of usage can be found in [mod_auth_token:get_key_for_user/2](https://github.com/esl/MongooseIM/blob/26a23a260b14176c103339d745037cf4e3c1c188/apps/ejabberd/src/mod_auth_token.erl#L367).

### Example Configuration

Simple configuration - single tenant (i.e. server hosting just one XMPP domain):

-```erlang
-{mod_keystore, [{keys, [{access_secret, ram},
-                        {access_psk,    {file, "priv/access_psk"}},
-                        {provision_psk, {file, "priv/provision_psk"}}]}]}
-
+```
+[modules.mod_keystore]
+
+  [[modules.mod_keystore.keys]]
+    name = "access_secret"
+    type = "ram"
+
+  [[modules.mod_keystore.keys]]
+    name = "access_psk"
+    type = "file"
+    path = "priv/access_psk"
+
+  [[modules.mod_keystore.keys]]
+    name = "provision_psk"
+    type = "file"
+    path = "priv/provision_psk"
```

Multi-tenant setup (`mod_keystore` configured differently for each virtual XMPP domain):

```
-{host_config, "first.com",
- [
-  {modules,
-   [
-    {mod_keystore, [ {keys, [{access_secret, ram},
-                             {access_psk,    {file, "priv/first_access_psk"}},
-                             {provision_psk, {file, "priv/first_provision_psk"}}]}
-    ]}
-   ]}
- ]}.
-
-{host_config, "second.com",
- [
-  {modules,
-   [
-    {mod_keystore, [ {keys, [{access_secret, ram},
-                             {access_psk,    {file, "priv/second_access_psk"}},
-                             {provision_psk, {file, "priv/second_provision_psk"}}]}
-    ]}
-   ]}
- ]}.
+[[host_config]]
+  host = "first.com"
+
+  [[host_config.modules.mod_keystore.keys]]
+    name = "access_secret"
+    type = "ram"
+
+  [[host_config.modules.mod_keystore.keys]]
+    name = "access_psk"
+    type = "file"
+    path = "priv/first_access_psk"
+
+  [[host_config.modules.mod_keystore.keys]]
+    name = "provision_psk"
+    type = "file"
+    path = "priv/first_provision_psk"
+
+[[host_config]]
+  host = "second.com"
+
+  [[host_config.modules.mod_keystore.keys]]
+    name = "access_secret"
+    type = "ram"
+
+  [[host_config.modules.mod_keystore.keys]]
+    name = "access_psk"
+    type = "file"
+    path = "priv/second_access_psk"
+
+  [[host_config.modules.mod_keystore.keys]]
+    name = "provision_psk"
+    type = "file"
+    path = "priv/second_provision_psk"
```
diff --git a/doc/modules/mod_last.md b/doc/modules/mod_last.md
index b3192e0c0d8..3608c8116ea 100644
--- a/doc/modules/mod_last.md
+++ b/doc/modules/mod_last.md
@@ -6,16 +6,35 @@ Use with caution, as it was observed that a user disconnect spike might result i

### Options

-* **iqdisc** (default: `one_queue`)
-* **backend** (atom, default: `mnesia`): Storage backend. Currently `mnesia`, `rdbms` and `riak` are supported.
+#### `modules.mod_last.iqdisc.type`
+* **Syntax:** string, one of `"one_queue"`, `"no_queue"`, `"queues"`, `"parallel"`
+* **Default:** `"one_queue"`
+
+Strategy to handle incoming stanzas. For details, please refer to
+[IQ processing policies](../../advanced-configuration/Modules/#iq-processing-policies).
+
+#### `modules.mod_last.backend`
+* **Syntax:** string, one of `"mnesia"`, `"rdbms"`, `"riak"`
+* **Default:** `"mnesia"`
+* **Example:** `backend = "rdbms"`
+
+Storage backend.

##### Riak-specific options

-* `bucket_type` (default `<<"last">>`) - Riak bucket type.
+###### `bucket_type` +* **Syntax:** string +* **Default:** `"last"` +* **Example:** `bucket_type = "last"` + +Riak bucket type. ### Example Configuration -` {mod_last, []} ` +``` +[modules.mod_last] + backend = "rdbms" +``` ### Metrics @@ -25,4 +44,3 @@ If you'd like to learn more about metrics in MongooseIM, please visit [MongooseI | ---- | -------------------------------------- | | `get_last` | A timestamp is fetched from DB. | | `set_last_info` | A timestamp is stored in DB. | - diff --git a/doc/modules/mod_mam.md b/doc/modules/mod_mam.md index 2477219e8c7..8cd22bc80c8 100644 --- a/doc/modules/mod_mam.md +++ b/doc/modules/mod_mam.md @@ -44,65 +44,183 @@ Also note that the default separator for the search query is `AND` (which roughl ### Options -* **backend** (atom, default: `rdbms`) - Database backend to use. `rdbms`, `riak`, `cassandra` and `elasticsearch` are supported. -* **no_stanzaid_element** (boolean, default: `false`) - Do not add a `` element from MAM v0.6. -* **is_archivable_message** (module, default: `mod_mam_utils`) - Name of a module implementing [`is_archivable_message/3` callback](#is_archivable_message) that determines if the message should be archived. - **Warning**: if you are using MUC Light, make sure this option is set to the MUC Light domain. -* **archive_chat_markers** (boolean, default: `false`) - If set to true, XEP-0333 chat markers will be archived. See more details [here](#archiving-chat-markers) -* **pm** (list | `false`, default: `[]`) - Override options for archivization of one-to-one messages. If the value of this option is `false`, one-to-one message archive is disabled. -* **muc** (list | `false`, default: `false`) - Override options for archivization of group chat messages. If the value of this option is `false`, group chat message archive is disabled. -* **extra_lookup_params** (atom, default: `undefined`) - a module implementing `mam_iq` behaviour. 
- If this option has value other then undefined, function `extra_lookup_params/2` from this module will be called when building MAM lookup parameters. - This can be used to extend currently supported MAM query fields by a custom field or fields. - This field(s) can be added to lookup params later passed to MAM backend. -* **message_retraction** (boolean, default: `true`) - Enables [XEP-0424: Message Retraction](http://xmpp.org/extensions/xep-0424.html). This functionality is currently implemented only for the `rdbms` backend. [Retraction messages](https://xmpp.org/extensions/xep-0424.html#example-4) are always archived regardless of this option. - -**backend**, **no_stanzaid_element**, **is_archivable_message** and **message_retraction** will be applied to both `pm` and `muc` (if they are enabled), unless overriden explicitly (see example below). - -#### PM-specific options - -* **archive_groupchats** (boolean, default: `true`) - When enabled, MAM will store groupchat messages in recipients' individual archives. **USE WITH CAUTION!** May increase archive size significantly. Disabling this option for existing installation will neither remove such messages from MAM storage, nor will filter out them from search results. +#### `modules.mod_mam_meta.backend` +* **Syntax:** string, one of `"rdbms"`, `"riak"`, `"cassandra"` and `"elasticsearch"` +* **Default:** `"rdbms"` +* **Example:** `backend = "riak"` + +Database backend to use. + +#### `modules.mod_mam_meta.no_stanzaid_element` +* **Syntax:** boolean +* **Default:** `false` +* **Example:** `no_stanzaid_element = true` + +Do not add a `` element from MAM v0.6. 
+ +#### `modules.mod_mam_meta.is_archivable_message` +* **Syntax:** non-empty string +* **Default:** `"mod_mam_utils"` +* **Example:** `is_archivable_message = "mod_mam_utils"` +* **Warning**: if you are using MUC Light, make sure this option is set to the MUC Light domain + +Name of a module implementing [`is_archivable_message/3` callback](#is_archivable_message) that determines if the message should be archived. + +#### `modules.mod_mam_meta.archive_chat_markers` +* **Syntax:** boolean +* **Default:** `false` +* **Example:** `archive_chat_markers = true` + +If set to true, XEP-0333 chat markers will be archived. +See more details [here](#archiving-chat-markers). + +#### `modules.mod_mam_meta.message_retraction` +* **Syntax:** boolean +* **Default:** `true` +* **Example:** `message_retraction = false` + +Enables [XEP-0424: Message Retraction](http://xmpp.org/extensions/xep-0424.html). +This functionality is currently implemented only for the `rdbms` backend. +[Retraction messages](https://xmpp.org/extensions/xep-0424.html#example-4) are always archived regardless of this option. + +**backend**, **no_stanzaid_element**, **is_archivable_message** and **message_retraction** will be applied to both `pm` and `muc` (if they are enabled), unless overridden explicitly (see example below). + +#### Enable one-to-one message archive + +Archive for one-to-one messages can be enabled in one of two ways: + +* Specify `[mod_mam_meta.pm]` section +```toml +[modules.mod_mam_meta] +[modules.mod_mam_meta.pm] # defining this section enables PM support +``` +* Define any PM related option +```toml +[modules.mod_mam_meta] + pm.backend = "rdbms" # enables PM support and overrides its backend +``` + +#### Disable one-to-one message archive + +To disable archive for one-to-one messages please remove PM section or any PM related option from the config file. 
+
+### PM-specific options
+
+#### `modules.mod_mam_meta.pm.archive_groupchats`
+* **Syntax:** boolean
+* **Default:** `false`
+* **Example:** `modules.mod_mam_meta.pm.archive_groupchats = true`
+
+When enabled, MAM will store groupchat messages in recipients' individual archives. **USE WITH CAUTION!** May increase archive size significantly. Disabling this option for an existing installation will neither remove such messages from MAM storage, nor filter them out from search results.

MongooseIM will print a warning on startup if `pm` MAM is enabled without `archive_groupchats` being explicitly set to a specific value. In one of the future MongooseIM releases this option will default to `false` (as it's more common use case and less DB-consuming) and the warning message will be removed.

-#### MUC-specific options
+#### Enable MUC message archive
+
+Archive for MUC messages can be enabled in one of two ways:
+
+* Specify `[mod_mam_meta.muc]` section
+```toml
+[modules.mod_mam_meta]
+[modules.mod_mam_meta.muc] # defining this section enables MUC support
+```
+* Define any MUC related option
+```toml
+[modules.mod_mam_meta]
+  muc.backend = "rdbms" # enables MUC support and overrides its backend
+```
+
+#### Disable MUC message archive
+
+To disable archive for MUC messages please remove MUC section or any MUC related option from the config file.

-* **host** (string, default: `"conference.@HOST@"`) - MUC host that will be archived if MUC archiving is enabled.
+### MUC-specific options
+
+#### `modules.mod_mam_meta.muc.host`
+* **Syntax:** string
+* **Default:** `"conference.@HOST@"`
+* **Example:** `modules.mod_mam_meta.muc.host = "conference.@HOST@"`
+
+The MUC host that will be archived if MUC archiving is enabled.

#### Example

The example below presents how to override common option for `muc` module specifically.
+Please note that you can override all common options in similar way.
-```erlang -{mod_mam_meta, [ - {backend, rdbms}, - {async_writer, true}, %% this option enables async writer for RDBMS backend - {muc, [ - {async_writer, false} %% disable async writer for MUC archive only - ]} -]} +```toml +[modules.mod_mam_meta] + backend = "rdbms" + async_writer = true # this option enables async writer for RDBMS backend + + muc.async_writer = false # disable async writer for MUC archive only ``` #### RDBMS backend options These options will only have effect when the `rdbms` backend is used: -* **cache_users** (boolean, default: `true`) - Enables Archive ID to integer mappings cache. -* **rdbms_message_format** (atom, default: `internal`) - When set to `simple`, stores messages in XML and full JIDs. - When set to `internal`, stores messages and JIDs in internal format. - **Warning**: Archive MUST be empty to change this option. -* **async_writer** (boolean, default: `true`) - Enables an asynchronous writer that is faster than the synchronous one but harder to debug. - The async writers store batches of messages with a certain delay (see **flush_interval**), so the results of the lookup operations executed right after message routing may be incomplete until the configured time passes. -* **flush_interval** (integer, default: `2000`) How often (in milliseconds) the buffered messages are flushed to a DB. -* **max_batch_size** (integer, default, `30`) Max size of the batch insert query for an async writer. - If the buffer is full, messages are flushed to a database immediately and the flush timer is reset. +#### `modules.mod_mam_meta.cache_users` +* **Syntax:** boolean +* **Default:** `true` +* **Example:** `modules.mod_mam_meta.cache_users = false` + +Enables Archive ID to integer mappings cache. 
+ +#### `modules.mod_mam_meta.rdbms_message_format` +* **Syntax:** string, one of `"internal"` and `"simple"` +* **Default:** `"internal"` +* **Example:** `modules.mod_mam_meta.rdbms_message_format = "simple"` +* **Warning**: archive MUST be empty to change this option + +When set to `simple`, stores messages in XML and full JIDs. +When set to `internal`, stores messages and JIDs in internal format. + +#### `modules.mod_mam_meta.async_writer` +* **Syntax:** boolean +* **Default:** `true` +* **Example:** `modules.mod_mam_meta.async_writer = false` + +Enables an asynchronous writer that is faster than the synchronous one but harder to debug. +The async writers store batches of messages with a certain delay (see **flush_interval**), so the results of the lookup operations executed right after message routing may be incomplete until the configured time passes. + +#### `modules.mod_mam_meta.flush_interval` +* **Syntax:** non-negative integer +* **Default:** `2000` +* **Example:** `modules.mod_mam_meta.flush_interval = 2000` + +How often (in milliseconds) the buffered messages are flushed to a DB. + +#### `modules.mod_mam_meta.max_batch_size` +* **Syntax:** non-negative integer +* **Default:** `30` +* **Example:** `modules.mod_mam_meta.max_batch_size = 30` + +Max size of the batch insert query for an async writer. +If the buffer is full, messages are flushed to a database immediately and the flush timer is reset. #### Common backend options -* **user_prefs_store** (atom, default: `false`) - Leaving this option as `false` will prevent users from setting their archiving preferences. It will also increase performance. Other possible values are: - * `rdbms` (RDBMS backend only) - User archiving preferences saved in RDBMS. Slow and not recommended, but might be used for simplicity (keeping everything in RDBMS). - * `cassandra` (Cassandra backend only) - User archiving preferences are saved in Cassandra. 
- * `mnesia` (recommended) - User archiving preferences saved in Mnesia and accessed without transactions. Recommended in most deployments, could be overloaded with lots of users updating their preferences at once. There's a small risk of an inconsistent (in a rather harmless way) state of the preferences table.
-* **full_text_search** (boolean, default: `true`) - Enables full text search in message archive (see *Full Text Search* paragraph). Please note that the full text search is currently only implemented for `rdbms` and `riak` backends. Also, full text search works only for messages archived while this option is enabled.
+#### `modules.mod_mam_meta.user_prefs_store`
+* **Syntax:** one of `false`, `"rdbms"`, `"cassandra"`, `"mnesia"`
+* **Default:** `false`
+* **Example:** `modules.mod_mam_meta.user_prefs_store = "mnesia"`
+
+Leaving this option as `false` will prevent users from setting their archiving preferences.
+It will also increase performance.
+The possible values are:
+
+* `"rdbms"` (RDBMS backend only) - User archiving preferences saved in RDBMS. Slow and not recommended, but might be used for simplicity (keeping everything in RDBMS).
+* `"cassandra"` (Cassandra backend only) - User archiving preferences are saved in Cassandra.
+* `"mnesia"` (recommended) - User archiving preferences saved in Mnesia and accessed without transactions. Recommended in most deployments, could be overloaded with lots of users updating their preferences at once. There's a small risk of an inconsistent (in a rather harmless way) state of the preferences table.
+
+#### `modules.mod_mam_meta.full_text_search`
+* **Syntax:** boolean
+* **Default:** `true`
+* **Example:** `modules.mod_mam_meta.full_text_search = false`
+
+Enables full text search in message archive (see *Full Text Search* paragraph).
+Please note that the full text search is currently only implemented for `"rdbms"` and `"riak"` backends.
+Also, full text search works only for messages archived while this option is enabled. #### `is_archivable_message/3` callback @@ -134,20 +252,25 @@ This backend works with Riak KV 2.0 and above, but we recommend version 2.1.1. ##### Riak-specific options -* `bucket_type` (default `<<"mam_yz">>`) - Riak bucket type. +#### `modules.mod_mam_meta.riak.bucket_type` +* **Syntax:** non-empty string +* **Default:** `"mam_yz"` +* **Example:** `modules.mod_mam_meta.riak.bucket_type = "mam_yz"` + +Riak bucket type. + +#### `modules.mod_mam_meta.riak.search_index` +* **Syntax:** non-empty string +* **Default:** `"mam"` +* **Example:** `modules.mod_mam_meta.riak.search_index = "mam"` -* `search_index` (default `<<"mam">>`) - Riak index name. +Riak index name. ### Cassandra backend Please consult [Outgoing connections](../advanced-configuration/outgoing-connections.md#cassandra-connection-setup) page to learn how to properly configure Cassandra connection pool. By default, `mod_mam` Cassandra backend requires `global` pool with `default` tag: -```erlang -{outgoing_pools, [ - {cassandra, global, default, [], []}. -]}. -``` ### ElasticSearch backend @@ -156,20 +279,18 @@ Please consult [Outgoing connections](../advanced-configuration/outgoing-connect ### Example configuration -```erlang -{mod_mam_meta, [ - {backend, rdbms}, - - {no_stanzaid_element, true}, - - {pm, [{user_prefs_store, rdbms}]}, - {muc, [ - {host, "muc.example.com"}, - {rdbms_message_format, simple}, - {async_writer, false}, - {user_prefs_store, mnesia} - ]} - ]}. 
+```toml
+[modules.mod_mam_meta]
+  backend = "rdbms"
+  no_stanzaid_element = true
+
+  pm.user_prefs_store = "rdbms"
+
+  muc.host = "muc.example.com"
+  muc.rdbms_message_format = "simple"
+  muc.async_writer = false
+  muc.user_prefs_store = "mnesia"
+
 ```
 
 ### Metrics
diff --git a/doc/modules/mod_muc.md b/doc/modules/mod_muc.md
index 418138ceeb0..cd3f1cba8d9 100644
--- a/doc/modules/mod_muc.md
+++ b/doc/modules/mod_muc.md
@@ -6,70 +6,410 @@
 Note that only `mod_muc` needs to be enabled in the configuration file.
 Also `mod_muc_log` is a logging submodule.
 
 ### Options
-* `host` (string, default: `"conference.@HOST@"`): Subdomain for MUC service to reside under.
-  `@HOST@` is replaced with each served domain.
-* `backend` (atom, default: `mnesia`): Storage backend, `mnesia` and `rdbms` are supported.
-* `access` (atom, default: `all`): Access Rule to determine who is allowed to use the MUC service.
-* `access_create` (atom, default: `all`): Who is allowed to create rooms.
-* `access_admin` (atom, default: `none`): Who is the administrator in all rooms.
-* `access_persistent` (atom, default: `all`): Who is allowed to make the rooms persistent.
-  In order to change this parameter, the user must not only match the Access Rule but also be the owner of the room.
-* `history_size` (non-negative integer, default: 20): Room message history to be kept in RAM.
-  After node restart, the history is lost.
-* `room_shaper` (atom, default: `none`): Limits per-room data throughput with traffic shaper.
-* `max_room_id` (atom or positive integer, default: `infinite`): Maximum room username length (in JID).
-* `max_room_name` (atom or positive integer, default: `infinite`): Maximum room name length.
-* `max_room_desc` (atom or positive integer, default: `infinite`): Maximum room description length.
-* `min_message_interval` (non-negative integer, default: 0): Minimal interval (in seconds) between messages processed by the room.
-* `min_presence_interval` (non-negative integer, default: 0): Minimal interval (in seconds) between presences processed by the room. -* `max_users` (positive integer, default: 200): Absolute maximum user count per room on the node. -* `max_users_admin_threshold` (positive integer, default: 5): When the server checks if a new user can join a room and they are an admin, `max_users_admin_threshold` is added to `max_users` during occupant limit check. -* `user_message_shaper` (atom, default: `none`): Shaper for user messages processed by a room (global for the room). -* `user_presence_shaper` (atom, default: `none`): Shaper for user presences processed by a room (global for the room). -* `max_user_conferences` (non-negative, default: 10): Specifies the number of rooms that a user can occupy simultaneously. -* `http_auth_pool` (atom, default: `none`): If an external HTTP service is chosen to check passwords for password-protected rooms, this option specifies the HTTP pool name to use (see [External HTTP Authentication](#external-http-authentication) below). -* `load_permanent_rooms_at_startup` (boolean, default: false) - Load all rooms at startup (can be unsafe when there are many rooms, that's why disabled). -* `hibernate_timeout` (timeout, default: `90000`): Timeout (in milliseconds) defining the inactivity period after which the room's process should be hibernated. -* `hibernated_room_check_interval` (timeout, default: `infinity`): Interval defining how often the hibernated rooms will be checked (a timer is global for a node). -* `hibernated_room_timeout` (timeout, default: `inifitniy`): A time after which a hibernated room is stopped (deeply hibernated). +#### `modules.mod_muc.host` + * **Syntax:** string, a valid subdomain + * **Default:** `"conference.@HOST@"` + * **Example:** `host = "group.@HOST@"` + +Subdomain for MUC service to reside under. `@HOST@` is replaced with each served domain. 
+
+#### `modules.mod_muc.backend`
+ * **Syntax:** string, one of `"mnesia"` or `"rdbms"`
+ * **Default:** `"mnesia"`
+ * **Example:** `backend = "rdbms"`
+
+Storage backend.
+
+#### `modules.mod_muc.access`
+ * **Syntax:** non-empty string
+ * **Default:** `"all"`
+ * **Example:** `access = "muc"`
+
+Access Rule to determine who is allowed to use the MUC service.
+
+#### `modules.mod_muc.access_create`
+ * **Syntax:** non-empty string
+ * **Default:** `"all"`
+ * **Example:** `access_create = "muc_create"`
+
+Access Rule to determine who is allowed to create rooms.
+
+#### `modules.mod_muc.access_admin`
+ * **Syntax:** non-empty string
+ * **Default:** `"none"`
+ * **Example:** `access_admin = "muc_create"`
+
+Access Rule to determine who is the administrator in all rooms.
+
+#### `modules.mod_muc.access_persistent`
+ * **Syntax:** non-empty string
+ * **Default:** `"all"`
+ * **Example:** `access_persistent = "none"`
+
+Access Rule to determine who is allowed to make the rooms persistent.
+In order to change this parameter, the user must not only match the Access Rule but also be the owner of the room.
+
+#### `modules.mod_muc.history_size`
+ * **Syntax:** non-negative integer
+ * **Default:** `20`
+ * **Example:** `history_size = 30`
+
+Room message history to be kept in RAM. After node restart, the history is lost.
+
+#### `modules.mod_muc.room_shaper`
+ * **Syntax:** non-empty string
+ * **Default:** `"none"`
+ * **Example:** `room_shaper = "muc_room_shaper"`
+
+Limits per-room data throughput with traffic shaper.
+
+#### `modules.mod_muc.max_room_id`
+ * **Syntax:** non-negative integer or the string `"infinity"`
+ * **Default:** `"infinity"`
+ * **Example:** `max_room_id = 30`
+
+Maximum room username length (in JID).
+
+#### `modules.mod_muc.max_room_name`
+ * **Syntax:** non-negative integer or the string `"infinity"`
+ * **Default:** `"infinity"`
+ * **Example:** `max_room_name = 30`
+
+Maximum room name length.
+ +#### `modules.mod_muc.max_room_desc` + * **Syntax:** non-negative integer or the string `"infinity"` + * **Default:** `"infinity"` + * **Example:** `max_room_desc = 140` + +Maximum room description length. + +#### `modules.mod_muc.min_message_interval` + * **Syntax:** non-negative integer + * **Default:** `0` + * **Example:** `min_message_interval = 1` + +Minimal interval (in seconds) between messages processed by the room. + +#### `modules.mod_muc.min_presence_interval` + * **Syntax:** non-negative integer + * **Default:** `0` + * **Example:** `min_presence_interval = 1` + +Minimal interval (in seconds) between presences processed by the room. + +#### `modules.mod_muc.max_users` + * **Syntax:** positive integer + * **Default:** `200` + * **Example:** `max_users = 100` + +Absolute maximum user count per room on the node. + +#### `modules.mod_muc.max_users_admin_threshold` + * **Syntax:** positive integer + * **Default:** `5` + * **Example:** `max_users_admin_threshold = 10` + +When the server checks if a new user can join a room and they are an admin, + `max_users_admin_threshold` is added to `max_users` during occupant limit check. + +#### `modules.mod_muc.user_message_shaper` + * **Syntax:** non-empty string + * **Default:** `"none"` + * **Example:** `user_message_shaper = "muc_user_msg_shaper"` + +Shaper for user messages processed by a room (global for the room). + +#### `modules.mod_muc.user_presence_shaper` + * **Syntax:** non-empty string + * **Default:** `"none"` + * **Example:** `user_presence_shaper = "muc_user_presence_shaper"` + +Shaper for user presences processed by a room (global for the room). + +#### `modules.mod_muc.max_user_conferences` + * **Syntax:** non-negative integer + * **Default:** `10` + * **Example:** `max_user_conferences = 5` + +Specifies the number of rooms that a user can occupy simultaneously. 
+ +#### `modules.mod_muc.http_auth_pool` + * **Syntax:** non-empty string + * **Default:** `"none"` + * **Example:** `http_auth_pool = "external_auth"` + +If an external HTTP service is chosen to check passwords for password-protected rooms, +this option specifies the HTTP pool name to use (see [External HTTP Authentication](#external-http-authentication) below). + +#### `modules.mod_muc.load_permanent_rooms_at_startup` + * **Syntax:** boolean + * **Default:** `false` + * **Example:** `load_permanent_rooms_at_startup = true` + +Load all rooms at startup. Because it can be unsafe when there are many rooms, +it is disabled by default. + +#### `modules.mod_muc.hibernate_timeout` + * **Syntax:** non-negative integer or the string `"infinity"` + * **Default:** `90000` (milliseconds, 90 seconds) + * **Example:** `hibernate_timeout = 60000` + +Timeout (in milliseconds) defining the inactivity period after which the room's process should be hibernated. + +#### `modules.mod_muc.hibernated_room_check_interval` + * **Syntax:** non-negative integer or the string `"infinity"` + * **Default:** `"infinity"` + * **Example:** `hibernated_room_check_interval = 120000` + +Interval defining how often the hibernated rooms will be checked (a timer is global for a node). + +#### `modules.mod_muc.hibernated_room_timeout` + * **Syntax:** non-negative integer or the string `"infinity"` + * **Default:** `"infinity"` + * **Example:** `hibernated_room_timeout = 120000` + +A time after which a hibernated room is stopped (deeply hibernated). See [MUC performance optimisation](#performance-optimisations). -* `default_room_options` (list of key-value tuples, default: `[]`): List of room configuration options to be overridden in the initial state. - * `title` (binary, default: `<<>>`): Room title, short free text. - * `description` (binary, default: `<<>>`): Room description, long free text. - * `allow_change_subj` (boolean, default: `true`): Allow all occupants to change the room subject. 
- * `allow_query_users` (boolean, default: `true`): Allow occupants to send IQ queries to other occupants. - * `allow_private_messages` (boolean, default: `true`): Allow private messaging between occupants. - * `allow_visitor_status` (boolean, default: `true`): Allow occupants to use text statuses in presences. - When disabled, text is removed by the room before broadcasting. - * `allow_visitor_nickchange` (boolean, default: `true`): Allow occupants to change nicknames. - * `public` (boolean, default: `true`): Room is included in the list available via Service Discovery. - * `public_list` (boolean, default: `true`): Member list can be fetched by non-members. - * `persistent` (boolean, default: `false`): Room will be stored in DB and survive even when the last occupant leaves or the node is restarted. - * `moderated` (boolean, default: `true`): Only occupants with a "voice" can send group chat messages. - * `members_by_default` (boolean, default: `true`): All new occupants are members by default, unless they have a different affiliation assigned. - * `members_only` (boolean, default: `false`): Only users with a member affiliation can join the room. - * `allow_user_invites` (boolean, default: `false`): Allow ordinary members to send mediated invitations. - * `allow_multiple_sessions` (boolean, default: `false`): Allow multiple user session to use the same nick. - * `password_protected` (boolean, default: `false`): Room is protected with a password. - * `password` (binary, default: `<<>>`): Room password is required upon joining. - This option has no effect when `password_protected` is `false`. - * `anonymous` (boolean, default: `true`): Room is anonymous, meaning occupants can't see each others real JIDs, except for the room moderators. - * `max_users` (positive integer, default: 200): Maximum user count per room. - Admins and the room owner are not affected. 
- * `logging` (boolean, default: `false`): Enables logging of room events (messages, presences) to a file on the disk. Uses `mod_muc_log`. - * `maygetmemberlist` (list of atoms, default: `[]`): A list of roles and/or privileges that enable retrieving the room's member list. - * `affiliations` (list of `{{<<"user">>, <<"server">>, <<"resource">>}, affiliation}` tuples, default: `[]`): A default list of affiliations set for every new room. - * `subject` (binary, default: `<<>>`): A default subject for new room. - * `subject_author` (binary, default: `<<>>`): A nick name of the default subject's author. + +#### `modules.mod_muc.default_room` + * **Syntax:** A TOML table of options described below + * **Default:** Default room options + * **Example:** +``` + [modules.mod_muc.default_room] + password_protected = true + description = "An example description." + + [[modules.mod_muc.default_room.affiliations]] + user = "alice" + server = "localhost" + resource = "resource1" + affiliation = "member" +``` +or: +``` + default_room.password_protected = true + default_room.description = "An example description." + + [[modules.mod_muc.default_room.affiliations]] + user = "alice" + server = "localhost" + resource = "resource1" + affiliation = "member" +``` + +Available room configuration options to be overridden in the initial state: +* `modules.mod_muc.default_room.title` + * **Syntax:** string + * **Default:** `""` + * **Example:** `title = "example_title"` + + Room title, short free text. + +* `modules.mod_muc.default_room.description` + * **Syntax:** string + * **Default:** `""` + * **Example:** `description = "An example description."` + + Room description, long free text. + +* `modules.mod_muc.default_room.allow_change_subj` + * **Syntax:** boolean + * **Default:** `true` + * **Example:** `allow_change_subj = false` + + Allow all occupants to change the room subject. 
+ +* `modules.mod_muc.default_room.allow_query_users` + * **Syntax:** boolean + * **Default:** `true` + * **Example:** `allow_query_users = false` + + Allow occupants to send IQ queries to other occupants. + +* `modules.mod_muc.default_room.allow_private_messages` + * **Syntax:** boolean + * **Default:** `true` + * **Example:** `allow_private_messages = false` + + Allow private messaging between occupants. + +* `modules.mod_muc.default_room.allow_visitor_status` + * **Syntax:** boolean + * **Default:** `true` + * **Example:** `allow_visitor_status = false` + + Allow occupants to use text statuses in presences. + When disabled, text is removed by the room before broadcasting. + +* `modules.mod_muc.default_room.allow_visitor_nickchange` + * **Syntax:** boolean + * **Default:** `true` + * **Example:** `allow_visitor_nickchange = false` + + Allow occupants to change nicknames. + +* `modules.mod_muc.default_room.public` + * **Syntax:** boolean + * **Default:** `true` + * **Example:** `public = false` + + Room is included in the list available via Service Discovery. + +* `modules.mod_muc.default_room.public_list` + * **Syntax:** boolean + * **Default:** `true` + * **Example:** `public_list = false` + + Member list can be fetched by non-members. + +* `modules.mod_muc.default_room.persistent` + * **Syntax:** boolean + * **Default:** `false` + * **Example:** `persistent = true` + + Room will be stored in DB and survive even when the last occupant leaves or the node is restarted. + +* `modules.mod_muc.default_room.moderated` + * **Syntax:** boolean + * **Default:** `true` + * **Example:** `moderated = false` + + Only occupants with a "voice" can send group chat messages. + +* `modules.mod_muc.default_room.members_by_default` + * **Syntax:** boolean + * **Default:** `true` + * **Example:** `members_by_default = false` + + All new occupants are members by default, unless they have a different affiliation assigned. 
+ +* `modules.mod_muc.default_room.members_only` + * **Syntax:** boolean + * **Default:** `false` + * **Example:** `members_only = true` + + Only users with a member affiliation can join the room. + +* `modules.mod_muc.default_room.allow_user_invites` + * **Syntax:** boolean + * **Default:** `false` + * **Example:** `allow_user_invites = true` + + Allow ordinary members to send mediated invitations. + +* `modules.mod_muc.default_room.allow_multiple_sessions` + * **Syntax:** boolean + * **Default:** `false` + * **Example:** `allow_multiple_sessions = true` + + Allow multiple user session to use the same nick. + +* `modules.mod_muc.default_room.password_protected` + * **Syntax:** boolean + * **Default:** `false` + * **Example:** `password_protected = true` + + Room is protected with a password. + +* `modules.mod_muc.default_room.password` + * **Syntax:** string + * **Default:** `""` + * **Example:** `password = "secret"` + + Room password is required upon joining. + This option has no effect when `password_protected` is `false`. + +* `modules.mod_muc.default_room.anonymous` + * **Syntax:** boolean + * **Default:** `true` + * **Example:** `anonymous = false` + + Room is anonymous, meaning occupants can't see each others real JIDs, except for the room moderators. + +* `modules.mod_muc.default_room.max_users` + * **Syntax:** positive integer + * **Default:** `200` + * **Example:** `max_users = 100` + + Maximum user count per room. Admins and the room owner are not affected. + +* `modules.mod_muc.default_room.logging` + * **Syntax:** boolean + * **Default:** `false` + * **Example:** `logging = true` + + Enables logging of room events (messages, presences) to a file on the disk. + Uses `mod_muc_log`. + +* `modules.mod_muc.default_room.maygetmemberlist` + * **Syntax:** array of non-empty strings + * **Default:** `[]` + * **Example:** `maygetmemberlist = ["moderator"]` + + An array of roles and/or privileges that enable retrieving the room's member list. 
+ +* `modules.mod_muc.default_room.affiliations` + * **Syntax:** array of tables with keys: + * `user` - non-empty string, + * `server` - string, a valid domain, + * `resource` - string, + * `affiliation` - non-empty string + * **Default:** `[]` + * **Example:** + +``` +[[modules.mod_muc.default_room.affiliations]] + user = "alice" + server = "localhost" + resource = "resource1" + affiliation = "member" + +[[modules.mod_muc.default_room.affiliations]] + user = "bob" + server = "localhost" + resource = "resource2" + affiliation = "owner" +``` + + This is the default list of affiliations set for every new room. + +* `modules.mod_muc.default_room.subject` + * **Syntax:** string + * **Default:** `""` + * **Example:** `subject = "Lambda days"` + + A default subject for new room. + +* `modules.mod_muc.default_room.subject_author` + * **Syntax:** string + * **Default:** `""` + * **Example:** `subject_author = "Alice"` + + A nick name of the default subject's author. ### Example Configuration ``` -{mod_muc, [ - {host, "muc.example.com"}, - {access, muc}, - {access_create, muc_create} - ]}, +[modules.mod_muc] + host = "muc.example.com" + access = "muc" + access_create = "muc_create" + http_auth_pool = "my_auth_pool" + default_room.password_protected = true + + [[modules.mod_muc.default_room.affiliations]] + user = "alice" + server = "localhost" + resource = "resource1" + affiliation = "member" + + [[modules.mod_muc.default_room.affiliations]] + user = "bob" + server = "localhost" + resource = "resource2" + affiliation = "owner" ``` ### Performance optimisations @@ -85,7 +425,7 @@ This timeout can be modified by `hibernate_timeout` option. #### Room deep hibernation -MongooseIM introduces an addtional option of deep hibernation for unused rooms. +MongooseIM introduces an additional option of deep hibernation for unused rooms. This optimisation works only for persistent rooms as only these can be restored on demand. The improvement works as follows: 1. 
All room processes are traversed at a chosen `hibernated_room_check_interval`. @@ -115,31 +455,17 @@ If the server returns something else, an error presence will be sent back to the **Example:** -```Erlang - -{outgoing_pools, - [{http, global, my_auth_pool, - [{strategy, available_worker}], - [{server, "http://my_server:8000"}]} - ] -}. - -{modules, [ - - (...) - - {mod_muc, [ - {host, "muc.example.com"}, - {access, muc}, - {access_create, muc_create}, - {http_auth_pool, my_auth_pool}, - {default_room_options, [{password_protected, true}]} - ]}, - - (...) - -]}. +``` +[outgoing_pools.http.my_auth_pool] + strategy = "available_worker" + connection.host = "http://my_server:8000" +[modules.mod_muc] + host = "muc.example.com" + access = "muc" + access_create = "muc_create" + http_auth_pool = "my_auth_pool" + default_room.password_protected = true ``` ### Metrics diff --git a/doc/modules/mod_muc_commands.md b/doc/modules/mod_muc_commands.md index b5f7229b113..9082b38d345 100644 --- a/doc/modules/mod_muc_commands.md +++ b/doc/modules/mod_muc_commands.md @@ -8,7 +8,7 @@ This module contains command definitions which are loaded when the module is act There are no options to be provided, therefore the following entry in the config file is sufficient: ``` -{mod_muc_commands, []} +[modules.mod_muc_commands] ``` ## Commands diff --git a/doc/modules/mod_muc_light.md b/doc/modules/mod_muc_light.md index 744aa04105b..1a92b499ed1 100644 --- a/doc/modules/mod_muc_light.md +++ b/doc/modules/mod_muc_light.md @@ -6,61 +6,160 @@ This extension consists of several modules but only `mod_muc_light` needs to be ### Options -* **host** (string, default: `"muclight.@HOST@"`) - Domain for the MUC Light service to reside under. +#### `modules.mod_muc_light.host` + * **Syntax:** string, a valid subdomain + * **Default:** `"muclight.@HOST@"` + * **Example:** `host = "group.@HOST@"` + +Domain for the MUC Light service to reside under. `@HOST@` is replaced with each served domain. 
-* **backend** (atom, default: `mnesia`) - Database backend to use. - `mnesia` and `rdbms` are supported. -* **equal_occupants** (boolean, default: `false`) - When enabled, MUC Light rooms won't have owners. - It means that every occupant will be a `member`, even the room creator. + +#### `modules.mod_muc_light.backend` + * **Syntax:** string, one of `"mnesia"`, `"rdbms"` + * **Default:** `"mnesia"` + * **Example:** `backend = "rdbms"` + +Database backend to use. + +#### `modules.mod_muc_light.equal_occupants` + * **Syntax:** boolean + * **Default:** `false` + * **Example:** `equal_occupants = true` + + When enabled, MUC Light rooms won't have owners. + It means that every occupant will be a `member`, even the room creator. **Warning:** This option does not implicitly set `all_can_invite` to `true`. If that option is set to `false`, nobody will be able to join the room after the initial creation request. -* **legacy_mode** (boolean, default: `false`) - Enables XEP-0045 compatibility mode. - It allows using a subset of classic MUC stanzas with some MUC Light functions limited. -* **rooms_per_user** (positive integer or `infinity`, default: `infinity`) - Specifies a cap on a number of rooms a user can occupy. + +#### `modules.mod_muc_light.legacy_mode` + * **Syntax:** boolean + * **Default:** `false` + * **Example:** `legacy_mode = true` + +Enables XEP-0045 compatibility mode. +It allows using a subset of classic MUC stanzas with some MUC Light functions limited. + +#### `modules.mod_muc_light.rooms_per_user` + * **Syntax:** positive integer or the string `"infinity"` + * **Default:** `"infinity"` + * **Example:** `rooms_per_user = 100` + + Specifies a cap on a number of rooms a user can occupy. **Warning:** Setting such a limit may trigger expensive DB queries for every occupant addition. -* **blocking** (boolean, default: `true`) - Blocking feature enabled/disabled. 
-* **all_can_configure** (boolean, default: `false`) - When enabled, all room occupants can change all configuration options. - If disabled, everyone can still the change room subject. -* **all_can_invite** (boolean, default: `false`) - When enabled, all room occupants can add new occupants to the room. + +#### `modules.mod_muc_light.blocking` + * **Syntax:** boolean + * **Default:** `true` + * **Example:** `blocking = false` + +Blocking feature enabled/disabled. + +#### `modules.mod_muc_light.all_can_configure` + * **Syntax:** boolean + * **Default:** `false` + * **Example:** `all_can_configure = true` + + When enabled, all room occupants can change all configuration options. + If disabled, everyone can still change the room subject. + +#### `modules.mod_muc_light.all_can_invite` + * **Syntax:** boolean + * **Default:** `false` + * **Example:** `all_can_invite = true` + +When enabled, all room occupants can add new occupants to the room. Occupants added by `members` become `members` as well. -* **max_occupants** (positive integer or `infinity`, default: `infinity`) - Specifies a cap on the occupant count per room. -* **rooms_per_page** (positive integer or `infinity`, default: 10) - Specifies maximal number of rooms returned for a single Disco request. -* **rooms_in_rosters** (boolean, default: `false`) - When enabled, rooms the user occupies are included in their roster. -* **config_schema** (list; see below, default: `[{"roomname", "Untitled"}, {"subject", ""}]`) - A list of fields allowed in the room configuration. - The field type may be specified but the default is "binary", i.e. effectively a string. - **WARNING!** Lack of the `roomname` field will cause room names in Disco results and Roster items be set to the room username. +#### `modules.mod_muc_light.max_occupants` + * **Syntax:** positive integer or the string `"infinity"` + * **Default:** `"infinity"` + * **Example:** `max_occupants = 100` + +Specifies a cap on the occupant count per room. 
+ +#### `modules.mod_muc_light.rooms_per_page` + * **Syntax:** positive integer or the string `"infinity"` + * **Default:** `10` + * **Example:** `rooms_per_page = 100` + +Specifies maximal number of rooms returned for a single Disco request. -### Config schema +#### `modules.mod_muc_light.rooms_in_rosters` + * **Syntax:** boolean + * **Default:** `false` + * **Example:** `rooms_in_rosters = true` -Allowed `config_schema` list items are (may be mixed): +When enabled, rooms the user occupies are included in their roster. -* Field name and a default value: `{"field", "value"}` - will be expanded to "field" of a type `binary` (string) with a default "value" -* Field name, a default value, an atom (internal key representation) and a type: `{"field", "value", field, float}` - useful only for debugging or custom applications +#### `modules.mod_muc_light.config_schema` + * **Syntax:** an array of `config_schema` items, as described below + * **Default:** -Example of such list: `[{"roomname", "My Room"}, {"subject", "Hi"}, {"priority", 0, priority, integer}]` + [[modules.mod_muc_light.config_schema]] + field = "roomname" + value = "Untitled" + + [[modules.mod_muc_light.config_schema]] + field = "subject" + value = "" + * **Example:** + + [[modules.mod_muc_light.config_schema]] + field = "display-lines" + value = 30 + internal_key = "display_lines" + type = "integer" + + Defines fields allowed in the room configuration. + + Allowed `config_schema` items are (may be mixed): + +* Field name and a default value. The value has to be a string. An example: + ``` + field = "field_name" + value = "default_value" + ``` +* Field name, a default value, an internal key representation string and a type. Valid config field types are: -* `binary` (i.e. any valid XML CDATA) -* `integer` -* `float` + * `binary` (i.e. any valid XML CDATA) + * `integer` + * `float` + + Useful only for debugging or custom applications. 
An example: + ``` + field = "display-lines" + value = 30 + internal_key = "display_lines" + type = "integer" + ``` +**WARNING!** Lack of the `roomname` field will cause room names in Disco results and Roster items be set to the room username. + ### Example Configuration ``` -{mod_muc_light, [ - {host, "muclight.example.com"}, - {equal_occupants, true}, - {legacy_mode, true}, - {rooms_per_user, 10}, - {blocking, false}, - {all_can_configure, true}, - {all_can_invite, true}, - {max_occupants, 50}, - {rooms_per_page, 5}, - {rooms_in_rosters, true}, - {config_schema, [{"roomname", "The Room"}, {"display-lines", 30, display_lines, integer}]} - ]}, +[modules.mod_muc_light] + host = "muclight.example.com" + equal_occupants = true + legacy_mode = true + rooms_per_user = 10 + blocking = false + all_can_configure = true + all_can_invite = true + max_occupants = 50 + rooms_per_page = 5 + rooms_in_rosters = true + + [[modules.mod_muc_light.config_schema]] + field = "roomname" + value = "The Room" + + [[modules.mod_muc_light.config_schema]] + field = "display-lines" + value = 30 + internal_key = "display_lines" + type = "integer" ``` ### Metrics @@ -80,4 +179,3 @@ If you'd like to learn more about metrics in MongooseIM, please visit [MongooseI | `set_blocking` | Blocking data is updated in a DB. | | `get_aff_users` | An affiliated users list is fetched from a DB. | | `modify_aff_users` | Affiliations in a room are updated in a DB. 
| - diff --git a/doc/modules/mod_muc_light_commands.md b/doc/modules/mod_muc_light_commands.md index 70f3085dea6..2861ddc6d8e 100644 --- a/doc/modules/mod_muc_light_commands.md +++ b/doc/modules/mod_muc_light_commands.md @@ -9,7 +9,7 @@ This module contains command definitions which are loaded when the module is act There are no options to be provided, therefore the following entry in the config file is sufficient: ``` -{mod_muc_light_commands, []} +[modules.mod_muc_light_commands] ``` ## Commands diff --git a/doc/modules/mod_muc_log.md b/doc/modules/mod_muc_log.md index 7b13f109461..5e2731dadb6 100644 --- a/doc/modules/mod_muc_log.md +++ b/doc/modules/mod_muc_log.md @@ -5,35 +5,98 @@ It writes room-related information (configuration) and events (messages, presenc ### Options -* `outdir` (string, default: `"www/muc"`): Filesystem directory where the files are stored. -* `access_log` (atom, default: `muc_admin`): ACL that defines who can enable/disable logging for specific rooms. -* `dirtype` (atom, default: `subdirs`): Specifies the log directory structure. - * `subdirs`: Module will use the following directory structure `[Logs root]/[dirname]/YYYY/MM/` with file names being `DD.[extension]`. - * `plain`: Module will use the following directory structure `[Logs root]/[dirname]/` with file names being `YYYY-MM-DD.[extension]`. -* `dirname` (atom, default: `room_jid`): Specifies directory name created for each room. - * `room_jid`: Uses the room bare JID. - * `room_name`: Uses the room name from its configuration. -* `file_format` (atom, default: `html`): - * `html`: The output is a fancy-formatted HTML page. - * `plaintext`: Just a text file, better suited for processing than HTML. -* `css_file` (binary or atom, default: `false`): - * `false`: Uses default styles for HTML logs. - * `<<"path to custom CSS file">>`: Links custom CSS inside HTML logs. Please note it won't be copied to the logs directory but the given path will be linked in HTML files instead. 
-* `timezone` (atom, default: `local`): - * `local`: Uses the local server timezone in dates written into the logs. - * `universal`: Uses GMT in dates written into the logs. -* `top_link` (default: `{"/", "Home"}): Allows setting a custom link at the top of the HTML log file. - First tuple element is the link target and the second one is the text to be displayed. - You can put any HTML instead of just plain text. -* `spam_prevention` (boolean, default: `true`): When enabled, MongooseIM will enforce `rel="nofollow"` attribute in links sent by user and written to MUC logs. +#### `modules.mod_muc_log.outdir` +* **Syntax:** string +* **Default:** `"www/muc"` +* **Example:** `outdir = "www/muc"` +Filesystem directory where the files are stored. + +#### `modules.mod_muc_log.access_log` +* **Syntax:** non-empty string +* **Default:** `"muc_admin"` +* **Example:** `access_log = "muc_admin"` + +ACL that defines who can enable/disable logging for specific rooms. + +#### `modules.mod_muc_log.dirtype` +* **Syntax:** string, one of `"subdirs"`, `"plain"` +* **Default:** `"subdirs"` +* **Example:** `dirtype = "subdirs"` + +Specifies the log directory structure: + +* `"subdirs"`: Module will use the following directory structure `[Logs root]/[dirname]/YYYY/MM/` with file names being `DD.[extension]`. +* `"plain"`: Module will use the following directory structure `[Logs root]/[dirname]/` with file names being `YYYY-MM-DD.[extension]`. + +#### `modules.mod_muc_log.dirname` +* **Syntax:** string, one of `"room_jid"`, `"room_name"` +* **Default:** `"room_jid"` +* **Example:** `dirname = "room_jid"` + +Specifies directory name created for each room: + +* `"room_jid"`: Uses the room bare JID. +* `"room_name"`: Uses the room name from its configuration. 
+ + +#### `modules.mod_muc_log.file_format` +* **Syntax:** string, one of `"html"`, `"plaintext"` +* **Default:** `"html"` +* **Example:** `file_format = "html"` + +Specifies the format of output files: + +* `"html"`: The output is a fancy-formatted HTML page. +* `"plaintext"`: Just a text file, better suited for processing than HTML. + +#### `modules.mod_muc_log.css_file` +* **Syntax:** non-empty string +* **Default:** `"false"` +* **Example:** `css_file = "path/to/css/file"` + +Specifies the css file used for logs rendering: + +* `"false"`: Uses default styles for HTML logs. +* `path to custom CSS file`: Links custom CSS inside HTML logs. Please note it won't be copied to the logs directory but the given path will be linked in HTML files instead. + +#### `modules.mod_muc_log.timezone` +* **Syntax:** string, one of `"local"`, `"universal"` +* **Default:** `"local"` +* **Example:** `timezone = "universal"` + +Specifies the timezone to be used in timestamps written into the logs: + +* `local`: Uses the local server timezone. +* `universal`: Uses GMT. + +#### `modules.mod_muc_log.top_link` +* **Syntax:** TOML table with the following keys: `"target"`, `"text"` and string values. +* **Default:** `{target = "", text = ""}` +* **Example:** `top_link = {target = "/", text = "Home"}` + +Allows setting a custom link at the top of the HTML log file. +The `target` key is the link target and the `text` key is the text to be displayed. +You can put any HTML instead of just plain text. + +#### `modules.mod_muc_log.spam_prevention` +* **Syntax:** boolean +* **Default:** `true` +* **Example:** `spam_prevention = false` + +When enabled, MongooseIM will enforce `rel="nofollow"` attribute in links sent by user and written to MUC logs. 
### Example Configuration ``` - {mod_muc_log, - [ - {outdir, "/tmp/muclogs"}, - {access_log, muc} - ]}, +[modules.mod_muc_log] + outdir = "/tmp/muclogs" + access_log = "muc" + dirtype = "plain" + dirname = "room_name" + file_format = "html" + css_file = "path/to/css/file" + timezone = "universal" + top_link.target = "/" + top_link.text = "Home" ``` diff --git a/doc/modules/mod_offline.md b/doc/modules/mod_offline.md index e751e10cba9..e20fc8327d8 100644 --- a/doc/modules/mod_offline.md +++ b/doc/modules/mod_offline.md @@ -5,16 +5,35 @@ It is not well suited for applications supporting multiple user devices, because Although `mod_offline` may be sufficient in some cases, it is preferable to use [mod_mam](mod_mam.md). ### Options -* `access_max_user_messages` (atom, default: `max_user_offline_messages`): Access Rule to use for limiting the storage size per user. -* `backend` (atom, default: `mnesia`): Storage backend. Currently `mnesia`, `rdbms` and `riak` are supported. +#### `modules.mod_offline.access_max_user_messages` + * **Syntax:** non-empty string + * **Default:** `"max_user_offline_messages"` + * **Example:** `access_max_user_messages = "custom_max_user_offline_messages"` + + Access Rule to use for limiting the storage size per user. + +#### `modules.mod_offline.backend` + * **Syntax:** string, one of `mnesia`, `rdbms`, `riak` + * **Default:** `"mnesia"` + * **Example:** `backend = "rdbms"` -##### Riak-specific options + Storage backend. -* `bucket_type` (default `<<"offline">>`) - Riak bucket type. +### Riak-specific options + +#### `modules.mod_offline.riak.bucket_type` + * **Syntax:** non-empty string + * **Default:** `"offline"` + * **Example:** `bucket_type = "offline_bucket_type"` + +Riak bucket type. 
### Example Configuration ``` -{mod_offline, [{access_max_user_messages, max_user_offline_messages}]}, +[modules.mod_offline] + access_max_user_messages = "max_user_offline_messages" + backend = "riak" + riak.bucket_type = "offline" ``` ### Metrics diff --git a/doc/modules/mod_offline_stub.md b/doc/modules/mod_offline_stub.md index 6dc4b421c24..d9d209cbeee 100644 --- a/doc/modules/mod_offline_stub.md +++ b/doc/modules/mod_offline_stub.md @@ -12,6 +12,6 @@ None. ### Example Configuration ``` -{mod_offline_stub, []}, +[modules.mod_offline_stub] ``` diff --git a/doc/modules/mod_ping.md b/doc/modules/mod_ping.md index 9b0959eb9d3..526ba4792c2 100644 --- a/doc/modules/mod_ping.md +++ b/doc/modules/mod_ping.md @@ -2,19 +2,51 @@ This module implements XMPP Ping functionality as described in [XEP-0199: XMPP Ping](http://www.xmpp.org/extensions/xep-0199.html). - ### Options -* `send_pings` (boolean, default `false`): If set to true, the server will send ping iqs to the client if they are not active for a `ping_interval`. -* `ping_interval` (seconds, default `60`): Defines the client inactivity timeout after which the server will send a ping request if the above option is set to `true`. -* `timeout_action` (`none` | `kill`, default `none`): Defines if the client connection should be closed if it doesn't reply to a ping request in less than `ping_req_timeout`. -* `ping_req_timeout` (seconds, default `32`) Defines how long the server waits for the client to reply to the ping request. -* `iqdisc` (default: `no_queue`) +#### `modules.mod_ping.send_pings` +* **Syntax:** boolean +* **Default:** `false` +* **Example:** `send_pings = true` + +If set to true, the server will send ping iqs to the client if they are not active for a `ping_interval`. 
+ +#### `modules.mod_ping.ping_interval` +* **Syntax:** positive integer +* **Default:** `60` +* **Example:** `ping_interval = 30` + +Defines the client inactivity timeout after which the server will send a ping request if the above option is set to `true`. + +#### `modules.mod_ping.timeout_action` +* **Syntax:** string, one of `"none"`, `"kill"` +* **Default:** `"none"` +* **Example:** `timeout_action = "kill"` + +Defines if the client connection should be closed if it doesn't reply to a ping request in less than `ping_req_timeout`. + +#### `modules.mod_ping.ping_req_timeout` +* **Syntax:** positive integer +* **Default:** `32` +* **Example:** `ping_req_timeout = 60` + +Defines how long the server waits for the client to reply to the ping request. + +#### `modules.mod_ping.iqdisc.type` +* **Syntax:** string, one of `"one_queue"`, `"no_queue"`, `"queues"`, `"parallel"` +* **Default:** `"no_queue"` + +Strategy to handle incoming stanzas. For details, please refer to +[IQ processing policies](../../advanced-configuration/Modules/#iq-processing-policies). ### Example Configuration ``` - {mod_ping, [{send_pings, true}]}, +[modules.mod_ping] + send_pings = true + ping_interval = 60 + timeout_action = "none" + ping_req_timeout = 32 ``` ### Metrics diff --git a/doc/modules/mod_privacy.md b/doc/modules/mod_privacy.md index 8bad2bd472d..2c7c59e8c71 100644 --- a/doc/modules/mod_privacy.md +++ b/doc/modules/mod_privacy.md @@ -1,20 +1,43 @@ ### Module Description -This module implements [XEP-0016: Privacy Lists](http://xmpp.org/extensions/xep-0016.html). This extension allows user to block IQs, messages, presences, or all, based on JIDs, subscription, and roster groups. +This module implements [XEP-0016: Privacy Lists](http://xmpp.org/extensions/xep-0016.html). +This extension allows user to block IQs, messages, presences, or all, based on JIDs, subscription, and roster groups. ### Options -* `backend` (atom, default: `mnesia`): Storage backend. 
Currently supported are `mnesia`, `rdbms` and `riak`. -### Example Configuration -``` -{mod_privacy, []}, -``` +#### `modules.mod_privacy.backend` +* **Syntax:** string, one of `"mnesia"`, `"rdbms"`, `"riak"`. +* **Default:** `"mnesia"` +* **Example:** `backend = "mnesia"` + +### Riak-specific options + +#### `modules.mod_privacy.riak.defaults_bucket_type` +* **Syntax:** string. +* **Default:** `"privacy_defaults"` +* **Example:** `riak.defaults_bucket_type = "privacy_defaults"` + +Riak bucket type for information about default list name. +#### `modules.mod_privacy.riak.names_bucket_type` +* **Syntax:** string. +* **Default:** `"privacy_lists_names"` +* **Example:** `riak.names_bucket_type = "privacy_lists_names"` -##### Riak-specific options +Riak bucket type for information about privacy list names. -* `defaults_bucket_type` (default `<<"privacy_defaults">>`) - Riak bucket type for information about default list name. -* `names_bucket_type` (default `<<"privacy_lists_names">>`) - Riak bucket type for information about privacy list names. -* `bucket_type` (default `<<"privacy_lists">>`) - Riak bucket type for privacy lists. +#### `modules.mod_privacy.riak.bucket_type` +* **Syntax:** string. +* **Default:** `"privacy_lists"` +* **Example:** `riak.bucket_type = "privacy_lists"` + +Riak bucket type for privacy lists. + +### Example Configuration +``` +[modules.mod_privacy] + backend = "riak" + riak.defaults_bucket_type = "privacy_defaults" +``` ### Metrics diff --git a/doc/modules/mod_private.md b/doc/modules/mod_private.md index ecc4331f4d6..a0fce15d537 100644 --- a/doc/modules/mod_private.md +++ b/doc/modules/mod_private.md @@ -1,21 +1,41 @@ ### Module Description -This module implements [XEP-0049: Private XML Storage](http://xmpp.org/extensions/xep-0049.html), allowing users to store custom XML data in the server's database. Used e.g. for storing roster groups separator. 
+This module implements [XEP-0049: Private XML Storage](http://xmpp.org/extensions/xep-0049.html). +It allows users to store custom XML data in the server's database. Used e.g. for storing roster groups separator. ### Options -* `iqdisc` (default: `one_queue`) -* `backend` (atom, default: `mnesia`): Storage backend. Currently `mnesia`, `rdbms`, `riak` and `mysql` are supported . `mysql` uses MySQL-specific queries so in some cases it is more efficient than generic `rdbms`. -**CAUTION:** Riak KV backend doesn't support transactions (rollbacks), so please avoid inserting more -than one value in a single set request, otherwise you may end up with partially saved data. Backend returns the -first error. +#### `modules.mod_private.iqdisc.type` +* **Syntax:** string, one of `"one_queue"`, `"no_queue"`, `"queues"`, `"parallel"` +* **Default:** `"one_queue"` + +Strategy to handle incoming stanzas. For details, please refer to +[IQ processing policies](../../advanced-configuration/Modules/#iq-processing-policies). + +#### `modules.mod_private.backend` +* **Syntax:** string, one of `"mnesia"`, `"rdbms"`, `"riak"`, `"mysql"`. +* **Default:** `"mnesia"` +* **Example:** `backend = "mnesia"` + +Database backend to use. +`mysql` uses MySQL-specific queries so in some cases it is more efficient than generic `rdbms`. + +**CAUTION:** Riak KV backend doesn't support transactions (rollbacks), so please avoid inserting +more than one value in a single set request, otherwise you may end up with partially saved data. +Backend returns the first error. ##### Riak-specific options -* `bucket_type` (default `<<"private">>`) - Riak bucket type. +###### `modules.mod_private.riak.bucket_type` +* **Syntax:** string +* **Default:** `"private"` +* **Example:** `bucket_type = "private"` + +Riak bucket type. 
### Example Configuration ``` -{mod_private, []} +[modules.mod_private] + backend = "mnesia" ``` ### Metrics @@ -26,4 +46,3 @@ If you'd like to learn more about metrics in MongooseIM, please visit [MongooseI | ---- | -------------------------------------- | | `multi_get_data` | XML data is fetched from a DB. | | `multi_set_data` | XML data is stored in a DB. | - diff --git a/doc/modules/mod_pubsub.md b/doc/modules/mod_pubsub.md index bb9a4d4c5fa..da6c5d546f7 100644 --- a/doc/modules/mod_pubsub.md +++ b/doc/modules/mod_pubsub.md @@ -13,25 +13,109 @@ It's all about tailoring PubSub to your needs! ### Options -* `iqdisc` (default: `one_queue`) -* `host` (string, default: `"pubsub.@HOST@"`): Subdomain for Pubsub service to reside under. +#### `modules.mod_pubsub.iqdisc.type` +* **Syntax:** string, one of `"one_queue"`, `"no_queue"`, `"queues"`, `"parallel"` +* **Default:** `"one_queue"` + +Strategy to handle incoming stanzas. For details, please refer to +[IQ processing policies](../../advanced-configuration/Modules/#iq-processing-policies). + +#### `modules.mod_pubsub.host` +* **Syntax:** string +* **Default:** `"pubsub.@HOST@"` +* **Example:** `host = "pubsub.localhost"` + +Subdomain for Pubsub service to reside under. `@HOST@` is replaced with each served domain. -* `backend` (atom, default: `mnesia`) - Database backend to use. `mnesia` and `rdbms` are supported currently. -* `access_create` (atom, default: `all`): Who is allowed to create pubsub nodes. -* `max_items_node` (integer, default: `10`): Define the maximum number of items that can be stored in a node. -* `max_subscriptions_node` (integer, default: `undefined` - no limitation): The maximum number of subscriptions managed by a node. -* `nodetree` (binary, default: `<<"tree">>`): Specifies the storage and organisation of the pubsub nodes. See the section below. 
-* `ignore_pep_from_offline` (boolean, default: `true`): specify whether or not we should get last published PEP items from users in our roster which are offline when we connect. + +#### `modules.mod_pubsub.backend` +* **Syntax:** string, one of `"mnesia"`, `"rdbms"` +* **Default:** `"mnesia"` +* **Example:** `backend = "rdbms"` + +Database backend to use. + +#### `modules.mod_pubsub.access_createnode` +* **Syntax:** string, rule name, or `"all"` +* **Default:** `"all"` +* **Example:** `access_createnode = "all"` + +Specifies who is allowed to create pubsub nodes. The access rule referenced here needs to be defined in the [access](../../advanced-configuration/access) section. + +#### `modules.mod_pubsub.max_items_node` +* **Syntax:** non-negative integer +* **Default:** `10` +* **Example:** `max_items_node = 10` + +Defines the maximum number of items that can be stored in a node. + +#### `modules.mod_pubsub.max_subscriptions_node` +* **Syntax:** non-negative integer +* **Default:** not specified (no limit) +* **Example:** `max_subscriptions_node = 10` + +The maximum number of subscriptions managed by a node. By default there is no limit. + +#### `modules.mod_pubsub.nodetree` +* **Syntax:** string +* **Default:** `"tree"` +* **Example:** `nodetree = "tree"` + +Specifies the storage and organisation of the pubsub nodes. See the section below. + +#### `modules.mod_pubsub.ignore_pep_from_offline` +* **Syntax:** boolean +* **Default:** `true` +* **Example:** `ignore_pep_from_offline = false` + +Specifies whether or not we should get last published PEP items from users in our roster which are offline when we connect. The default option is `true` hence we will get only the last items from the online contacts. -* `last_item_cache` (atom, default `false`): If enabled, PubSub will cache the last published items in the nodes. It may increase PubSub performance but at a price of an increased memory usage. Valid values are `mnesia`, `rdbms` and `false`. 
-* `plugins` ([Plugin, ...], default: `[<<"flat">>]`): List of enabled pubsub plugins. -* `pep_mapping` ([{Key, Value}, ...]): This permits creating a Key-Value list to define a custom node plugin on a given PEP namespace. + +#### `modules.mod_pubsub.last_item_cache` +* **Syntax:** string, one of `"mnesia"`, `"rdbms"`, `"false"` +* **Default:** `"false"` +* **Example:** `last_item_cache = "mnesia"` + +If enabled, PubSub will cache the last published items in the nodes. It may increase PubSub performance but at a price of an increased memory usage. + +#### `modules.mod_pubsub.plugins` +* **Syntax:** array of strings +* **Default:** `["flat"]` +* **Example:** `plugins = ["flat", "pep"]` + +List of enabled pubsub plugins. + +#### `modules.mod_pubsub.pep_mapping` +* **Syntax:** an array of TOML tables with the following keys: `"namespace"`, `"node"` and string values. +* **Default:** `[]` +* **Example:** `pep_mapping = [{namespace = "urn:xmpp:microblog:0", node = "mb"}]` + +This permits creating a Key-Value list to define a custom node plugin on a given PEP namespace. E.g. pair `{"urn:xmpp:microblog:0", "mb"}` will use module `node_mb` instead of `node_pep` when the specified namespace is used. -* `default_node_config` ([{Key, Value}, ...]): Overrides the default node configuration, regradless of the node plugin. + + +#### `modules.mod_pubsub.default_node_config` +* **Syntax:** TOML table with the following values: string, boolean or non-negative integer. +* **Default:** `[]` +* **Example:** `default_node_config = {deliver_payloads = true, max_payload_size = 10000, node_type = "leaf"}` + +Overrides the default node configuration, regardless of the node plugin. Node configuration still uses the default configuration defined by the node plugin, and overrides any items by the value defined in this configurable list. -* `item_publisher` (boolean, default: `false`): When enabled, a JID of the publisher will be saved in the item metadata. 
- This effectively makes them an owner of this item. -* `sync_broadcast` (boolean, default: `false`): If false, routing of notifications to subscribers is done in a separate Erlang process. As a consequence, some notifications *may* arrive to the subscribers in the wrong order (however, the two events would have to be published at the exact same time). + +#### `modules.mod_pubsub.item_publisher` +* **Syntax:** boolean +* **Default:** `false` +* **Example:** `item_publisher = false` + +When enabled, a JID of the publisher will be saved in the item metadata. +This effectively makes them an owner of this item. + +#### `modules.mod_pubsub.sync_broadcast` +* **Syntax:** boolean +* **Default:** `false` +* **Example:** `sync_broadcast = false` + +If false, routing of notifications to subscribers is done in a separate Erlang process. As a consequence, some notifications *may* arrive to the subscribers in the wrong order (however, the two events would have to be published at the exact same time). #### Cache Backend @@ -42,13 +126,17 @@ It is not coupled with the main DB backend, so it is possible to store the cache ### Example Configuration ``` - {mod_pubsub, [{access_createnode, pubsub_createnode}, - {ignore_pep_from_offline, false}, - {backend, rdbms}, - {last_item_cache, mnesia}, - {max_items_node, 1000}, - {plugins, [<<"flat">>, <<"pep">>]} - ]}, +[modules.mod_pubsub] + access_createnode = "pubsub_createnode" + ignore_pep_from_offline = false + backend = "rdbms" + last_item_cache = "mnesia" + max_items_node = 1000 + plugins = ["flat", "pep"] + + [[modules.mod_pubsub.pep_mapping]] + namespace = "urn:xmpp:microblog:0" + node = "mb" ``` ### Nodetrees @@ -56,7 +144,7 @@ It is not coupled with the main DB backend, so it is possible to store the cache Called on `get`, `create` and `delete` node. Only one nodetree can be used per host and is shared by all node plugins. -#### `<<"tree">>` +#### `"tree"` Stores nodes in a tree structure. 
Every node name must be formatted like a UNIX path (e.g. `/top/middle/leaf`). @@ -65,10 +153,10 @@ A user may create any top-level node. A user may create a subnode of a node, only if they own it or it was created by the service. -#### `<<"dag">>` +#### `"dag"` Provides experimental support for [XEP-0248 (PubSub Collection Nodes)](http://xmpp.org/extensions/xep-0248.html). -In this case you should also add the `<<"dag">>` node plugin as default, for example: `{plugins, [<<"dag">>,<<"flat">>,<<"hometree">>,<<"pep">>]}` +In this case you should also add the `"dag"` node plugin as default, for example: `plugins = ["dag", "flat", "hometree", "pep"]`. ### Plugins @@ -76,31 +164,31 @@ They handle affiliations, subscriptions and items and also provide default node PubSub clients can define which plugin to use when creating a node by adding `type='plugin-name'` attribute to the create stanza element. If such an attribute is not specified, the default plugin will be the first on the plugin list. -#### `<<"flat">>` +#### `"flat"` No node hierarchy. It handles the standard PubSub case. -#### `<<"hometree">>` +#### `"hometree"` Uses the exact same features as the flat plugin but additionally organises nodes in a tree. Basically it follows a scheme similar to the filesystem's structure. Every user can create nodes in their own home root: e.g `/home/user`. Each node can contain items and/or sub-nodes. -#### `<<"pep">>` +#### `"pep"` Implementation of [XEP-0060 (Personal Eventing Protocol)](http://xmpp.org/extensions/xep-0163.html). In this case, items are not persisted but kept in an in-memory cache. When the `pep` plugin is enabled, a user can have their own node (exposed as their bare jid) with a common namespace. Requires module `mod_caps` to be enabled. -#### `<<"dag">>` +#### `"dag"` Implementation of [XEP-0248 (PubSub Collection Nodes)](https://xmpp.org/extensions/xep-0248.html). 
Every node takes a place in a collection and becomes either a collection node (and have only sub-nodes) or a leaf node (contains only items). -#### `<<"push">>` +#### `"push"` Special node type that may be used as a target node for [XEP-0357 (Push Notifications)](https://xmpp.org/extensions/xep-0357.html) capable services (e.g. `mod_event_pusher_push`). For each published notification, a hook `push_notification` is run. @@ -176,4 +264,3 @@ Metrics for these actions may be found under `mod_pubsub_db` subkey. | `get_subnodes` | Subnodes of a node are fetched. | | `get_subnodes_tree` | Full tree of subnodes of a node is fetched. | | `get_parentnodes_tree` | All parents of a node are fetched. | - diff --git a/doc/modules/mod_push_service_mongoosepush.md b/doc/modules/mod_push_service_mongoosepush.md index 7d85f6305b3..acc20f87fff 100644 --- a/doc/modules/mod_push_service_mongoosepush.md +++ b/doc/modules/mod_push_service_mongoosepush.md @@ -11,21 +11,41 @@ It must be defined in [outgoing_pools setting](../advanced-configuration/outgoin ### Options -* **pool_name** (atom, required) - name of the pool to use (as defined in `outgoing_pools`) -* **api_version** (string, default: `v3`) - REST API version to be used. -* **max_http_connections** (integer, default: 100) - the maximum amount of concurrent http connections +#### `modules.mod_push_service_mongoosepush.pool_name` +* **Syntax:** non-empty string +* **Default:** `"undefined"` +* **Example:** `pool_name = "mongoose_push_http"` + +The name of the pool to use (as defined in `outgoing_pools`). + +#### `modules.mod_push_service_mongoosepush.api_version` +* **Syntax:** string +* **Default:** `"v3"` +* **Example:** `api_version = "v3"` + +REST API version to be used. + +#### `modules.mod_push_service_mongoosepush.max_http_connections` +* **Syntax:** non-negative integer +* **Default:** `100` +* **Example:** `max_http_connections = 100` + +The maximum amount of concurrent HTTP connections. 
### Example configuration -```Erlang -{outgoing_pools, - {http, global, mongoose_push_http, [], - [{server, "https://localhost:8443"}] - } -]}. - -{mod_push_service_mongoosepush, [ - {pool_name, mongoose_push_http} - {api_version, "v3"} -]}. +``` +[outgoing_pools.http.mongoose_push_http] + scope = "global" + workers = 50 + + [outgoing_pools.http.mongoose_push_http.connection] + host = "https://localhost:8443" + path_prefix = "/" + request_timeout = 2000 + +[modules.mod_push_service_mongoosepush] + pool_name = "mongoose_push_http" + api_version = "v3" + max_http_connections = 100 ``` diff --git a/doc/modules/mod_register.md b/doc/modules/mod_register.md index 7c162635df2..8b2358df55e 100644 --- a/doc/modules/mod_register.md +++ b/doc/modules/mod_register.md @@ -3,27 +3,75 @@ This module implements [XEP-0077: In-Band Registration](http://xmpp.org/extensio ### Options -* `iqdisc` (default: `one_queue`) -* `access` (atom, default: `all`): Defines which ACL should be used for checking if a chosen username is allowed for registration. -* `welcome_message` (`{Subject :: string(), Body :: string()}`, default: `{"", ""}`): Body and subject of a `` stanza sent to new users. -* `registration_watchers` (list of binaries, default: `[]`): List of JIDs, which should receive a `` notification about every successful registration. -* `password_strength` (non-negative integer, default: 0): Specifies minimal entropy of allowed password. - Entropy is measured with `ejabberd_auth:entropy/1`. - Recommended minimum is 32. - The entropy calculation algorithm is described in a section below. -* `ip_access` (list of `{deny|allow, StringIP|StringSubnet, default: `[]`): Access list for specified IPs or networks. - Default value allows registration from every IP. +#### `modules.mod_register.iqdisc.type` +* **Syntax:** string, one of `"one_queue"`, `"no_queue"`, `"queues"`, `"parallel"` +* **Default:** `"one_queue"` + +Strategy to handle incoming stanzas. 
For details, please refer to +[IQ processing policies](../../advanced-configuration/Modules/#iq-processing-policies). + +#### `modules.mod_register.access` +* **Syntax:** string, rule name or `"all"` +* **Default:** `"all"` +* **Example:** `access = "all"` + +Defines which [access rule](../../advanced-configuration/access#registration) should be used for checking if a chosen username is allowed for registration. + +#### `modules.mod_register.welcome_message` +* **Syntax:** TOML table with the following keys: `"body"`, `"subject"` and string values. +* **Default:** `{subject = "", body = ""}` +* **Example:** `welcome_message = {subject = "Hello from MIM!", body = "Message body."}` + +Body and subject of a `` stanza sent to new users. Only one of the fields (but non-empty) is mandatory for the message to be sent. + +#### `modules.mod_register.registration_watchers` +* **Syntax:** array of strings +* **Default:** `[]` +* **Example:** `registration_watchers = ["JID1", "JID2"]` + +List of JIDs, which should receive a `` notification about every successful registration. + +#### `modules.mod_register.password_strength` +* **Syntax:** non-negative integer +* **Default:** `0` +* **Example:** `password_strength = 32` + +Specifies minimal entropy of allowed password. +Entropy is measured with `ejabberd_auth:entropy/1`. +Recommended minimum is 32. +The entropy calculation algorithm is described in a section below. + +#### `modules.mod_register.ip_access` +* **Syntax:** Array of TOML tables with the following mandatory content: + - `address` - string, IP address + - `policy` - string, one of: `"allow"`, `"deny"`. +* **Default:** `[]` +* **Example:** `ip_access = [ + {address = "127.0.0.0/8", policy = "allow"}, +{address = "0.0.0.0/0", policy = "deny"} +]` + +Access list for specified IPs or networks. +Default value allows registration from every IP. 
### Example configuration Allow registrations from localhost: ``` -{mod_register, [{allow, "127.0.0.1"}]} +[modules.mod_register] + welcome_message = {subject = "Hello from MIM!", body = "Message body."} + ip_access = [ + {address = "127.0.0.1", policy = "allow"} + ] + access = "register" ``` Deny registration from network 10.20.0.0 with mask 255.255.0.0. ``` -{mod_register, [{deny, "10.20.0.0/16"}]} +[modules.mod_register] + ip_access = [ + {address = "10.20.0.0/16", policy = "deny"} + ] ``` ### Metrics @@ -59,4 +107,3 @@ Where `X` is initially set to 0 and certain values are added if at least one of * `CamelCase`: ~51.3 * `lowUP1#:`: ~45.9 * `lowUP1#❤`: ~78 - diff --git a/doc/modules/mod_revproxy.md b/doc/modules/mod_revproxy.md index f60fe79793a..39b9498d1df 100644 --- a/doc/modules/mod_revproxy.md +++ b/doc/modules/mod_revproxy.md @@ -1,55 +1,41 @@ ### Module Description -MongooseIM can be used as a reverse proxy thanks to `mod_revproxy` module. -To enable this functionality, add a new entry to the listeners and modules sections in the mongooseim.cfg file. - -#### Setting up listener - -Add the following entry in the listeners section: - -```Erlang - { {8090, ejabberd_cowboy, [ - {num_acceptors, 10}, - {max_connections, 1024}, - {modules, [ - - %% Example usage of mod_revproxy, please note that mod_revproxy - %% needs to be included in MODULES as well. - {"_", "/[...]", mod_revproxy, [{timeout, 5000}, - % time limit for upstream to respond - {body_length, 8000000}, - % maximum body size (may be infinity) - {custom_headers, [{<<"header">>,<<"value">>}]} - % list of extra headers that are send to upstream - ]}, - - ]} - ]}, +MongooseIM can be used as a reverse proxy thanks to `mod_revproxy` module. +To enable this functionality, configure the appropriate listener and change the +module options in `mongooseim.toml`. +#### Configuring routes -``` +To define reverse proxy rules, add entries defining routes to `modules.mod_revproxy.routes`. 
-For more details about the listeners configuration please take a look at [ejabberd_cowboy section on Listeners config page](../advanced-configuration/Listener-modules.md#ejabberd_cowboy) +#### `modules.mod_revproxy.routes.host` +* **Syntax:** non-empty string +* **Default:** no default +* **Example:** `host = "www.erlang-solutions.com"` -#### Configuring routes +#### `modules.mod_revproxy.routes.path` +* **Syntax:** string +* **Default:** no default +* **Example:** `path = "/"` -To define reverse proxy rules, add the following entry to the modules section. +#### `modules.mod_revproxy.routes.method` +* **Syntax:** string +* **Default:** `"_"` +* **Example:** `method = "_"` -```Erlang -{mod_revproxy, - [{routes, [{"www.erlang-solutions.com", "/admin", "_", - "https://www.erlang-solutions.com/"}, - {":var.com", "/:var", "_", "http://localhost:8080/"}, - {":domain.com", "/", "_", "http://localhost:8080/:domain"}] - }]}, -``` +#### `modules.mod_revproxy.routes.upstream` +* **Syntax:** non-empty string +* **Default:** no default +* **Example:** `upstream = "https://www.erlang-solutions.com/"` -Routes are defined in the options of mod_revproxy module using either `{Host, Path, Method, Upstream}` or `{Host, Path, Upstream}`. -The latter one is the equivalent of `{Host, Path, "_", Upstream}`. -"_" can be used as a wildcard for `Host`, `Path` and `Method` and it matches on everything. +Routes are defined in the options of mod_revproxy module using `host`, `path`, +`method` and `upstream` keys. All except `method` are mandatory. +`"_"` can be used as a wildcard for `host`, `path` and `method` and it matches on everything. Upstreams can be defined either by host (just `http(s)://host:port`) or URI. -The difference between them is that the host upstreams are concatenated by the whole request path while the URI upstreams are concatenated only by the remainder that follows the matched `Path`. 
+The difference between them is that the host upstreams are concatenated by the +whole request path while the URI upstreams are concatenated only by the remainder +that follows the matched `path`. This behaviour is similar to the nginx's proxy_pass rules. Moreover, bindings may be used to match certain parts of host and/or path. @@ -62,3 +48,22 @@ For example, for the shown example configuration, requests for: * `Host: www.erlang-solutions.com /admin/resources/case-studies` will be rewritten to `https://www.erlang-solutions.com/resources/case-studies` (rule 1) * `Host: domain.com /domain/index.html` will be rewritten to `http://localhost:8080/index.html` (rule 2, since binding `:var` matches in both host and path) * `Host: abc.com /def` will be rewritten to `http://localhost:8080/abc/def` (rule 3) + +``` +[[modules.mod_revproxy.routes]] + host = "www.erlang-solutions.com" + path = "/admin" + method = "_" + upstream = "https://www.erlang-solutions.com/" + +[[modules.mod_revproxy.routes]] + host = ":var.com" + path = "/:var" + upstream = "http://localhost:8080/" + +[[modules.mod_revproxy.routes]] + host = ":domain.com" + path = "/" + method = "_" + upstream = "http://localhost:8080/:domain" +``` diff --git a/doc/modules/mod_roster.md b/doc/modules/mod_roster.md index ace4abb9185..037b4bbb97a 100644 --- a/doc/modules/mod_roster.md +++ b/doc/modules/mod_roster.md @@ -1,34 +1,60 @@ ### Module Description -The module implements roster support, specified in [RFC 6121](http://xmpp.org/rfcs/rfc6121.html). -Includes support for [XEP-0237: Roster Versioning](http://xmpp.org/extensions/xep-0237.html). +The module implements roster support, specified in [RFC 6121](http://xmpp.org/rfcs/rfc6121.html). +Includes support for [XEP-0237: Roster Versioning](http://xmpp.org/extensions/xep-0237.html). It can sometimes become quite a heavyweight feature, so there is an option to disable it. 
### Options -* `iqdisc` (default: `one_queue`) -* `versioning` (boolean, default: `false`): Turn on/off support for Roster Versioning. -* `store_current_id` (boolean, default: `false`): Stores the last roster hash in DB (used in Roster Versioning). - Improves performance but should be disabled, when shared rosters are used. -* `backend` (atom, default: `mnesia`): Storage backend. - Currently `mnesia`, `rdbms` and `riak` are supported. +#### `modules.mod_roster.iqdisc.type` +* **Syntax:** string, one of `"one_queue"`, `"no_queue"`, `"queues"`, `"parallel"` +* **Default:** "one_queue" + +Strategy to handle incoming stanzas. For details, please refer to +[IQ processing policies](../../advanced-configuration/Modules/#iq-processing-policies). + +#### `modules.mod_roster.versioning` +* **Syntax:** boolean +* **Default:** `false` +* **Example:** `versioning = true` + +Turn on/off support for Roster Versioning. + +#### `modules.mod_roster.store_current_id` +* **Syntax:** boolean +* **Default:** `false` +* **Example:** `store_current_id = true` + +Stores the last roster hash in DB (used in Roster Versioning). +Improves performance but should be disabled, when shared rosters are used. + +#### `modules.mod_roster.backend` +* **Syntax:** string, one of `"mnesia"`, `"rdbms"`, `"riak"` +* **Default:** `"mnesia"` +* **Example:** `backend = "mnesia"` ### Example configuration ``` -{mod_roster, [ - {versioning, true}, - {store_current_id, true} - ]} +[modules.mod_roster] + versioning = true + store_current_id = true ``` ##### Riak-specific options -* `bucket_type` (default `<<"rosters">>`) - Riak bucket type. 
+#### `modules.mod_roster.riak.bucket_type` +* **Syntax:** string +* **Default:** `"rosters"` +* **Example:** `riak.bucket_type = "rosters"` -* `version_bucket_type` (default `<<"roster_versions">>`) - Riak bucket type for versions information +#### `modules.mod_roster.riak.version_bucket_type` +* **Syntax:** string +* **Default:** `"roster_versions"` +* **Example:** `riak.version_bucket_type = "roster_versions"` ### Metrics -If you'd like to learn more about metrics in MongooseIM, please visit [MongooseIM metrics](../operation-and-maintenance/Mongoose-metrics.md) page. +If you'd like to learn more about metrics in MongooseIM, +please visit [MongooseIM metrics](../operation-and-maintenance/Mongoose-metrics.md) page. | Backend action | Description (when it gets incremented) | | ---- | -------------------------------------- | @@ -41,4 +67,3 @@ If you'd like to learn more about metrics in MongooseIM, please visit [MongooseI | `roster_subscribe_t` | A subscription status between users is updated inside a transaction. | | `update_roster_t` | A roster entry is updated in a transaction. | | `del_roster_t` | A roster entry is removed inside a transaction. | - diff --git a/doc/modules/mod_shared_roster_ldap.md b/doc/modules/mod_shared_roster_ldap.md index 05005adebdd..1493d61f92e 100644 --- a/doc/modules/mod_shared_roster_ldap.md +++ b/doc/modules/mod_shared_roster_ldap.md @@ -8,41 +8,139 @@ If it is not defined, XXX becomes the default value. ### Options: general -* `ldap_pool_tag`, `ldap_base`, `ldap_deref`: these options are the same as for the [LDAP authentication module](../authentication-backends/LDAP-authentication-module.md#configuration-options). +#### `modules.mod_shared_roster_ldap.ldap_pool_tag` +#### `modules.mod_shared_roster_ldap.ldap_base` +#### `modules.mod_shared_roster_ldap.ldap_deref` + +These 3 options are the same as for the [LDAP authentication module](../../authentication-methods/ldap#configuration-options). 
### Options: attributes -* `ldap_groupattr` (string, default: `"cn"`): Provides a group name. -* `ldap_groupdesc` (string, default: value of `ldap_groupattr`): Provides a group description. -* `ldap_userdesc` (string, default: `"cn"`): Provides a human-readable user name. -* `ldap_useruid` (string, default: `"cn"`): Provides a username. -* `ldap_memberattr` (string, default: `"memberUid"`): Holds group members' IDs. -* `ldap_memberattr_format` (string, default: `"%u"`): Simple LDAP expression for extracting a user ID. -* `ldap_memberattr_format_re` (string, default: `""`): Allows extracting the user ID with a regular expression. +#### `modules.mod_shared_roster_ldap.ldap_groupattr` +* **Syntax:** string +* **Default:** `"cn"` +* **Example:** `ldap_groupattr = "cn"` + +Provides a group name. + +#### `modules.mod_shared_roster_ldap.ldap_groupdesc` +* **Syntax:** string +* **Default:** the value of `ldap_groupattr` +* **Example:** `ldap_groupdesc = "cn"` + +Provides a group description. + +#### `modules.mod_shared_roster_ldap.ldap_userdesc` +* **Syntax:** string +* **Default:** `"cn"` +* **Example:** `ldap_userdesc = "cn"` + +Provides a human-readable user name. + +#### `modules.mod_shared_roster_ldap.ldap_useruid` +* **Syntax:** string +* **Default:** `"cn"` +* **Example:** `ldap_useruid = "cn"` + +Provides a username. + +#### `modules.mod_shared_roster_ldap.ldap_memberattr` +* **Syntax:** string +* **Default:** `"memberUid"` +* **Example:** `ldap_memberattr = "memberUid"` + +Holds group members' IDs. + +#### `modules.mod_shared_roster_ldap.ldap_memberattr_format` +* **Syntax:** string +* **Default:** `"%u"` +* **Example:** `ldap_memberattr_format = "%u"` + +Simple LDAP expression for extracting a user ID. + +#### `modules.mod_shared_roster_ldap.ldap_memberattr_format_re` +* **Syntax:** string +* **Default:** `""` +* **Example:** `ldap_memberattr_format_re = ""` + +Allows extracting the user ID with a regular expression. 
### Options: parameters -* `ldap_auth_check` (boolean, default: `true`): Enables checking if a shared roster entry actually exists in the XMPP database. -* `ldap_user_cache_validity` (integer, default: top-level/300): Specifies in seconds how long are the roster entries kept in the cache. -* `ldap_group_cache_validity` (integer, default: top-level/300): Specifies in seconds how long is the user's membership in a group kept in the cache . -* `ldap_user_cache_size` (integer, default: top-level/1000): Specifies how many shared roster items are kept in the cache. -* `ldap_group_cache_size` (integer, default: top-level/1000): Specifies how many roster group entries are kept in cache. +#### `modules.mod_shared_roster_ldap.ldap_auth_check` +* **Syntax:** boolean +* **Default:** `true` +* **Example:** `ldap_auth_check = true` + +Enables checking if a shared roster entry actually exists in the XMPP database. + +#### `modules.mod_shared_roster_ldap.ldap_user_cache_validity` +* **Syntax:** non-negative integer +* **Default:** top-level/`300` +* **Example:** `ldap_user_cache_validity = 300` + +Specifies in seconds how long the roster entries are kept in the cache. + +#### `modules.mod_shared_roster_ldap.ldap_group_cache_validity` +* **Syntax:** non-negative integer +* **Default:** top-level/`300` +* **Example:** `ldap_group_cache_validity = 300` + +Specifies in seconds how long the user's membership in a group is kept in the cache. + +#### `modules.mod_shared_roster_ldap.ldap_user_cache_size` +* **Syntax:** non-negative integer +* **Default:** top-level/`1000` +* **Example:** `ldap_user_cache_size = 1000` + +Specifies how many shared roster items are kept in the cache. + +#### `modules.mod_shared_roster_ldap.ldap_group_cache_size` +* **Syntax:** non-negative integer +* **Default:** top-level/`1000` +* **Example:** `ldap_group_cache_size = 1000` + +Specifies how many roster group entries are kept in the cache. 
### Options: LDAP filters -* `ldap_rfilter` (string, default: top-level/`""`): Used to find names of all shared roster groups. -* `ldap_gfilter` (string, default: top-level/`""`): Used for retrieving the human-readable name and the members of a group. -* `ldap_ufilter` (string, default: top-level/`""`): Used for retrieving the human-readable name of the roster entries. -* `ldap_filter` (string, default: top-level/`""`): Filter AND-ed with previous filters. +#### `modules.mod_shared_roster_ldap.ldap_rfilter` +* **Syntax:** string +* **Default:** top-level/`""` +* **Example:** `ldap_rfilter = "(objectClass=inetOrgPerson)"` + +Used to find names of all shared roster groups. + +#### `modules.mod_shared_roster_ldap.ldap_gfilter` +* **Syntax:** string +* **Default:** top-level/`""` +* **Example:** `ldap_gfilter = ""` + +Used for retrieving the human-readable name and the members of a group. + +#### `modules.mod_shared_roster_ldap.ldap_ufilter` +* **Syntax:** string +* **Default:** top-level/`""` +* **Example:** `ldap_ufilter = ""` + +Used for retrieving the human-readable name of the roster entries. + +#### `modules.mod_shared_roster_ldap.ldap_filter` +* **Syntax:** string +* **Default:** top-level/`""` +* **Example:** `ldap_filter = "(objectClass=inetOrgPerson)"` + +Filter AND-ed with previous filters. 
### Example Configuration ``` -{mod_shared_roster_ldap, [ - {ldap_base, "ou=Users,dc=ejd,dc=com"}, - {ldap_groupattr, "ou"}, - {ldap_memberattr, "cn"},{ldap_userdesc, "cn"}, - {ldap_filter, "(objectClass=inetOrgPerson)"}, - {ldap_rfilter, "(objectClass=inetOrgPerson)"}, - {ldap_group_cache_validity, 1}, - {ldap_user_cache_validity, 1}]} +[modules.mod_shared_roster_ldap] + ldap_base = "ou=Users,dc=ejd,dc=com" + ldap_groupattr = "ou" + ldap_memberattr = "cn" + ldap_userdesc = "cn" + ldap_filter = "(objectClass=inetOrgPerson)" + ldap_rfilter = "(objectClass=inetOrgPerson)" + ldap_group_cache_validity = 1 + ldap_user_cache_validity = 1 ``` diff --git a/doc/modules/mod_sic.md b/doc/modules/mod_sic.md index 0f7ffd2e46c..de29299305c 100644 --- a/doc/modules/mod_sic.md +++ b/doc/modules/mod_sic.md @@ -3,11 +3,15 @@ This module implements [XEP-0279: Server IP Check](http://xmpp.org/extensions/xe ### Options -* `iqdisc` (default: `one_queue`) +#### `modules.mod_sic.iqdisc.type` +* **Syntax:** string, one of `"one_queue"`, `"no_queue"`, `"queues"`, `"parallel"` +* **Default:** `"one_queue"` + +Strategy to handle incoming stanzas. For details, please refer to +[IQ processing policies](../../advanced-configuration/Modules/#iq-processing-policies). ### Example Configuration ``` -{mod_sic, []} - +[modules.mod_sic] ``` diff --git a/doc/modules/mod_stream_management.md b/doc/modules/mod_stream_management.md index a983614507e..0a5f6f27689 100644 --- a/doc/modules/mod_stream_management.md +++ b/doc/modules/mod_stream_management.md @@ -7,30 +7,61 @@ while the management of the session tables and configuration is implemented in ### Options -* `buffer_max` (default: 100): Buffer size for messages yet to be acknowledged. -* `ack_freq` (default: 1): Frequency of ack requests sent from the server to the client, e.g. 1 - means a request after each stanza, 3 means a request after each 3 stanzas. -* `resume_timeout` (default: 600): Timeout for the session resumption. 
Sessions will be removed - after the specified number of seconds. -* `stale_h`: enable keeping old server's `` values after the resumption timed out. Defaults to - `[{enabled, false}]`. When enabled, parameters for the garbage collection of these tables should - be provided, for example as `[{enabled, true}, {stale_h_repeat_after, 1800}, {stale_h_geriatric, - 3600}]` — 1800 for `stale_h_repeat_after` and 3600 for `stale_h_geriatric` are the defaults. - - `stale_h_repeat_after`: How often the garbage collection will run in the background to clean this - table. Defaults to 1800 seconds (half an hour). - - `stale_h_geriatric`: The maximum lifespan of a record in memory. After this, they will be chased - for cleanup. Defaults to 3600 seconds (one hour). +#### `modules.mod_stream_management.buffer_max` +* **Syntax:** positive integer or string `"infinity"` or string `"no_buffer"` +* **Default:** `100` +* **Example:** `buffer_max = "no_buffer"` + +Buffer size for messages yet to be acknowledged. + +#### `modules.mod_stream_management.ack_freq` +* **Syntax:** positive integer or string `"never"` +* **Default:** `1` +* **Example:** `ack_freq = "never"` + +Frequency of ack requests sent from the server to the client, e.g. 1 means a request after each stanza, 3 means a request after each 3 stanzas. + +#### `modules.mod_stream_management.resume_timeout` +* **Syntax:** positive integer, value given in seconds +* **Default:** `600` +* **Example:** `resume_timeout = 600` + +Timeout for the session resumption. Sessions will be removed after the specified number of seconds. + +#### Stale_h options +Enables keeping the old server's `<h>` values after the resumption timed out. Disabled by default. When enabled, parameters for the garbage collection of these tables should be provided. 
+ +#### `modules.mod_stream_management.stale_h.enabled` +* **Syntax:** boolean +* **Default:** `false` +* **Example:** `enabled = true` + +Enables `stale_h` configuration + +#### `modules.mod_stream_management.stale_h.repeat_after` +* **Syntax:** positive integer, value given in seconds +* **Default:** `1800` (half an hour) +* **Example:** `repeat_after = 1800` + +How often the garbage collection will run in the background to clean this table. + +#### `modules.mod_stream_management.stale_h.geriatric` +* **Syntax:** positive integer, value given in seconds +* **Default:** `3600` (one hour) +* **Example:** `geriatric = 3600` + +The maximum lifespan of a record in memory. After this, they will be chased for cleanup. ### Example Configuration ``` - {mod_stream_management, [{buffer_max, 30}, - {ack_freq, 1}, - {resume_timeout, 600} - {stale_h, [{enabled, true}, - {stale_h_repeat_after, 1800}, - {stale_h_geriatric, 3600}]} - ]}, +[modules.mod_stream_management] + buffer_max = 30 + ack_freq = 1 + resume_timeout = 600 + stale_h.enabled = true + stale_h.repeat_after = 1800 + stale_h.geriatric = 3600 ``` ### Implementation details diff --git a/doc/modules/mod_time.md b/doc/modules/mod_time.md index ed50eb54f9e..40d0f8e0470 100644 --- a/doc/modules/mod_time.md +++ b/doc/modules/mod_time.md @@ -6,10 +6,15 @@ Protocol is described under [XEP-0202: Entity Time](http://www.xmpp.org/extensio ### Options -* `iqdisc` (default: `one_queue`) +#### `modules.mod_time.iqdisc.type` +* **Syntax:** string, one of `"one_queue"`, `"no_queue"`, `"queues"`, `"parallel"` +* **Default:** `"one_queue"` + +Strategy to handle incoming stanzas. For details, please refer to +[IQ processing policies](../../advanced-configuration/Modules/#iq-processing-policies). 
### Example Configuration ``` - {mod_time, []}, +[modules.mod_time] ``` diff --git a/doc/modules/mod_vcard.md b/doc/modules/mod_vcard.md index 6b0b211c2af..77c58b51d7d 100644 --- a/doc/modules/mod_vcard.md +++ b/doc/modules/mod_vcard.md @@ -3,47 +3,139 @@ This module provides support for vCards, as specified in [XEP-0054: vcard-temp]( ### Options -* `iqdisc` (default: `one_queue`) -* `host` (string, default: `"vjud.@HOST@"`): Domain of the vCard User Directory, used for searching. - `@HOST@` is replaced with the domain(s) supported by the cluster. -* `search` (boolean, default: `true`): Enables/disables the domain set in the previous option. - `false` makes searching for users impossible. -* `backend` (atom, default: `mnesia`): vCard storage backend. - Valid values are `ldap`, `rdbms`, `riak` and `mnesia`. - **Warning:** LDAP backend is read-only. -* `matches` (`inifnity` or positive integer, default: 30): Maxmimum search results to be returned to the user. +#### `modules.mod_vcard.iqdisc.type` +* **Syntax:** string, one of `"one_queue"`, `"no_queue"`, `"queues"`, `"parallel"` +* **Default:** `"no_queue"` + +Strategy to handle incoming stanzas. For details, please refer to +[IQ processing policies](../../advanced-configuration/Modules/#iq-processing-policies). + +#### `modules.mod_vcard.host` +* **Syntax:** string +* **Default:** `"vjud.@HOST@"` +* **Example:** `host = "vjud.@HOST@"` + +Domain of the vCard User Directory, used for searching. +`@HOST@` is replaced with the domain(s) supported by the cluster. + +#### `modules.mod_vcard.search` +* **Syntax:** boolean +* **Default:** `true` +* **Example:** `search = false` + +Enables/disables the domain set in the previous option. `false` makes searching for users impossible. + +#### `modules.mod_vcard.backend` +* **Syntax:** string, one of `"ldap"`, `"rdbms"`, `"riak"`, `"mnesia"` +* **Default:** `"mnesia"` +* **Example:** `backend = "rdbms"` + +vCard storage backend. **Warning:** LDAP backend is read-only. 
+ +#### `modules.mod_vcard.matches` +* **Syntax:** non-negative integer or the string `"infinity"` +* **Default:** `30` +* **Example:** `matches = 10` + +Maximum search results to be returned to the user. ##### LDAP-specific options -* `ldap_pool_tag`, `ldap_base`, `ldap_uids`, `ldap_filter`, `ldap_deref`: - These options are the same as for the [LDAP authentication module](../authentication-backends/LDAP-authentication-module.md#configuration-options). +The following options are the same as for the [LDAP authentication module](../../authentication-methods/ldap#configuration-options): -* `ldap_vcard_map` (list of `{VCardField, LDAPPattern, LDAPField}`, default: see description): Mappings between VCard and LDAP fields. For the default setting, please see `[MongooseIM root]/src/mod_vcard_ldap.erl`, line 74. +* [`modules.mod_vcard.ldap_pool_tag`](../../authentication-methods/ldap#authldappool_tag) +* [`modules.mod_vcard.ldap_base`](../../authentication-methods/ldap#authldapbase) +* [`modules.mod_vcard.ldap_uids`](../../authentication-methods/ldap#authldapuids) +* [`modules.mod_vcard.ldap_filter`](../../authentication-methods/ldap#authldapfilter) +* [`modules.mod_vcard.ldap_deref`](../../authentication-methods/ldap#authldapderef) -* `ldap_search_fields` (list of `{SearchField, LDAPField}`, default: see description): Mappings between the human-readable search fields and LDAP fields. - For the default setting, please see `[MongooseIM root]/src/mod_vcard_ldap.erl`, line 96. +###### `modules.mod_vcard.ldap_vcard_map` +* **Syntax:** Array of TOML tables with the following keys: `"vcard_field"`, `"ldap_pattern"`, `"ldap_field"` and string values. +* **Default:** see description +* **Example:** `ldap_vcard_map = [{vcard_field = "FN", ldap_pattern = "%s", ldap_field = "displayName"}]` -* `ldap_search_reported` (list of `{SearchField, VCardField}`, default: see description): Mappings between the human-readable search fields and VCard fields. 
- For the default setting, please see `[MongooseIM root]/src/mod_vcard_ldap.erl`, line 109. +Mappings between VCard and LDAP fields. For the default settings, please see `[MongooseIM root]/src/mod_vcard_ldap.erl`, line 79. -* `ldap_search_operator` (`or` | `and`, default: `and`): A default operator used for search query items. +###### `modules.mod_vcard.ldap_search_fields` +* **Syntax:** Array of TOML tables with the following keys: `"search_field"`, `"ldap_field"` and string values. +* **Default:** see description +* **Example:** `ldap_search_fields = [{search_field = "User", ldap_field = "%u"}]` -* `ldap_binary_search_fields` (list of binaries, default: `[]`): A list of search fields, which values should be Base64-encoded by MongooseIM before sending to LDAP. +Mappings between the human-readable search fields and LDAP fields. +For the default settings, please see `[MongooseIM root]/src/mod_vcard_ldap.erl`, line 101. + +###### `modules.mod_vcard.ldap_search_reported` +* **Syntax:** Array of TOML tables with the following keys: `"search_field"`, `"vcard_field"` and string values. +* **Default:** see description +* **Example:** `ldap_search_reported = [{search_field = "Full Name", vcard_field = "FN"}]` + +Mappings between the human-readable search fields and VCard fields. +For the default settings, please see `[MongooseIM root]/src/mod_vcard_ldap.erl`, line 114. + +###### `modules.mod_vcard.ldap_search_operator` +* **Syntax:** string, one of `"or"`, `"and"` +* **Default:** `"and"` +* **Example:** `ldap_search_operator = "or"` + +A default operator used for search query items. + +###### `modules.mod_vcard.ldap_binary_search_fields` +* **Syntax:** array of strings +* **Default:** `[]` +* **Example:** `ldap_binary_search_fields = ["User", "Full Name"]` + +An array of search fields, which values should be Base64-encoded by MongooseIM before sending to LDAP. ##### Riak-specific options -* `bucket_type` (default `<<"vcard">>`) - Riak bucket type. 
+###### `modules.mod_vcard.riak.bucket_type` +* **Syntax:** string +* **Default:** `"vcard"` +* **Example:** `bucket_type = "vcard"` + +Riak bucket type. + +###### `modules.mod_vcard.riak.search_index` +* **Syntax:** string +* **Default:** `"vcard"` +* **Example:** `search_index = "vcard"` -* `search_index` (default `<<"vcard">>`) - Riak index name. +Riak index name. ### Example Configuration ``` -{mod_vcard, [ {allow_return_all, true}, - {search_all_hosts, true}, - {matches, 1}, - {search, true}, - {host, "directory.example.com"} - ]} +[modules.mod_vcard] + allow_return_all = true + search_all_hosts = true + matches = 1 + search = true + host = "directory.example.com" + + [[modules.mod_vcard.ldap_vcard_map]] + vcard_field = "FAMILY" + ldap_pattern = "%s" + ldap_field = "sn" + + [[modules.mod_vcard.ldap_vcard_map]] + vcard_field = "FN" + ldap_pattern = "%s" + ldap_field = "displayName" + + [[modules.mod_vcard.ldap_search_fields]] + search_field = "User" + ldap_field = "%u" + + [[modules.mod_vcard.ldap_search_fields]] + search_field = "Full Name" + ldap_field = "displayName" + + [[modules.mod_vcard.ldap_search_reported]] + search_field = "Full Name" + vcard_field = "FN" + + [[modules.mod_vcard.ldap_search_reported]] + search_field = "Given Name" + vcard_field = "FIRST" ``` ### Metrics diff --git a/doc/modules/mod_version.md b/doc/modules/mod_version.md index 26a7dc5c6b1..0f4ab04ab1a 100644 --- a/doc/modules/mod_version.md +++ b/doc/modules/mod_version.md @@ -4,11 +4,23 @@ This module provides the functionality specified in [XEP-0092: Software Version] ### Options -* `iqdisc` (default: `one_queue`) -* `os_info` (boolean, default: `false`): Determines wheter information about the operating system will be included. +#### `modules.mod_version.iqdisc.type` +* **Syntax:** string, one of `"one_queue"`, `"no_queue"`, `"queues"`, `"parallel"` +* **Default:** `"no_queue"` + +Strategy to handle incoming stanzas. 
For details, please refer to +[IQ processing policies](../../advanced-configuration/Modules/#iq-processing-policies). + +#### `modules.mod_version.os_info` +* **Syntax:** boolean +* **Default:** `false` +* **Example:** `os_info = true` + +Determines whether information about the operating system will be included. ### Example configuration ``` -{mod_version, [{os_info, true}]} +[modules.mod_version] + os_info = true ``` diff --git a/doc/operation-and-maintenance/System-Metrics-Privacy-Policy.md b/doc/operation-and-maintenance/System-Metrics-Privacy-Policy.md index 94492806e10..7c9f77410e6 100644 --- a/doc/operation-and-maintenance/System-Metrics-Privacy-Policy.md +++ b/doc/operation-and-maintenance/System-Metrics-Privacy-Policy.md @@ -69,13 +69,10 @@ To create a new Tracking ID, please follow the steps below: ## Example configuration New Tracking ID can be added to the list of options ``` -{service_mongoose_system_metrics, [ - report, - {intial_report, 300000}, - {periodic_report, 108000000}, - {tracking_id, UA-XXXX-Y} - ] -} +[services.service_mongoose_system_metrics] + initial_report = 300_000 + periodic_report = 10_800_000 + tracking_id = "UA-XXXX-Y" ``` For more details regarding service configuration, please see [Services](../advanced-configuration/Services.md) section. diff --git a/doc/user-guide/Getting-started.md b/doc/user-guide/Getting-started.md index 1349d16bede..1d8e3a6524b 100644 --- a/doc/user-guide/Getting-started.md +++ b/doc/user-guide/Getting-started.md @@ -148,7 +148,7 @@ mongooseimctl unregister dan localhost For a given user (`localuser` and `localserver`), add a contact (`user` and `server`): ```bash -mongooseimctl add_rosteritem localuser localserver user server nick group subs +mongooseimctl add_rosteritem localuser localserver user server nick group subs ``` Examples: ```bash @@ -160,7 +160,7 @@ Note: The `subs`parameter is the "subscription" to a user's presence. 
Possible v Verify the contact list: ```bash -mongooseimctl get_roster user host +mongooseimctl get_roster user host ``` Examples: ```bash @@ -171,9 +171,9 @@ mongooseimctl get_roster carol localhost ## Basic MongooseIM configuration -You can edit the `mongooseim.cfg` file: +You can edit the `mongooseim.toml` file: ```bash -/etc/mongooseim/mongooseim.cfg +/etc/mongooseim/mongooseim.toml ``` Warning: We recommend you do not touch the advanced settings at this stage. @@ -184,9 +184,9 @@ Save (and optionally backup, archive, or version) the configuration file and res ### Logging Set your own loglevel in the configuration file: -```erlang -% {loglevel, 3}. -{loglevel, 4}. +```toml +[general] + loglevel = "notice" ``` Save and exit your editor, restart MongooseIM and check your loglevel from the command line: @@ -208,12 +208,12 @@ Type `Ctrl+C` to exit. ### MUC (Multi-User Chat) for groupchats -Enable MUC, or Multi-User Chat, for groupchats/channels in the `mongooseim.cfg` file: -```erlang -{mod_muc, [{host, "muc.@HOST@"}, - {access, muc}, - {access_create, muc_create} - ]}, +Enable MUC, or Multi-User Chat, for groupchats/channels in the `mongooseim.toml` file: +```toml +[modules.mod_muc] + host = "muc.@HOST@" + access = "muc" + access_create = "muc_create" ``` Verify with: @@ -230,11 +230,10 @@ mongooseimctl print_flat_config | grep muc ### Roster versioning For faster contact list downloads at each client/app (re)connection, edit the configuration file: -```erlang -{mod_roster, [ - {versioning, true}, - {store_current_id, true} - ]}, +```toml +[modules.mod_roster] + versioning = true + store_current_id = true ``` Verify with: ```bash @@ -351,8 +350,8 @@ You can even run `mongooseimctl` without arguments for a list of available comma ### Summary: files -You know basic entries in files: -`/etc/mongooseim/mongooseim.cfg` +You know basic entries in the files: +`/etc/mongooseim/mongooseim.toml` `/var/log/mongooseim/mongooseim.log` ### Summary: client/app diff --git 
a/doc/user-guide/ICE_tutorial/mongooseim.cfg b/doc/user-guide/ICE_tutorial/mongooseim.cfg index 55e554e7490..654c07f7f75 100755 --- a/doc/user-guide/ICE_tutorial/mongooseim.cfg +++ b/doc/user-guide/ICE_tutorial/mongooseim.cfg @@ -646,7 +646,7 @@ %% After successful registration, the user receives %% a message with this subject and body. %% - {welcome_message, {""}}, + {welcome_message, {"", ""}}, %% %% When a user registers, send a notification to diff --git a/doc/user-guide/Jingle-SIP-setup.md b/doc/user-guide/Jingle-SIP-setup.md index 4344c7fbd7c..789bf6d73c9 100644 --- a/doc/user-guide/Jingle-SIP-setup.md +++ b/doc/user-guide/Jingle-SIP-setup.md @@ -89,25 +89,28 @@ Now we need to use this to update `/etc/hosts` file like below: At this point I assume that MongooseIM was built with `make rel`, that it is running and the current working directory is `_build/prod/rel/mongooseim`. Similar to Routr, MongooseIM also needs to know which hosts to server. -Please replace the default host defined in `etc/mongooseim.cfg`; the line: +Please replace the default host defined in `etc/mongooseim.toml`; the line: -```erlang -{hosts, ["localhost"] }. +```toml +[general] + hosts = ["localhost"] ``` should be changed to: -```erlang -{hosts, ["xmpp.example", "sip.example"] }. 
+```toml +[general] + hosts = ["xmpp.example", "sip.example"] ``` Now we need to enable `mod_jingle_sip`, please add the following line in modules list (somewhere around line 740 in the same file) -```erlang -{mod_jingle_sip, [{proxy_host, "sip.example"}]}, +```toml +[modules.mod_jingle_sip] + proxy_host = "sip.example" ``` -More details on MongooseIM configuration you can find in [Basic Configuration](../Basic-configuration.md) +You can find more details on MongooseIM configuration in [Configuration](../Advanced-configuration.md) and in [Modules configuration](../advanced-configuration/Modules.md) Now we are registering both users in MongooseIM by calling the following commands: diff --git a/doc/authentication-methods/client-certificate.md b/doc/user-guide/client-certificate.md similarity index 87% rename from doc/authentication-methods/client-certificate.md rename to doc/user-guide/client-certificate.md index 16dbd2d135c..cb801b0f4c7 100644 --- a/doc/authentication-methods/client-certificate.md +++ b/doc/user-guide/client-certificate.md @@ -33,27 +33,18 @@ The `SASL EXTERNAL` authentication method requires a digital client certificate. This digital certificate should contain `xmppAddr` field(s), which is always checked first. If there is more than one JID specified in the `xmppAddr` fields, the client must include the authorisation entity which corresponds to the one of the specified JIDs. -When no `xmppAddr` is specified, the `cn` (common name) field might be used to provide client's username, but it is optional (not enabled by default). -There are three possible ways of using the `SASL EXTERNAL` authentication -method. 
It can be configured by adding one of the following options to -the list of `auth_opts` in MongooseIM config file: - -* `{cyrsasl_external, standard}` - do not accept a certificate with no `xmpp_addrs` field (default); -* `{cyrsasl_external, use_common_name}` - use the `common_name` field if it is provided in the certificate; -* `{cyrsasl_external, allow_just_user_identity}` - accept a certificate if there are no `xmpp_addrs` -provided and use the user identity from the authentication request. - +When no `xmppAddr` is specified, the `cn` (common name) field might be used to provide the client's username, but it is optional and can be configured with the [`sasl_external`](../advanced-configuration/auth.md#auth-sasl-external) option in the `auth` section. If the client certificate does not contain a JID, the client must provide one in authorisation entity. For the details please refer to [XEP-0178 Best Practices for Use of SASL EXTERNAL with Certificates](https://xmpp.org/extensions/xep-0178.html). -### Enable compatible authentication backend +### Enable compatible authentication method -You need to enable one of the following authentication backends by using the [`auth_method` option](../Advanced-configuration.md#authentication) in the MongooseIM configuration file. +You need to enable one of the following authentication methods by using the [`auth_method` option](../Advanced-configuration.md#authentication) in the MongooseIM configuration file. * `pki` - accepts user credentials, -* `http` - accepts user credentials if the provided certificate is [known and valid](../authentication-backends/HTTP-authentication-module.md#method-get_certs) +* `http` - accepts user credentials if the provided certificate is [known and valid](../authentication-methods/http.md#method-get_certs) * `ldap` - accepts user credentials if a corresponding user account exists in LDAP.
### Self-signed certificates diff --git a/include/mongoose.hrl b/include/mongoose.hrl index a5c34abd2e8..858de563010 100644 --- a/include/mongoose.hrl +++ b/include/mongoose.hrl @@ -26,7 +26,7 @@ -define(MYNAME, hd(ejabberd_config:get_global_option(hosts))). -define(MYLANG, ejabberd_config:get_global_option(language)). --define(CONFIG_PATH, "etc/mongooseim.cfg"). +-define(CONFIG_PATH, "etc/mongooseim.toml"). -define(MONGOOSE_URI, <<"https://www.erlang-solutions.com/products/mongooseim.html">>). diff --git a/mkdocs.yml b/mkdocs.yml index f9d86b0e8a4..3fcd848f51f 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -15,6 +15,7 @@ pages: - 'Getting started': 'user-guide/Getting-started.md' - 'Release/Installation configuration': 'user-guide/release_config.md' - 'Bootstrap scripts': 'user-guide/Bootstrap-Scripts.md' + - 'Client certificate authentication': 'user-guide/client-certificate.md' - Tutorials: - 'How to Build MongooseIM from source code': 'user-guide/How-to-build.md' - 'How to Set up Push Notifications': 'user-guide/push-notifications/Push-notifications.md' @@ -35,26 +36,32 @@ pages: - 'Differentiators': 'Differentiators.md' - 'History': 'History.md' - 'Configuration': - - 'Basic Configuration': 'Basic-configuration.md' + - 'Configuration files': 'Advanced-configuration.md' + - 'Options: General': 'advanced-configuration/general.md' + - 'Options: Listen': 'advanced-configuration/listen.md' + - 'Options: Auth': 'advanced-configuration/auth.md' + - 'Options: Outgoing connections': 'advanced-configuration/outgoing-connections.md' + - 'Options: Services': 'advanced-configuration/Services.md' + - 'Options: Extension Modules': 'advanced-configuration/Modules.md' + - 'Options: Shaper': 'advanced-configuration/shaper.md' + - 'Options: Acl': 'advanced-configuration/acl.md' + - 'Options: Access': 'advanced-configuration/access.md' + - 'Options: S2S': 'advanced-configuration/s2s.md' + - 'Options: Host config': 'advanced-configuration/host_config.md' + - 'Release options': 
'advanced-configuration/release-options.md' - 'Erlang Cookie Security': 'Erlang-cookie-security.md' - - 'Advanced Configuration': 'Advanced-configuration.md' - 'TLS hardening': 'advanced-configuration/TLS-hardening.md' - 'Database backends configuration': 'advanced-configuration/database-backends-configuration.md' - - 'Outgoing connections': 'advanced-configuration/outgoing-connections.md' - - 'Extension Modules': 'advanced-configuration/Modules.md' - - 'Listener Modules': 'advanced-configuration/Listener-modules.md' - - 'ACL': 'advanced-configuration/acl.md' - - 'Services': 'advanced-configuration/Services.md' - 'Authentication methods': - - 'Client Certificate': 'authentication-methods/client-certificate.md' - - 'Authentication backends': - - 'External Authentication Module': 'authentication-backends/External-authentication-module.md' - - 'HTTP Authentication Module': 'authentication-backends/HTTP-authentication-module.md' - - 'JWT Authentication Module': 'authentication-backends/JWT-authentication-module.md' - - 'LDAP Authentication Module': 'authentication-backends/LDAP-authentication-module.md' - - 'PKI Authentication Module': 'authentication-backends/PKI-authentication-module.md' - - 'Riak Authentication Module': 'authentication-backends/Riak-authentication-module.md' - - 'Dummy Authentication Module': 'authentication-backends/Dummy-authentication-module.md' + - 'RDBMS': 'authentication-methods/rdbms.md' + - 'External': 'authentication-methods/external.md' + - 'Anonymous': 'authentication-methods/anonymous.md' + - 'LDAP': 'authentication-methods/ldap.md' + - 'JWT': 'authentication-methods/jwt.md' + - 'Riak': 'authentication-methods/riak.md' + - 'HTTP': 'authentication-methods/http.md' + - 'PKI': 'authentication-methods/pki.md' + - 'Dummy': 'authentication-methods/dummy.md' - 'MongooseIM open XMPP extensions': - 'MUC light': 'open-extensions/muc_light.md' - 'Token-based reconnection': 'open-extensions/token-reconnection.md' diff --git a/rebar.config 
b/rebar.config index cb21ef414d0..d7e98335f04 100644 --- a/rebar.config +++ b/rebar.config @@ -83,7 +83,8 @@ %% trails and cowboy_swagger for generating of Swagger documentation {trails, "2.1.0"}, - {cowboy_swagger, "2.2.0"} + {cowboy_swagger, "2.2.0"}, + {tomerl, "0.4.0"} ]}. {relx, [{release, { mongooseim, {cmd, "cat VERSION | tr -d '\r\n'"} }, @@ -132,20 +133,20 @@ ]}. {profiles, [ {prod, [{relx, [ {dev_mode, false}, - {overlay_vars, "rel/vars.config"}, - {overlay, [{template, "rel/files/mongooseim.cfg", "etc/mongooseim.cfg"}]} ]}, + {overlay_vars, "rel/vars-toml.config"}, + {overlay, [{template, "rel/files/mongooseim.toml", "etc/mongooseim.toml"}]} ]}, {erl_opts, [{d, 'PROD_NODE'}]} ]}, %% development nodes - {mim1, [{relx, [ {overlay_vars, ["rel/vars.config", "rel/mim1.vars.config"]}, - {overlay, [{template, "rel/files/mongooseim.cfg", "etc/mongooseim.cfg"}]} ]}]}, - {mim2, [{relx, [ {overlay_vars, ["rel/vars.config", "rel/mim2.vars.config"]}, - {overlay, [{template, "rel/files/mongooseim.cfg", "etc/mongooseim.cfg"}]} ]}]}, - {mim3, [{relx, [ {overlay_vars, ["rel/vars.config", "rel/mim3.vars.config"]}, - {overlay, [{template, "rel/files/mongooseim.cfg", "etc/mongooseim.cfg"}]} ]}]}, - {fed1, [{relx, [ {overlay_vars, ["rel/vars.config", "rel/fed1.vars.config"]}, - {overlay, [{template, "rel/files/mongooseim.cfg", "etc/mongooseim.cfg"}]} ]}]}, - {reg1, [{relx, [ {overlay_vars, ["rel/vars.config", "rel/reg1.vars.config"]}, - {overlay, [{template, "rel/files/mongooseim.cfg", "etc/mongooseim.cfg"}]} ]}]} + {mim1, [{relx, [ {overlay_vars, ["rel/vars-toml.config", "rel/mim1.vars-toml.config"]}, + {overlay, [{template, "rel/files/mongooseim.toml", "etc/mongooseim.toml"}]} ]}]}, + {mim2, [{relx, [ {overlay_vars, ["rel/vars-toml.config", "rel/mim2.vars-toml.config"]}, + {overlay, [{template, "rel/files/mongooseim.toml", "etc/mongooseim.toml"}]} ]}]}, + {mim3, [{relx, [ {overlay_vars, ["rel/vars-toml.config", "rel/mim3.vars-toml.config"]}, + {overlay, [{template, 
"rel/files/mongooseim.toml", "etc/mongooseim.toml"}]} ]}]}, + {fed1, [{relx, [ {overlay_vars, ["rel/vars-toml.config", "rel/fed1.vars-toml.config"]}, + {overlay, [{template, "rel/files/mongooseim.toml", "etc/mongooseim.toml"}]} ]}]}, + {reg1, [{relx, [ {overlay_vars, ["rel/vars-toml.config", "rel/reg1.vars-toml.config"]}, + {overlay, [{template, "rel/files/mongooseim.toml", "etc/mongooseim.toml"}]} ]}]} ]}. {plugins, diff --git a/rebar.lock b/rebar.lock index 777fd5c9cb9..334aefd2360 100644 --- a/rebar.lock +++ b/rebar.lock @@ -164,6 +164,7 @@ {ref,"a1117a2571f40a3f1f3da286ba54d76cbbca9fa7"}}, 0}, {<<"tirerl">>,{pkg,<<"tirerl">>,<<"1.1.0">>},0}, + {<<"tomerl">>,{pkg,<<"tomerl">>,<<"0.4.0">>},0}, {<<"trails">>,{pkg,<<"trails">>,<<"2.1.0">>},0}, {<<"unicode_util_compat">>,{pkg,<<"unicode_util_compat">>,<<"0.5.0">>},1}, {<<"usec">>, @@ -217,6 +218,7 @@ {<<"ssl_verify_fun">>, <<"28A4D65B7F59893BC2C7DE786DEC1E1555BD742D336043FE644AE956C3497FBE">>}, {<<"stringprep">>, <<"BF962FE2A4D01298D220B6474689755103F703942A043908CA6CD323E8FA0947">>}, {<<"tirerl">>, <<"A80B45AED46342A6985FD52DEBB32BA26214EEF5C43EE2AE88BFBF815FA4F9F8">>}, + {<<"tomerl">>, <<"279393D94115F7944688B3CFEE8BD97AF7D1606AC40472D4DC2DA2EB376C9311">>}, {<<"trails">>, <<"D51D7F065F2B692654C611C663BFAC377D47EAFF9D38D0C17ECD031D1A59399A">>}, {<<"unicode_util_compat">>, <<"8516502659002CEC19E244EBD90D312183064BE95025A319A6C7E89F4BCCD65B">>}, {<<"uuid">>, <<"280014F8FF57FCE36EE6E91C3ECF21CBFCE78AAE9854C09597BB4C11E27B66D6">>}, diff --git a/rel/fed1.vars-toml.config b/rel/fed1.vars-toml.config new file mode 100644 index 00000000000..17ca364f056 --- /dev/null +++ b/rel/fed1.vars-toml.config @@ -0,0 +1,44 @@ +{node_name, "fed1@localhost"}. + +{c2s_port, 5242}. +{incoming_s2s_port, 5299}. +{http_port, 5282}. +{https_port, 5287}. +{http_api_endpoint_port, 5294}. +{http_api_old_endpoint_port, 5293}. +{http_api_client_endpoint_port, 8095}. + +%% This node is for s2s testing. +%% "localhost" host should NOT be defined. 
+{hosts, "\"fed1\""}. + +{s2s_addr, "[[s2s.address]] + host = \"localhost\" + ip_address = \"127.0.0.1\" + [[s2s.address]] + host = \"pubsub.localhost\" + ip_address = \"127.0.0.1\" + [[s2s.address]] + host = \"muc.localhost\" + ip_address = \"127.0.0.1\" + [[s2s.address]] + host = \"localhost.bis\" + ip_address = \"127.0.0.1\""}. +{s2s_default_policy, "\"allow\""}. +{highload_vm_args, ""}. +{listen_service, ""}. + +{tls_config, "tls.certfile = \"priv/ssl/fake_server.pem\" + tls.mode = \"starttls\" + tls.ciphers = \"ECDHE-RSA-AES256-GCM-SHA384\""}. +{secondary_c2s, ""}. + +{http_api_old_endpoint, "ip_address = \"127.0.0.1\" + port = {{ http_api_old_endpoint_port }}"}. +{http_api_endpoint, "ip_address = \"127.0.0.1\" + port = {{ http_api_endpoint_port }}"}. +{http_api_client_endpoint, "port = {{ http_api_client_endpoint_port }}"}. + +{c2s_dhfile, "tls.dhfile = \"priv/ssl/fake_dh_server.pem\""}. +{s2s_dhfile, "tls.dhfile = \"priv/ssl/fake_dh_server.pem\""}. + diff --git a/rel/files/mongooseim b/rel/files/mongooseim index 78bc8f5d7da..f7ca0c5abc8 100755 --- a/rel/files/mongooseim +++ b/rel/files/mongooseim @@ -18,7 +18,7 @@ EJABBERD_STATUS_PATH="{{mongooseim_status_dir}}/status" export EJABBERD_STATUS_PATH="$EJABBERD_STATUS_PATH" EJABBERD_SO_PATH=`ls -dt "$RUNNER_BASE_DIR"/lib/mongooseim-*/priv/lib | head -1` -EJABBERD_CONFIG_PATH="$RUNNER_ETC_DIR"/mongooseim.cfg +EJABBERD_CONFIG_PATH="$RUNNER_ETC_DIR"/mongooseim.${MONGOOSEIM_CONFIG_FORMAT:-toml} export EJABBERD_SO_PATH export EJABBERD_CONFIG_PATH diff --git a/rel/files/mongooseim.cfg b/rel/files/mongooseim.cfg index 2588f4159a8..82f1380d293 100755 --- a/rel/files/mongooseim.cfg +++ b/rel/files/mongooseim.cfg @@ -119,15 +119,13 @@ [ %% BOSH and WS endpoints over HTTP { {{{cowboy_port}}}, ejabberd_cowboy, [ - {num_acceptors, 10}, - {transport_options, [{max_connections, 1024}]}, + {transport_options, [{max_connections, 1024}, {num_acceptors, 10}]}, {modules, [ {"_", "/http-bind", mod_bosh}, {"_", "/ws-xmpp", 
mod_websockets, [{ejabberd_service, [ {access, all}, {shaper_rule, fast}, - {ip, {127, 0, 0, 1}}, {password, "secret"}]} %% Uncomment to enable connection dropping or/and server-side pings %{timeout, 600000}, {ping_rate, 2000} @@ -164,8 +162,7 @@ %% BOSH and WS endpoints over HTTPS { {{{cowboy_secure_port}}}, ejabberd_cowboy, [ - {num_acceptors, 10}, - {transport_options, [{max_connections, 1024}]}, + {transport_options, [{max_connections, 1024}, {num_acceptors, 10}]}, {{{https_config}}} {modules, [ {"_", "/http-bind", mod_bosh}, @@ -185,16 +182,14 @@ %% At least start it on different port which will be hidden behind firewall { {{{http_api_endpoint}}} , ejabberd_cowboy, [ - {num_acceptors, 10}, - {transport_options, [{max_connections, 1024}]}, + {transport_options, [{max_connections, 1024}, {num_acceptors, 10}]}, {modules, [ {"localhost", "/api", mongoose_api_admin, []} ]} ]}, { {{{http_api_client_endpoint}}} , ejabberd_cowboy, [ - {num_acceptors, 10}, - {transport_options, [{max_connections, 1024}]}, + {transport_options, [{max_connections, 1024}, {num_acceptors, 10}]}, {protocol_options, [{compress, true}]}, {{{https_config}}} {modules, [ @@ -206,7 +201,7 @@ {"_", "/api/rooms/:id/users/[:user]", mongoose_client_api_rooms_users, []}, {"_", "/api/rooms/[:id]/messages", mongoose_client_api_rooms_messages, []}, %% Swagger - {"_", "/api-docs", cowboy_swagger_redirect_handler, {priv_file, cowboy_swagger, "swagger/index.html"}}, + {"_", "/api-docs", cowboy_swagger_redirect_handler, #{}}, {"_", "/api-docs/swagger.json", cowboy_swagger_json_handler, #{}}, {"_", "/api-docs/[...]", cowboy_static, {priv_dir, cowboy_swagger, "swagger", [{mimetypes, cow_mimetypes, all}]}} ]} @@ -215,8 +210,7 @@ %% Following HTTP API is deprected, the new one abouve should be used instead { {{{http_api_old_endpoint}}} , ejabberd_cowboy, [ - {num_acceptors, 10}, - {transport_options, [{max_connections, 1024}]}, + {transport_options, [{max_connections, 1024}, {num_acceptors, 10}]}, {modules, [ 
{"localhost", "/api", mongoose_api, [{handlers, [mongoose_api_metrics, mongoose_api_users]}]} @@ -698,13 +692,12 @@ {{{mod_amp}}} {mod_disco, [{users_can_see_hidden_services, false}]}, % {mod_extdisco, [ -% {stun, [ +% [{type, stun}, % {host, "127.0.0.1"}, % {port, "3478"}, % {transport, "udp"}, % {username, "username"}, -% {password, "secret"} -% ]} +% {password, "secret"}] % ]}, {mod_commands, []}, {mod_muc_commands, []}, @@ -752,7 +745,7 @@ %% After successful registration, the user receives %% a message with this subject and body. %% - {welcome_message, {""}}, + {welcome_message, {"", ""}}, %% %% When a user registers, send a notification to diff --git a/rel/files/mongooseim.toml b/rel/files/mongooseim.toml new file mode 100644 index 00000000000..55354140bab --- /dev/null +++ b/rel/files/mongooseim.toml @@ -0,0 +1,329 @@ +[general] + loglevel = "warning" + hosts = [{{{hosts}}}] + registration_timeout = "infinity" + language = "en" + all_metrics_are_global = {{{all_metrics_are_global}}} + sm_backend = {{{sm_backend}}} + max_fsm_queue = 1000 + {{{http_server_name}}} + {{{rdbms_server_type}}} + +[[listen.http]] + port = {{{http_port}}} + transport.num_acceptors = 10 + transport.max_connections = 1024 + + [[listen.http.handlers.mod_bosh]] + host = "_" + path = "/http-bind" + + [[listen.http.handlers.mod_websockets]] + host = "_" + path = "/ws-xmpp" + + [listen.http.handlers.mod_websockets.service] + access = "all" + shaper_rule = "fast" + password = "secret" + +[[listen.http]] + port = {{{https_port}}} + transport.num_acceptors = 10 + transport.max_connections = 1024 + {{{https_config}}} + + [[listen.http.handlers.mod_bosh]] + host = "_" + path = "/http-bind" + + [[listen.http.handlers.mod_websockets]] + host = "_" + path = "/ws-xmpp" + +[[listen.http]] + {{{http_api_endpoint}}} + transport.num_acceptors = 10 + transport.max_connections = 1024 + + [[listen.http.handlers.mongoose_api_admin]] + host = "localhost" + path = "/api" + +[[listen.http]] + 
{{{http_api_client_endpoint}}} + transport.num_acceptors = 10 + transport.max_connections = 1024 + protocol.compress = true + {{{https_config}}} + + [[listen.http.handlers.lasse_handler]] + host = "_" + path = "/api/sse" + module = "mongoose_client_api_sse" + + [[listen.http.handlers.mongoose_client_api_messages]] + host = "_" + path = "/api/messages/[:with]" + + [[listen.http.handlers.mongoose_client_api_contacts]] + host = "_" + path = "/api/contacts/[:jid]" + + [[listen.http.handlers.mongoose_client_api_rooms]] + host = "_" + path = "/api/rooms/[:id]" + + [[listen.http.handlers.mongoose_client_api_rooms_config]] + host = "_" + path = "/api/rooms/[:id]/config" + + [[listen.http.handlers.mongoose_client_api_rooms_users]] + host = "_" + path = "/api/rooms/:id/users/[:user]" + + [[listen.http.handlers.mongoose_client_api_rooms_messages]] + host = "_" + path = "/api/rooms/[:id]/messages" + + [[listen.http.handlers.cowboy_swagger_redirect_handler]] + host = "_" + path = "/api-docs" + + [[listen.http.handlers.cowboy_swagger_json_handler]] + host = "_" + path = "/api-docs/swagger.json" + + [[listen.http.handlers.cowboy_static]] + host = "_" + path = "/api-docs/[...]" + type = "priv_dir" + app = "cowboy_swagger" + content_path = "swagger" + +[[listen.http]] + {{{http_api_old_endpoint}}} + transport.num_acceptors = 10 + transport.max_connections = 1024 + + [[listen.http.handlers.mongoose_api]] + host = "localhost" + path = "/api" + handlers = ["mongoose_api_metrics", "mongoose_api_users"] + +[[listen.c2s]] + port = {{{c2s_port}}} + {{{tls_config}}} + {{{tls_module}}} + {{{proxy_protocol}}} + {{{zlib}}} + access = "c2s" + shaper = "c2s_shaper" + max_stanza_size = 65536 + {{{c2s_dhfile}}} + +{{{secondary_c2s}}} + +[[listen.s2s]] + port = {{{incoming_s2s_port}}} + shaper = "s2s_shaper" + max_stanza_size = 131072 + {{{s2s_dhfile}}} + +{{{listen_service}}} + +[auth] + {{{auth_ldap}}} + methods = [{{{auth_method}}}] + {{{password_format}}} + {{{scram_iterations}}} + 
sasl_external = [{{{cyrsasl_external}}}] + {{{sasl_mechanisms}}} + +{{{outgoing_pools}}} +#[outgoing_pools.redis.global_distrib] +# scope = "single_host" +# host = "localhost" +# workers = 10 +# +#[outgoing_pools.rdbms.default] +# scope = "global" +# workers = 5 +# +# [outgoing_pools.rdbms.default.connection] +# driver = "pgsql" +# host = "localhost" +# database = "ejabberd" +# username = "ejabberd" +# password = "mongooseim_secret" +# tls.required = true +# tls.verify_peer = true +# tls.cacertfile = "priv/ssl/cacert.pem" +# tls.server_name_indication = false + +[services.service_admin_extra] + submods = ["node", "accounts", "sessions", "vcard", "gdpr", "upload", + "roster", "last", "private", "stanza", "stats"] + +[services.service_mongoose_system_metrics] + initial_report = 300_000 + periodic_report = 10_800_000 + +[modules.mod_adhoc] + +{{{mod_amp}}} + +[modules.mod_disco] + users_can_see_hidden_services = false + +[modules.mod_commands] + +[modules.mod_muc_commands] + +[modules.mod_muc_light_commands] + +{{{mod_last}}} + +[modules.mod_stream_management] + +{{{mod_offline}}} + +{{{mod_privacy}}} + +{{{mod_blocking}}} + +{{{mod_private}}} + +[modules.mod_register] + welcome_message = {body = "", subject = ""} + ip_access = [ + {address = "127.0.0.0/8", policy = "allow"}, + {address = "0.0.0.0/0", policy = "deny"} + ] + access = "register" + +{{{mod_roster}}} + +[modules.mod_sic] + +{{{mod_vcard}}} + +[modules.mod_bosh] + +[modules.mod_carboncopy] + +{{{mod_http_notification}}} + +[shaper.normal] + max_rate = 1000 + +[shaper.fast] + max_rate = 50_000 + +[shaper.mam_shaper] + max_rate = 1 + +[shaper.mam_global_shaper] + max_rate = 1000 + +[acl] + local = [ + {user_regexp = ""} + ] + +[access] + max_user_sessions = [ + {acl = "all", value = 10} + ] + + max_user_offline_messages = [ + {acl = "admin", value = 5000}, + {acl = "all", value = 100} + ] + + local = [ + {acl = "local", value = "allow"} + ] + + c2s = [ + {acl = "blocked", value = "deny"}, + {acl = "all", 
value = "allow"} + ] + + c2s_shaper = [ + {acl = "admin", value = "none"}, + {acl = "all", value = "normal"} + ] + + s2s_shaper = [ + {acl = "all", value = "fast"} + ] + + muc_admin = [ + {acl = "admin", value = "allow"} + ] + + muc_create = [ + {acl = "local", value = "allow"} + ] + + muc = [ + {acl = "all", value = "allow"} + ] + + register = [ + {acl = "all", value = "allow"} + ] + + mam_set_prefs = [ + {acl = "all", value = "default"} + ] + + mam_get_prefs = [ + {acl = "all", value = "default"} + ] + + mam_lookup_messages = [ + {acl = "all", value = "default"} + ] + + mam_set_prefs_shaper = [ + {acl = "all", value = "mam_shaper"} + ] + + mam_get_prefs_shaper = [ + {acl = "all", value = "mam_shaper"} + ] + + mam_lookup_messages_shaper = [ + {acl = "all", value = "mam_shaper"} + ] + + mam_set_prefs_global_shaper = [ + {acl = "all", value = "mam_global_shaper"} + ] + + mam_get_prefs_global_shaper = [ + {acl = "all", value = "mam_global_shaper"} + ] + + mam_lookup_messages_global_shaper = [ + {acl = "all", value = "mam_global_shaper"} + ] + +[s2s] + {{{s2s_use_starttls}}} + {{{s2s_certfile}}} + default_policy = {{{s2s_default_policy}}} + outgoing.port = {{{outgoing_s2s_port}}} + + {{{s2s_addr}}} + +{{{host_config}}} +#[[host_config]] +# host = "anonymous.localhost" +# +# [host_config.auth] +# methods = ["anonymous"] +# anonymous.allow_multiple_connections = true +# anonymous.protocol = "both" diff --git a/rel/mim1.vars-toml.config b/rel/mim1.vars-toml.config new file mode 100644 index 00000000000..3333698b47d --- /dev/null +++ b/rel/mim1.vars-toml.config @@ -0,0 +1,65 @@ +{c2s_tls_port, 5223}. +{outgoing_s2s_port, 5299}. +{service_port, 8888}. +{kicking_service_port, 8666}. +{hidden_service_port, 8189}. + +{hosts, "\"localhost\", + \"anonymous.localhost\", + \"localhost.bis\" + "}. 
+{host_config, + "[[host_config]] + host = \"anonymous.localhost\" + + [host_config.auth] + methods = [\"anonymous\"] + anonymous.allow_multiple_connections = true + anonymous.protocol = \"both\""}. +{password_format, "password.format = \"scram\" + password.hash = [\"sha256\"]"}. +{scram_iterations, "scram_iterations = 64"}. +{s2s_addr, "[[s2s.address]] + host = \"fed1\" + ip_address = \"127.0.0.1\""}. +{s2s_default_policy, "\"allow\""}. + +% Disable highload args to save memory for dev builds +{highload_vm_args, ""}. + +{secondary_c2s, + "[[listen.c2s]] + port = {{ c2s_tls_port }} + zlib = 4096 + access = \"c2s\" + shaper = \"c2s_shaper\" + max_stanza_size = 65536"}. +{listen_service, + "[[listen.service]] + port = {{ service_port }} + access = \"all\" + shaper_rule = \"fast\" + ip_address = \"127.0.0.1\" + password = \"secret\" + +[[listen.service]] + port = {{ kicking_service_port }} + access = \"all\" + conflict_behaviour = \"kick_old\" + shaper_rule = \"fast\" + ip_address = \"127.0.0.1\" + password = \"secret\" + +[[listen.service]] + port = {{ hidden_service_port }} + access = \"all\" + hidden_components = true + shaper_rule = \"fast\" + ip_address = \"127.0.0.1\" + password = \"secret\""}. + +{mod_amp, "[modules.mod_amp]"}. +{mod_private, "[modules.mod_private]"}. +{zlib, "zlib = 10_000"}. +{c2s_dhfile, "tls.dhfile = \"priv/ssl/fake_dh_server.pem\""}. +{s2s_dhfile, "tls.dhfile = \"priv/ssl/fake_dh_server.pem\""}. diff --git a/rel/mim1.vars.config b/rel/mim1.vars.config index bcbea5b22a3..af6e631dcf1 100644 --- a/rel/mim1.vars.config +++ b/rel/mim1.vars.config @@ -23,7 +23,8 @@ {host_config, "{host_config, \"anonymous.localhost\", [{auth_method, anonymous}, {allow_multiple_connections, true}, - {anonymous_protocol, both}]}." }. + {anonymous_protocol, both}, + {auth_opts, []}]}." }. {sm_backend, "{mnesia, []}"}. {auth_method, "internal"}. {password_format, "{password_format, {scram, [sha256]}}"}. 
diff --git a/rel/mim2.vars-toml.config b/rel/mim2.vars-toml.config new file mode 100644 index 00000000000..977272ed647 --- /dev/null +++ b/rel/mim2.vars-toml.config @@ -0,0 +1,55 @@ +{node_name, "ejabberd2@localhost"}. + +{c2s_port, 5232}. +{c2s_tls_port, 5233}. +{incoming_s2s_port, 5279}. +{http_port, 5281}. +{https_port, 5286}. +{http_api_old_endpoint_port, 5289}. +{http_api_endpoint_port, 8090}. +{http_api_client_endpoint_port, 8091}. +{service_port, 8899}. + +{hosts, "\"localhost\", + \"anonymous.localhost\", + \"localhost.bis\" + "}. +{s2s_addr, "[[s2s.address]] + host = \"localhost2\" + ip_address = \"127.0.0.1\""}. +{s2s_default_policy, "\"allow\""}. +{highload_vm_args, ""}. + +{http_api_old_endpoint, "ip_address = \"127.0.0.1\" + port = {{ http_api_old_endpoint_port }}"}. +{http_api_endpoint, "ip_address = \"127.0.0.1\" + port = {{ http_api_endpoint_port }}"}. +{http_api_client_endpoint, "port = {{ http_api_client_endpoint_port }}"}. + +{tls_config, "tls.certfile = \"priv/ssl/fake_server.pem\" + tls.mode = \"starttls\" + tls.ciphers = \"ECDHE-RSA-AES256-GCM-SHA384\""}. + +{secondary_c2s, + "[[listen.c2s]] + port = {{ c2s_tls_port }} + zlib = 4096 + access = \"c2s\" + shaper = \"c2s_shaper\" + max_stanza_size = 65536 + tls.certfile = \"priv/ssl/fake_server.pem\" + tls.mode = \"tls\" + tls.ciphers = \"ECDHE-RSA-AES256-GCM-SHA384\""}. +{listen_service, + "[[listen.service]] + port = {{ service_port }} + access = \"all\" + shaper_rule = \"fast\" + ip_address = \"127.0.0.1\" + password = \"secret\""}. +{all_metrics_are_global, "true"}. + +{http_server_name, "http_server_name = \"Classified\""}. + +{c2s_dhfile, "tls.dhfile = \"priv/ssl/fake_dh_server.pem\""}. +{s2s_dhfile, "tls.dhfile = \"priv/ssl/fake_dh_server.pem\""}. diff --git a/rel/mim3.vars-toml.config b/rel/mim3.vars-toml.config new file mode 100644 index 00000000000..68fdb938ef1 --- /dev/null +++ b/rel/mim3.vars-toml.config @@ -0,0 +1,54 @@ +{node_name, "mongooseim3@localhost"}. + +{c2s_port, 5262}. 
+{c2s_tls_port, 5263}. +{outgoing_s2s_port, 5295}. +{incoming_s2s_port, 5291}. +{http_port, 5283}. +{https_port, 5290}. +{http_api_old_endpoint_port, 5292}. +{http_api_endpoint_port, 8092}. +{http_api_client_endpoint_port, 8093}. + +{hosts, "\"localhost\", + \"anonymous.localhost\", + \"localhost.bis\""}. + +{s2s_addr, "[[s2s.address]] + host = \"localhost2\" + ip_address = \"127.0.0.1\""}. +{s2s_default_policy, "\"allow\""}. +{highload_vm_args, ""}. +{listen_service, ""}. +{mod_http_notification, "[modules.mod_http_notification]"}. + +{tls_config, "tls.certfile = \"priv/ssl/fake_server.pem\" + tls.mode = \"starttls\" + tls.ciphers = \"ECDHE-RSA-AES256-GCM-SHA384\""}. + +{secondary_c2s, + "[[listen.c2s]] + port = {{ c2s_tls_port }} + zlib = 4096 + access = \"c2s\" + shaper = \"c2s_shaper\" + max_stanza_size = 65536 + tls.certfile = \"priv/ssl/fake_server.pem\" + tls.mode = \"tls\" + tls.module = \"just_tls\" + + [[listen.c2s.tls.ciphers]] + cipher = \"aes_256_gcm\" + key_exchange = \"ecdhe_rsa\" + mac = \"aead\" + prf = \"sha384\""}. + +{http_api_old_endpoint, "ip_address = \"127.0.0.1\" + port = {{ http_api_old_endpoint_port }}"}. +{http_api_endpoint, "ip_address = \"127.0.0.1\" + port = {{ http_api_endpoint_port }}"}. +{http_api_client_endpoint, "port = {{ http_api_client_endpoint_port }}"}. + +{c2s_dhfile, "tls.dhfile = \"priv/ssl/fake_dh_server.pem\""}. +{s2s_dhfile, "tls.dhfile = \"priv/ssl/fake_dh_server.pem\""}. + diff --git a/rel/reg1.vars-toml.config b/rel/reg1.vars-toml.config new file mode 100644 index 00000000000..2b82df4229f --- /dev/null +++ b/rel/reg1.vars-toml.config @@ -0,0 +1,46 @@ +{node_name, "reg1@localhost"}. + +{c2s_port, 5252}. +{incoming_s2s_port, 5298}. +{http_port, 5272}. +{https_port, 5277}. +{service_port, 9990}. +{http_api_endpoint_port, 8074}. +{http_api_old_endpoint_port, 5273}. +{http_api_client_endpoint_port, 8075}. + +%% This node is for global distribution testing. +%% reg is short for region. 
+%% Both local and global hosts should be defined. +%% "localhost" is a global host. +%% "reg1" is a local host. +{hosts, "\"reg1\", \"localhost\""}. +{s2s_addr, "[[s2s.address]] + host = \"localhost\" + ip_address = \"127.0.0.1\" + + [[s2s.address]] + host = \"localhost.bis\" + ip_address = \"127.0.0.1\""}. +{s2s_default_policy, "\"allow\""}. +{listen_service, "[[listen.service]] + port = {{ service_port }} + access = \"all\" + shaper_rule = \"fast\" + ip_address = \"127.0.0.1\" + password = \"secret\""}. + +{tls_config, "tls.certfile = \"priv/ssl/fake_server.pem\" + tls.mode = \"starttls\" + tls.ciphers = \"ECDHE-RSA-AES256-GCM-SHA384\""}. +{secondary_c2s, ""}. + +{http_api_old_endpoint, "ip_address = \"127.0.0.1\" + port = {{ http_api_old_endpoint_port }}"}. +{http_api_endpoint, "ip_address = \"127.0.0.1\" + port = {{ http_api_endpoint_port }}"}. +{http_api_client_endpoint, "port = {{ http_api_client_endpoint_port }}"}. + +{c2s_dhfile, "tls.dhfile = \"priv/ssl/fake_dh_server.pem\""}. +{s2s_dhfile, "tls.dhfile = \"priv/ssl/fake_dh_server.pem\""}. + diff --git a/rel/vars-toml.config.in b/rel/vars-toml.config.in new file mode 100644 index 00000000000..c2e1b81dbe9 --- /dev/null +++ b/rel/vars-toml.config.in @@ -0,0 +1,64 @@ +{node_name, "mongooseim@localhost"}. + +{c2s_port, 5222}. +{outgoing_s2s_port, 5269}. +{incoming_s2s_port, 5269}. +{http_port, 5280}. +{https_port, 5285}. + +% vm.args +{highload_vm_args, "+P 10000000 -env ERL_MAX_PORTS 250000"}. + +% TOML config +{hosts, "\"localhost\""}. +{host_config, ""}. +{auth_ldap, ""}. +{s2s_addr, ""}. +{s2s_default_policy, "\"deny\""}. +{mod_amp, ""}. +{listen_service, "[[listen.service]] + port = 8888 + access = \"all\" + shaper_rule = \"fast\" + ip_address = \"127.0.0.1\" + password = \"secret\""}. +{mod_last, "[modules.mod_last]"}. +{mod_offline, "[modules.mod_offline] + access_max_user_messages = \"max_user_offline_messages\""}. +{mod_privacy, "[modules.mod_privacy]"}. +{mod_blocking, "[modules.mod_blocking]"}. 
+{mod_private, "[modules.mod_private]"}. +{mod_roster, "[modules.mod_roster]"}. +{mod_vcard, "[modules.mod_vcard] + host = \"vjud.@HOST@\""}. +{sm_backend, "\"mnesia\""}. +{auth_method, "\"internal\""}. +{cyrsasl_external, "\"standard\""}. +{tls_config, "tls.certfile = \"priv/ssl/fake_server.pem\" + tls.mode = \"starttls\""}. +{tls_module, ""}. +{https_config, "tls.certfile = \"priv/ssl/fake_cert.pem\" + tls.keyfile = \"priv/ssl/fake_key.pem\" + tls.password = \"\""}. +{zlib, ""}. +{outgoing_pools, ""}. +{http_api_old_endpoint, "ip_address = \"127.0.0.1\" + port = 5288"}. +{http_api_endpoint, "ip_address = \"127.0.0.1\" + port = 8088"}. +{http_api_client_endpoint, "port = 8089"}. +{s2s_use_starttls, "use_starttls = \"optional\""}. +{s2s_certfile, "certfile = \"priv/ssl/fake_server.pem\""}. +{sasl_mechanisms, ""}. +{all_metrics_are_global, "false"}. + +%% Defined in Makefile by appending configure.vars.config +%% Uncomment for manual release generation. +%{mongooseim_runner_user, ""}. +%{mongooseim_script_dir, "$(cd ${0%/*} && pwd)"}. +%{mongooseim_etc_dir, "$RUNNER_BASE_DIR/etc"}. +%{mongooseim_log_dir, "log"}. +%{mongooseim_mdb_dir, "$RUNNER_BASE_DIR/Mnesia.$NODE"}. +%{mongooseim_mdb_dir_toggle, "%"}. +%{mongooseim_lock_dir, "$EJABBERD_DIR/var/lock"}. +%{mongooseim_nodetool_etc_dir, "etc"}. diff --git a/src/auth/ejabberd_auth.erl b/src/auth/ejabberd_auth.erl index e40bd729f22..5bdcdb90757 100644 --- a/src/auth/ejabberd_auth.erl +++ b/src/auth/ejabberd_auth.erl @@ -55,7 +55,8 @@ -export([check_digest/4]). --export([auth_modules/1]). +-export([auth_modules/1, + auth_methods/1]). %% Library functions for reuse in ejabberd_auth_* modules -export([authorize_with_check_password/2]). @@ -591,10 +592,14 @@ auth_modules() -> %% Return the list of authenticated modules for a given host -spec auth_modules(Server :: jid:lserver()) -> [authmodule()]. 
auth_modules(LServer) -> - Method = ejabberd_config:get_local_option({auth_method, LServer}), - Methods = get_auth_method_as_a_list(Method), + Methods = auth_methods(LServer), [list_to_atom("ejabberd_auth_" ++ atom_to_list(M)) || M <- Methods]. +-spec auth_methods(jid:lserver()) -> [atom()]. +auth_methods(LServer) -> + Method = ejabberd_config:get_local_option({auth_method, LServer}), + get_auth_method_as_a_list(Method). + get_auth_method_as_a_list(undefined) -> []; get_auth_method_as_a_list(AuthMethod) when is_list(AuthMethod) -> AuthMethod; get_auth_method_as_a_list(AuthMethod) when is_atom(AuthMethod) -> [AuthMethod]. diff --git a/src/backend_module.erl b/src/backend_module.erl index e0aaf683c4a..6625d0cfc1a 100644 --- a/src/backend_module.erl +++ b/src/backend_module.erl @@ -27,6 +27,7 @@ -author('konrad.zemek@erlang-solutions.com'). -export([create/2, create/3]). +-export([backend_module/2]). %% Callback implemented by proxy modules. -callback backend() -> module(). diff --git a/src/config/mongoose_config_parser.erl b/src/config/mongoose_config_parser.erl index 6d2a27a9239..86466d58089 100644 --- a/src/config/mongoose_config_parser.erl +++ b/src/config/mongoose_config_parser.erl @@ -1,27 +1,33 @@ -%% @doc Pure config logic. -%% No ets table manipulations, no Mnesia, no starting modules, no file reading here. -%% Everything here is safe, side effect free. -%% OK, logging is possible, but keep it to minimum. +%% @doc Parsing and processing of MongooseIM config files +%% - parser backends: 'cfg' and 'toml' +%% - config state management -module(mongoose_config_parser). --export([parse_terms/1]). --export([check_hosts/2]). --export([can_be_ignored/1]). --export([is_not_host_specific/1]). --export([allow_override_all/1, +%% parser API +-export([parse_file/1]). 
+ +%% state API +-export([new_state/0, + allow_override_all/1, allow_override_local_only/1, + override/2, + override_global/1, + override_local/1, + override_acls/1, + set_opts/2, + set_hosts/2, + get_opts/1, state_to_opts/1, state_to_host_opts/1, state_to_global_opt/3, state_to_required_files/1, can_override/2]). -%% for unit tests --export([group_host_changes/1]). +%% config post-processing +-export([dedup_state_opts/1, + add_dep_modules/1]). -%% Support for 'include_config_file' --export([config_filenames_to_include/1, - include_config_files/2]). +-callback parse_file(FileName :: string()) -> state(). -include("mongoose.hrl"). -include("ejabberd_config.hrl"). @@ -46,469 +52,76 @@ override_global = false :: boolean(), override_acls = false :: boolean()}). --type host() :: any(). % TODO: specify this +-type host() :: jid:server(). -type state() :: #state{}. --type macro() :: {macro_key(), macro_value()}. - -%% The atom must have all characters in uppercase. --type macro_key() :: atom(). - --type macro_value() :: term(). - --type known_term() :: override_global - | override_local - | override_acls - | {acl, _, _} - | {alarms, _} - | {access, _, _} - | {shaper, _, _} - | {host, _} - | {hosts, _} - | {host_config, _, _} - | {listen, _} - | {language, _} - | {sm_backend, _} - | {outgoing_s2s_port, integer()} - | {outgoing_s2s_options, _, integer()} - | {{s2s_addr, _}, _} - | {s2s_dns_options, [tuple()]} - | {s2s_use_starttls, integer()} - | {s2s_certfile, _} - | {domain_certfile, _, _} - | {node_type, _} - | {cluster_nodes, _} - | {registration_timeout, integer()} - | {mongooseimctl_access_commands, list()} - | {loglevel, _} - | {max_fsm_queue, _} - | {sasl_mechanisms, _} - | host_term(). - --type host_term() :: {acl, _, _} - | {access, _, _} - | {shaper, _, _} - | {host, _} - | {hosts, _}. - --spec search_hosts_and_pools({host|hosts, [host()] | host()} , state()) -> any(). 
-search_hosts_and_pools({host, Host}, State) -> - search_hosts_and_pools({hosts, [Host]}, State); -search_hosts_and_pools({hosts, Hosts}, State=#state{hosts = []}) -> - add_hosts_to_option(Hosts, State); -search_hosts_and_pools({hosts, Hosts}, #state{hosts = OldHosts}) -> - ?LOG_ERROR(#{what => too_many_hosts_definitions, - new_hosts => Hosts, old_hosts => OldHosts}), - exit(#{issue => "too many hosts definitions", - new_hosts => Hosts, - old_hosts => OldHosts}); -search_hosts_and_pools(_Term, State) -> - State. - --spec add_hosts_to_option(Hosts :: [host()], - State :: state()) -> state(). -add_hosts_to_option(Hosts, State) -> - PrepHosts = normalize_hosts(Hosts), - add_option(hosts, PrepHosts, State#state{hosts = PrepHosts}). - --spec normalize_hosts([host()]) -> [binary() | tuple()]. -normalize_hosts(Hosts) -> - normalize_hosts(Hosts, []). - - -normalize_hosts([], PrepHosts) -> - lists:reverse(PrepHosts); -normalize_hosts([Host | Hosts], PrepHosts) -> - case jid:nodeprep(host_to_binary(Host)) of - error -> - ?LOG_ERROR(#{what => invalid_hostname_in_config, hostname => Host}), - erlang:error(#{issue => invalid_hostname, - hostname => Host}); - PrepHost -> - normalize_hosts(Hosts, [PrepHost | PrepHosts]) - end. - -host_to_binary(Host) -> - unicode:characters_to_binary(Host). - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%%% Support for Macro - -%% @doc Replace the macros with their defined values. --spec replace_macros(Terms :: [term()]) -> [term()]. -replace_macros(Terms) -> - {TermsOthers, Macros} = split_terms_macros(Terms), - replace(TermsOthers, Macros). - - -%% @doc Split Terms into normal terms and macro definitions. --spec split_terms_macros(Terms :: [term()]) -> {[term()], [macro()]}. -split_terms_macros(Terms) -> - lists:foldl(fun split_terms_macros_fold/2, {[], []}, Terms). - --spec split_terms_macros_fold(any(), Acc) -> Acc when - Acc :: {[term()], [{Key :: any(), Value :: any()}]}. 
-split_terms_macros_fold({define_macro, Key, Value} = Term, {TOs, Ms}) -> - case is_macro_name(Key) of - true -> - {TOs, Ms ++ [{Key, Value}]}; - false -> - exit({macro_not_properly_defined, Term}) - end; -split_terms_macros_fold(Term, {TOs, Ms}) -> - {TOs ++ [Term], Ms}. - - -%% @doc Recursively replace in Terms macro usages with the defined value. --spec replace(Terms :: [term()], - Macros :: [macro()]) -> [term()]. -replace([], _) -> - []; -replace([Term | Terms], Macros) -> - [replace_term(Term, Macros) | replace(Terms, Macros)]. - -replace_term(Key, Macros) when is_atom(Key) -> - case is_macro_name(Key) of - true -> - case proplists:get_value(Key, Macros) of - undefined -> exit({undefined_macro, Key}); - Value -> Value - end; - false -> - Key - end; -replace_term({use_macro, Key, Value}, Macros) -> - proplists:get_value(Key, Macros, Value); -replace_term(Term, Macros) when is_list(Term) -> - replace(Term, Macros); -replace_term(Term, Macros) when is_tuple(Term) -> - List = tuple_to_list(Term), - List2 = replace(List, Macros), - list_to_tuple(List2); -replace_term(Term, _) -> - Term. - -%% Check is the term is a config macro --spec is_macro_name(atom()) -> boolean(). -is_macro_name(Atom) when is_atom(Atom) -> - is_all_uppercase(Atom) andalso has_any_uppercase(Atom); -is_macro_name(_) -> - false. - --spec is_all_uppercase(atom()) -> boolean(). -is_all_uppercase(Atom) -> - String = erlang:atom_to_list(Atom), - lists:all(fun(C) when C >= $a, C =< $z -> false; - (_) -> true - end, String). - -has_any_uppercase(Atom) -> - String = erlang:atom_to_list(Atom), - lists:any(fun(C) when C >= $A, C =< $Z -> true; - (_) -> false - end, String). - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%%% Process terms - --spec process_term(Term :: known_term(), - State :: state()) -> state(). 
-process_term(Term, State) -> - case Term of - override_global -> - State#state{override_global = true}; - override_local -> - State#state{override_local = true}; - override_acls -> - State#state{override_acls = true}; - {acl, _ACLName, _ACLData} -> - process_host_term(Term, global, State); - {alarms, Env} -> - add_option(alarms, Env, State); - {access, _RuleName, _Rules} -> - process_host_term(Term, global, State); - {shaper, _Name, _Data} -> - %%lists:foldl(fun(Host, S) -> process_host_term(Term, Host, S) end, - %% State, State#state.hosts); - process_host_term(Term, global, State); - {host, _Host} -> - State; - {hosts, _Hosts} -> - State; - {host_config, Host, Terms} -> - lists:foldl(fun(T, S) -> - process_host_term(T, list_to_binary(Host), S) end, - State, Terms); - {listen, Listeners} -> - Listeners2 = - lists:map( - fun({PortIP, Module, Opts}) -> - {Port, IPT, _, _, Proto, OptsClean} = - ejabberd_listener:parse_listener_portip(PortIP, Opts), - {{Port, IPT, Proto}, Module, OptsClean} - end, - Listeners), - add_option(listen, Listeners2, State); - {language, Val} -> - add_option(language, list_to_binary(Val), State); - {sm_backend, Val} -> - add_option(sm_backend, Val, State); - {rdbms_server_type, Val} -> - add_option(rdbms_server_type, Val, State); - {outgoing_s2s_port, Port} -> - add_option(outgoing_s2s_port, Port, State); - {outgoing_s2s_options, Methods, Timeout} -> - add_option(outgoing_s2s_options, {Methods, Timeout}, State); - {{s2s_addr, Host}, Addr} -> - add_option({s2s_addr, list_to_binary(Host)}, Addr, State); - {{global_distrib_addr, Host}, Addr} -> - add_option({global_distrib_addr, list_to_binary(Host)}, Addr, State); - {s2s_dns_options, PropList} -> - add_option(s2s_dns_options, PropList, State); - {s2s_use_starttls, Port} -> - add_option(s2s_use_starttls, Port, State); - {s2s_ciphers, Ciphers} -> - add_option(s2s_ciphers, Ciphers, State); - {s2s_certfile, CertFile} -> - State2 = compact_global_option(required_files, [CertFile], State), - 
add_option(s2s_certfile, CertFile, State2); - {domain_certfile, Domain, CertFile} -> - State2 = compact_global_option(required_files, [CertFile], State), - add_option({domain_certfile, Domain}, CertFile, State2); - {node_type, NodeType} -> - add_option(node_type, NodeType, State); - {cluster_nodes, Nodes} -> - add_option(cluster_nodes, Nodes, State); - {watchdog_admins, Admins} -> - add_option(watchdog_admins, Admins, State); - {watchdog_large_heap, LH} -> - add_option(watchdog_large_heap, LH, State); - {registration_timeout, Timeout} -> - add_option(registration_timeout, Timeout, State); - {mongooseimctl_access_commands, ACs} -> - add_option(mongooseimctl_access_commands, ACs, State); - {routing_modules, Mods} -> - add_option(routing_modules, Mods, State); - {loglevel, Loglevel} -> - add_option(loglevel, Loglevel, State); - {max_fsm_queue, N} -> - add_option(max_fsm_queue, N, State); - {sasl_mechanisms, Mechanisms} -> - add_option(sasl_mechanisms, Mechanisms, State); - {all_metrics_are_global, Value} -> - add_option(all_metrics_are_global, Value, State); - {cowboy_server_name, Value} -> - add_option(cowboy_server_name, Value, State); - {services, Value} -> - add_option(services, Value, State); - {_Opt, _Val} -> - lists:foldl(fun(Host, S) -> process_host_term(Term, Host, S) end, - State, State#state.hosts) - end. - --spec process_host_term(Term :: host_term(), - Host :: acl:host(), - State :: state()) -> state(). 
-process_host_term(Term, Host, State) -> - case Term of - {acl, ACLName, ACLData} -> - OptRec = acl:to_record(Host, ACLName, ACLData), - append_option(OptRec, State); - {access, RuleName, Rules} -> - append_global_opt({access, RuleName, Host}, Rules, State); - {shaper, Name, Data} -> - append_global_opt({shaper, Name, Host}, Data, State); - {host, Host} -> - State; - {hosts, _Hosts} -> - State; - {outgoing_pools, Pools} when is_list(Pools) -> - add_option(outgoing_pools, Pools, State); - {node_specific_options, NodeOpts} -> - add_option(node_specific_options, NodeOpts, State); - {Opt, Val} -> - add_option({Opt, Host}, Val, State) - end. - --spec add_option(Opt :: key(), - Val :: value(), - State :: state()) -> state(). -add_option(Opt, Val, State) -> - Table = opt_table(Opt), - add_option(Table, Opt, Val, State). - -add_option(config, Opt, Val, State) -> - append_global_opt(Opt, Val, State); -add_option(local_config, {{add, OptName}, Host}, Val, State) -> - compact_option({OptName, Host}, Val, State); -add_option(local_config, Opt, Val, State) -> - append_local_opt(Opt, Val, State). - -append_global_opt(OptName, OptValue, State) -> - OptRec = #config{key = OptName, value = OptValue}, - append_option(OptRec, State). - -append_local_opt(OptName, OptValue, State) -> - OptRec = #local_config{key = OptName, value = OptValue}, - append_option(OptRec, State). - -append_option(OptRec, State = #state{opts = Opts}) -> - State#state{ opts = [OptRec | Opts] }. - -%% Merges two values of a local option -compact_option(Opt, Val, State) -> - Opts2 = compact(Opt, Val, State#state.opts, []), - State#state{opts = Opts2}. +%% Parser API -compact({OptName, Host} = Opt, Val, [], Os) -> - %% The option is defined for host using host_config before the global option. - %% The host_option can be overwritten. - %% TODO or maybe not. We need a test. 
- ?LOG_WARNING(#{what => host_config_option_can_be_overwritten, - text => <<"define global options before host options">>, - option_name => OptName, host => Host}), - [#local_config{key = Opt, value = Val}] ++ Os; -%% Traverse the list of the options already parsed -compact(Opt, Val, [#local_config{key = Opt, value = OldVal} | Os1], Os2) -> - %% If the key of a local_config matches the Opt that wants to be added - OptRec = #local_config{key = Opt, value = Val ++ OldVal}, - %% Then prepend the new value to the list of old values - Os2 ++ [OptRec] ++ Os1; -compact(Opt, Val, [O | Os1], Os2) -> - compact(Opt, Val, Os1, Os2 ++ [O]). - - -%% Merges two values of a global option -compact_global_option(Opt, Val, State) when is_list(Val) -> - Opts2 = compact_global(Opt, Val, State#state.opts, []), - State#state{opts = Opts2}. - -compact_global(Opt, Val, [], Os) -> - [#config{key = Opt, value = Val}] ++ Os; -%% Traverse the list of the options already parsed -compact_global(Opt, Val, [#config{key = Opt, value = OldVal} | Os1], Os2) -> - %% If the key of a local_config matches the Opt that wants to be added - OptRec = #config{key = Opt, value = Val ++ OldVal}, - %% Then prepend the new value to the list of old values - Os2 ++ [OptRec] ++ Os1; -compact_global(Opt, Val, [O | Os1], Os2) -> - compact_global(Opt, Val, Os1, Os2 ++ [O]). +-spec parse_file(FileName :: string()) -> state(). +parse_file(FileName) -> + ParserModule = parser_module(filename:extension(FileName)), + ParserModule:parse_file(FileName). +parser_module(".toml") -> mongoose_config_parser_toml; +parser_module(".cfg") -> mongoose_config_parser_cfg. -opt_table(Opt) -> - case is_global_option(Opt) of - true -> - config; - false -> - local_config - end. - -is_global_option(Opt) -> - lists:member(Opt, global_options()). - -global_options() -> - [ - hosts, - language, - sm_backend, - node_specific_options - ]. 
- - -%%-------------------------------------------------------------------- -%% Configuration parsing -%%-------------------------------------------------------------------- - --spec parse_terms(term()) -> state(). -parse_terms(Terms) -> - State = just_parse_terms(Terms), - State2 = dedup_state_opts(State), - add_dep_modules(State2). - -just_parse_terms(Terms) -> - State = lists:foldl(fun search_hosts_and_pools/2, #state{}, Terms), - TermsWExpandedMacros = replace_macros(Terms), - lists:foldl(fun process_term/2, State, TermsWExpandedMacros). - --spec check_hosts([jid:server()], [jid:server()]) -> - {[jid:server()], [jid:server()]}. -check_hosts(NewHosts, OldHosts) -> - Old = sets:from_list(OldHosts), - New = sets:from_list(NewHosts), - ListToAdd = sets:to_list(sets:subtract(New, Old)), - ListToDel = sets:to_list(sets:subtract(Old, New)), - {ListToDel, ListToAdd}. - - --spec can_be_ignored(Key :: atom() | tuple()) -> boolean(). -can_be_ignored(Key) when is_atom(Key); - is_tuple(Key) -> - L = [domain_certfile, s2s, all_metrics_are_global, rdbms], - lists:member(Key, L). - -% group values which can be grouped like rdbms ones --spec group_host_changes([term()]) -> [{atom(), [term()]}]. -group_host_changes(Changes) when is_list(Changes) -> - D = lists:foldl(fun(#local_config{key = {Key, Host}, value = Val}, Dict) -> - BKey = atom_to_binary(Key, utf8), - case get_key_group(BKey, Key) of - Key -> - dict:append({Key, Host}, Val, Dict); - NewKey -> - dict:append({NewKey, Host}, {Key, Val}, Dict) - end - end, dict:new(), Changes), - [{Group, lists:sort(lists:flatten(MaybeDeepList))} - || {Group, MaybeDeepList} <- dict:to_list(D)]. - - --spec is_not_host_specific(atom() - | {atom(), jid:server()} - | {atom(), atom(), atom()}) -> boolean(). 
-is_not_host_specific(Key) when is_atom(Key) -> - true; -is_not_host_specific({Key, Host}) when is_atom(Key), is_binary(Host) -> - false; -is_not_host_specific({Key, PoolType, PoolName}) - when is_atom(Key), is_atom(PoolType), is_atom(PoolName) -> - true. - --spec get_key_group(binary(), atom()) -> atom(). -get_key_group(<<"ldap_", _/binary>>, _) -> - ldap; -get_key_group(<<"rdbms_", _/binary>>, _) -> - rdbms; -get_key_group(<<"pgsql_", _/binary>>, _) -> - rdbms; -get_key_group(<<"auth_", _/binary>>, _) -> - auth; -get_key_group(<<"ext_auth_", _/binary>>, _) -> - auth; -get_key_group(<<"s2s_", _/binary>>, _) -> - s2s; -get_key_group(_, Key) when is_atom(Key) -> - Key. - -%% ----------------------------------------------------------------- %% State API -%% ----------------------------------------------------------------- +-spec new_state() -> state(). +new_state() -> + #state{}. + +-spec allow_override_all(state()) -> state(). allow_override_all(State = #state{}) -> State#state{override_global = true, override_local = true, override_acls = true}. +-spec allow_override_local_only(state()) -> state(). allow_override_local_only(State = #state{}) -> State#state{override_global = false, override_local = true, override_acls = false}. +-spec override(Scope :: atom(), state()) -> state(). +override(global, State) -> override_global(State); +override(local, State) -> override_local(State); +override(acls, State) -> override_acls(State). + +-spec override_global(state()) -> state(). +override_global(State) -> + State#state{override_global = true}. + +-spec override_local(state()) -> state(). +override_local(State) -> + State#state{override_local = true}. + +-spec override_acls(state()) -> state(). +override_acls(State) -> + State#state{override_acls = true}. + +-spec set_opts(Opts :: list(), state()) -> state(). +set_opts(Opts, State) -> + State#state{opts = Opts}. + +-spec set_hosts([host()], state()) -> state(). +set_hosts(Hosts, State) -> + State#state{hosts = Hosts}. 
+ +-spec get_opts(state()) -> list(). +get_opts(State) -> + State#state.opts. + +%% @doc Final getter - reverses the accumulated options. +-spec state_to_opts(state()) -> list(). state_to_opts(#state{opts = Opts}) -> lists:reverse(Opts). +-spec state_to_host_opts(state()) -> [host()]. state_to_host_opts(#state{hosts = Hosts}) -> Hosts. +-spec can_override(global | local | acls, state()) -> boolean(). can_override(global, #state{override_global = Override}) -> Override; can_override(local, #state{override_local = Override}) -> @@ -516,6 +129,7 @@ can_override(local, #state{override_local = Override}) -> can_override(acls, #state{override_acls = Override}) -> Override. +-spec state_to_global_opt(OptName :: atom(), state(), Default :: any()) -> any(). state_to_global_opt(OptName, State, Default) -> Opts = state_to_opts(State), opts_to_global_opt(Opts, OptName, Default). @@ -533,90 +147,9 @@ opts_to_global_opt([_|Opts], OptName, Default) -> opts_to_global_opt([], _OptName, Default) -> Default. +%% Config post-processing -%% ----------------------------------------------------------------- -%% Support for 'include_config_file' -%% ----------------------------------------------------------------- - -config_filenames_to_include([{include_config_file, Filename} | Terms]) -> - [Filename|config_filenames_to_include(Terms)]; -config_filenames_to_include([{include_config_file, Filename, _Options} | Terms]) -> - [Filename|config_filenames_to_include(Terms)]; -config_filenames_to_include([_Other | Terms]) -> - config_filenames_to_include(Terms); -config_filenames_to_include([]) -> - []. - -include_config_files(Terms, Configs) -> - include_config_files(Terms, Configs, []). 
- -include_config_files([], _Configs, Res) -> - Res; -include_config_files([{include_config_file, Filename} | Terms], Configs, Res) -> - include_config_files([{include_config_file, Filename, []} | Terms], - Configs, Res); -include_config_files([{include_config_file, Filename, Options} | Terms], - Configs, Res) -> - IncludedTerms = find_plain_terms_for_file(Filename, Configs), - Disallow = proplists:get_value(disallow, Options, []), - IncludedTerms2 = delete_disallowed(Disallow, IncludedTerms), - AllowOnly = proplists:get_value(allow_only, Options, all), - IncludedTerms3 = keep_only_allowed(AllowOnly, IncludedTerms2), - include_config_files(Terms, Configs, Res ++ IncludedTerms3); -include_config_files([Term | Terms], Configs, Res) -> - include_config_files(Terms, Configs, Res ++ [Term]). - -find_plain_terms_for_file(Filename, Configs) -> - case lists:keyfind(Filename, 1, Configs) of - false -> - %% Terms were not provided by caller for this file - erlang:error({config_not_found, Filename}); - {Filename, Terms} -> - Terms - end. - -%% @doc Filter from the list of terms the disallowed. -%% Returns a sublist of Terms without the ones which first element is -%% included in Disallowed. --spec delete_disallowed(Disallowed :: [atom()], - Terms :: [term()]) -> [term()]. -delete_disallowed(Disallowed, Terms) -> - lists:foldl( - fun(Dis, Ldis) -> - delete_disallowed2(Dis, Ldis) - end, - Terms, - Disallowed). - -delete_disallowed2(Disallowed, [H | T]) -> - case element(1, H) of - Disallowed -> - ?LOG_WARNING(#{what => ignore_disallowed_option, option => Disallowed}), - delete_disallowed2(Disallowed, T); - _ -> - [H | delete_disallowed2(Disallowed, T)] - end; -delete_disallowed2(_, []) -> - []. - -%% @doc Keep from the list only the allowed terms. -%% Returns a sublist of Terms with only the ones which first element is -%% included in Allowed. --spec keep_only_allowed(Allowed :: [atom()], - Terms :: [term()]) -> [term()]. 
-keep_only_allowed(all, Terms) -> - Terms; -keep_only_allowed(Allowed, Terms) -> - {As, NAs} = lists:partition( - fun(Term) -> - lists:member(element(1, Term), Allowed) - end, - Terms), - [?LOG_WARNING(#{what => ignore_disallowed_option, option => NA}) - || NA <- NAs], - As. - - +-spec dedup_state_opts(state()) -> state(). dedup_state_opts(State = #state{opts = RevOpts}) -> {RevOpts2, _Removed} = dedup_state_opts_list(RevOpts, [], [], sets:new()), State#state{opts = RevOpts2}. @@ -636,7 +169,7 @@ dedup_state_opts_list([H|List], Removed, Keep, Set) -> dedup_state_opts_list([], Removed, Keep, _Set) -> {Keep, Removed}. - +-spec add_dep_modules(state()) -> state(). add_dep_modules(State = #state{opts = Opts}) -> Opts2 = add_dep_modules_opts(Opts), State#state{opts = Opts2}. diff --git a/src/config/mongoose_config_parser_cfg.erl b/src/config/mongoose_config_parser_cfg.erl new file mode 100644 index 00000000000..1b61afb39ca --- /dev/null +++ b/src/config/mongoose_config_parser_cfg.erl @@ -0,0 +1,407 @@ +%% @doc Config parsing and processing for the 'cfg' format +-module(mongoose_config_parser_cfg). + +-behaviour(mongoose_config_parser). + +-export([parse_file/1]). + +%% For tests +-export([parse_terms/1]). + +-include("mongoose.hrl"). +-include("ejabberd_config.hrl"). + +-type macro() :: {macro_key(), macro_value()}. + +%% The atom must have all characters in uppercase. +-type macro_key() :: atom(). + +-type macro_value() :: term(). 
+ +-type known_term() :: override_global + | override_local + | override_acls + | {acl, _, _} + | {alarms, _} + | {access, _, _} + | {shaper, _, _} + | {host, _} + | {hosts, _} + | {host_config, _, _} + | {listen, _} + | {language, _} + | {sm_backend, _} + | {outgoing_s2s_port, integer()} + | {outgoing_s2s_options, _, integer()} + | {{s2s_addr, _}, _} + | {s2s_dns_options, [tuple()]} + | {s2s_use_starttls, integer()} + | {s2s_certfile, _} + | {domain_certfile, _, _} + | {node_type, _} + | {cluster_nodes, _} + | {registration_timeout, integer()} + | {mongooseimctl_access_commands, list()} + | {loglevel, _} + | {max_fsm_queue, _} + | {sasl_mechanisms, _} + | host_term(). + +-type host_term() :: {acl, _, _} + | {access, _, _} + | {shaper, _, _} + | {host, _} + | {hosts, _}. + +%%-------------------------------------------------------------------- +%% Configuration parsing +%%-------------------------------------------------------------------- + +-spec parse_file(FileName :: string()) -> mongoose_config_parser:state(). +parse_file(FileName) -> + Terms = mongoose_config_terms:get_plain_terms_file(FileName), + parse_terms(Terms). + +-spec parse_terms(term()) -> mongoose_config_parser:state(). +parse_terms(Terms) -> + State = just_parse_terms(Terms), + State2 = mongoose_config_parser:dedup_state_opts(State), + mongoose_config_parser:add_dep_modules(State2). + +just_parse_terms(Terms) -> + State = lists:foldl(fun search_hosts_and_pools/2, mongoose_config_parser:new_state(), Terms), + TermsWExpandedMacros = replace_macros(Terms), + lists:foldl(fun process_term/2, State, TermsWExpandedMacros). + +-spec search_hosts_and_pools({host|hosts, + [mongoose_config_parser:host()] | mongoose_config_parser:host()}, + mongoose_config_parser:state()) -> any(). 
+search_hosts_and_pools({host, Host}, State) -> + search_hosts_and_pools({hosts, [Host]}, State); +search_hosts_and_pools({hosts, Hosts}, State) -> + case mongoose_config_parser:state_to_host_opts(State) of + [] -> + add_hosts_to_option(Hosts, State); + OldHosts -> + ?LOG_ERROR(#{what => too_many_hosts_definitions, + new_hosts => Hosts, old_hosts => OldHosts}), + exit(#{issue => "too many hosts definitions", + new_hosts => Hosts, + old_hosts => OldHosts}) + end; +search_hosts_and_pools(_Term, State) -> + State. + +-spec add_hosts_to_option(Hosts :: [mongoose_config_parser:host()], + State :: mongoose_config_parser:state()) -> + mongoose_config_parser:state(). +add_hosts_to_option(Hosts, State) -> + PrepHosts = normalize_hosts(Hosts), + add_option(hosts, PrepHosts, mongoose_config_parser:set_hosts(PrepHosts, State)). + +-spec normalize_hosts([mongoose_config_parser:host()]) -> [binary() | tuple()]. +normalize_hosts(Hosts) -> + normalize_hosts(Hosts, []). + + +normalize_hosts([], PrepHosts) -> + lists:reverse(PrepHosts); +normalize_hosts([Host | Hosts], PrepHosts) -> + case jid:nodeprep(host_to_binary(Host)) of + error -> + ?LOG_ERROR(#{what => invalid_hostname_in_config, hostname => Host}), + erlang:error(#{issue => invalid_hostname, + hostname => Host}); + PrepHost -> + normalize_hosts(Hosts, [PrepHost | PrepHosts]) + end. + +host_to_binary(Host) -> + unicode:characters_to_binary(Host). + +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +%%% Support for Macro + +%% @doc Replace the macros with their defined values. +-spec replace_macros(Terms :: [term()]) -> [term()]. +replace_macros(Terms) -> + {TermsOthers, Macros} = split_terms_macros(Terms), + replace(TermsOthers, Macros). + + +%% @doc Split Terms into normal terms and macro definitions. +-spec split_terms_macros(Terms :: [term()]) -> {[term()], [macro()]}. +split_terms_macros(Terms) -> + lists:foldl(fun split_terms_macros_fold/2, {[], []}, Terms). 
+
+-spec split_terms_macros_fold(any(), Acc) -> Acc when
+    Acc :: {[term()], [{Key :: any(), Value :: any()}]}.
+split_terms_macros_fold({define_macro, Key, Value} = Term, {TOs, Ms}) ->
+    case is_macro_name(Key) of
+        true ->
+            {TOs, Ms ++ [{Key, Value}]};
+        false ->
+            exit({macro_not_properly_defined, Term})
+    end;
+split_terms_macros_fold(Term, {TOs, Ms}) ->
+    {TOs ++ [Term], Ms}.
+
+
+%% @doc Recursively replace in Terms macro usages with the defined value.
+-spec replace(Terms :: [term()],
+              Macros :: [macro()]) -> [term()].
+replace([], _) ->
+    [];
+replace([Term | Terms], Macros) ->
+    [replace_term(Term, Macros) | replace(Terms, Macros)].
+
+
+replace_term(Key, Macros) when is_atom(Key) ->
+    case is_macro_name(Key) of
+        true ->
+            case proplists:get_value(Key, Macros) of
+                undefined -> exit({undefined_macro, Key});
+                Value -> Value
+            end;
+        false ->
+            Key
+    end;
+replace_term({use_macro, Key, Value}, Macros) ->
+    proplists:get_value(Key, Macros, Value);
+replace_term(Term, Macros) when is_list(Term) ->
+    replace(Term, Macros);
+replace_term(Term, Macros) when is_tuple(Term) ->
+    List = tuple_to_list(Term),
+    List2 = replace(List, Macros),
+    list_to_tuple(List2);
+replace_term(Term, _) ->
+    Term.
+
+%% Check if the term is a config macro
+-spec is_macro_name(atom()) -> boolean().
+is_macro_name(Atom) when is_atom(Atom) ->
+    is_all_uppercase(Atom) andalso has_any_uppercase(Atom);
+is_macro_name(_) ->
+    false.
+
+-spec is_all_uppercase(atom()) -> boolean().
+is_all_uppercase(Atom) ->
+    String = erlang:atom_to_list(Atom),
+    lists:all(fun(C) when C >= $a, C =< $z -> false;
+                 (_) -> true
+              end, String).
+
+has_any_uppercase(Atom) ->
+    String = erlang:atom_to_list(Atom),
+    lists:any(fun(C) when C >= $A, C =< $Z -> true;
+                 (_) -> false
+              end, String).
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%%% Process terms
+
+-spec process_term(Term :: known_term(),
+                   State :: mongoose_config_parser:state()) -> mongoose_config_parser:state().
+process_term(Term, State) -> + case Term of + override_global -> + mongoose_config_parser:override_global(State); + override_local -> + mongoose_config_parser:override_local(State); + override_acls -> + mongoose_config_parser:override_acls(State); + {acl, _ACLName, _ACLData} -> + process_host_term(Term, global, State); + {alarms, Env} -> + add_option(alarms, Env, State); + {access, _RuleName, _Rules} -> + process_host_term(Term, global, State); + {shaper, _Name, _Data} -> + %%lists:foldl(fun(Host, S) -> process_host_term(Term, Host, S) end, + %% State, State#state.hosts); + process_host_term(Term, global, State); + {host, _Host} -> + State; + {hosts, _Hosts} -> + State; + {host_config, Host, Terms} -> + lists:foldl(fun(T, S) -> + process_host_term(T, list_to_binary(Host), S) end, + State, Terms); + {listen, Listeners} -> + Listeners2 = + lists:map( + fun({PortIP, Module, Opts}) -> + {Port, IPT, _, _, Proto, OptsClean} = + ejabberd_listener:parse_listener_portip(PortIP, Opts), + {{Port, IPT, Proto}, Module, OptsClean} + end, + Listeners), + add_option(listen, Listeners2, State); + {language, Val} -> + add_option(language, list_to_binary(Val), State); + {sm_backend, Val} -> + add_option(sm_backend, Val, State); + {rdbms_server_type, Val} -> + add_option(rdbms_server_type, Val, State); + {outgoing_s2s_port, Port} -> + add_option(outgoing_s2s_port, Port, State); + {outgoing_s2s_options, Methods, Timeout} -> + State1 = add_option(outgoing_s2s_families, Methods, State), + add_option(outgoing_s2s_timeout, Timeout, State1); + {{s2s_addr, Host}, Addr} -> + add_option({s2s_addr, list_to_binary(Host)}, Addr, State); + {{global_distrib_addr, Host}, Addr} -> + add_option({global_distrib_addr, list_to_binary(Host)}, Addr, State); + {s2s_dns_options, PropList} -> + add_option(s2s_dns_options, PropList, State); + {s2s_use_starttls, Port} -> + add_option(s2s_use_starttls, Port, State); + {s2s_ciphers, Ciphers} -> + add_option(s2s_ciphers, Ciphers, State); + {s2s_certfile, 
CertFile} -> + State2 = compact_global_option(required_files, [CertFile], State), + add_option(s2s_certfile, CertFile, State2); + {domain_certfile, Domain, CertFile} -> + State2 = compact_global_option(required_files, [CertFile], State), + add_option({domain_certfile, Domain}, CertFile, State2); + {node_type, NodeType} -> + add_option(node_type, NodeType, State); + {cluster_nodes, Nodes} -> + add_option(cluster_nodes, Nodes, State); + {watchdog_admins, Admins} -> + add_option(watchdog_admins, Admins, State); + {watchdog_large_heap, LH} -> + add_option(watchdog_large_heap, LH, State); + {registration_timeout, Timeout} -> + add_option(registration_timeout, Timeout, State); + {mongooseimctl_access_commands, ACs} -> + add_option(mongooseimctl_access_commands, ACs, State); + {routing_modules, Mods} -> + add_option(routing_modules, Mods, State); + {loglevel, Loglevel} -> + add_option(loglevel, Loglevel, State); + {max_fsm_queue, N} -> + add_option(max_fsm_queue, N, State); + {sasl_mechanisms, Mechanisms} -> + add_option(sasl_mechanisms, Mechanisms, State); + {all_metrics_are_global, Value} -> + add_option(all_metrics_are_global, Value, State); + {cowboy_server_name, Value} -> + add_option(cowboy_server_name, Value, State); + {services, Value} -> + add_option(services, Value, State); + {_Opt, _Val} -> + lists:foldl(fun(Host, S) -> process_host_term(Term, Host, S) end, + State, mongoose_config_parser:state_to_host_opts(State)) + end. + +-spec process_host_term(Term :: host_term(), + Host :: acl:host(), + State :: mongoose_config_parser:state()) -> mongoose_config_parser:state(). 
+process_host_term(Term, Host, State) -> + case Term of + {acl, ACLName, ACLData} -> + OptRec = acl:to_record(Host, ACLName, ACLData), + append_option(OptRec, State); + {access, RuleName, Rules} -> + append_global_opt({access, RuleName, Host}, Rules, State); + {shaper, Name, Data} -> + append_global_opt({shaper, Name, Host}, Data, State); + {host, Host} -> + State; + {hosts, _Hosts} -> + State; + {outgoing_pools, Pools} when is_list(Pools) -> + add_option(outgoing_pools, Pools, State); + {node_specific_options, NodeOpts} -> + add_option(node_specific_options, NodeOpts, State); + {Opt, Val} -> + add_option({Opt, Host}, Val, State) + end. + +-spec add_option(Opt :: mongoose_config_parser:key(), + Val :: mongoose_config_parser:value(), + State :: mongoose_config_parser:state()) -> mongoose_config_parser:state(). +add_option(Opt, Val, State) -> + Table = opt_table(Opt), + add_option(Table, Opt, Val, State). + +add_option(config, Opt, Val, State) -> + append_global_opt(Opt, Val, State); +add_option(local_config, {{add, OptName}, Host}, Val, State) -> + compact_option({OptName, Host}, Val, State); +add_option(local_config, Opt, Val, State) -> + append_local_opt(Opt, Val, State). + +append_global_opt(OptName, OptValue, State) -> + OptRec = #config{key = OptName, value = OptValue}, + append_option(OptRec, State). + +append_local_opt(OptName, OptValue, State) -> + OptRec = #local_config{key = OptName, value = OptValue}, + append_option(OptRec, State). + +append_option(OptRec, State) -> + Opts = mongoose_config_parser:get_opts(State), + mongoose_config_parser:set_opts([OptRec | Opts], State). + +%% Merges two values of a local option +compact_option(Opt, Val, State) -> + Opts = mongoose_config_parser:get_opts(State), + Opts2 = compact(Opt, Val, Opts, []), + mongoose_config_parser:set_opts(Opts2, State). + +compact({OptName, Host} = Opt, Val, [], Os) -> + %% The option is defined for host using host_config before the global option. + %% The host_option can be overwritten. 
+ %% TODO or maybe not. We need a test. + ?LOG_WARNING(#{what => host_config_option_can_be_overwritten, + text => <<"define global options before host options">>, + option_name => OptName, host => Host}), + [#local_config{key = Opt, value = Val}] ++ Os; +%% Traverse the list of the options already parsed +compact(Opt, Val, [#local_config{key = Opt, value = OldVal} | Os1], Os2) -> + %% If the key of a local_config matches the Opt that wants to be added + OptRec = #local_config{key = Opt, value = Val ++ OldVal}, + %% Then prepend the new value to the list of old values + Os2 ++ [OptRec] ++ Os1; +compact(Opt, Val, [O | Os1], Os2) -> + compact(Opt, Val, Os1, Os2 ++ [O]). + + +%% Merges two values of a global option +compact_global_option(Opt, Val, State) when is_list(Val) -> + Opts2 = compact_global(Opt, Val, mongoose_config_parser:get_opts(State), []), + mongoose_config_parser:set_opts(Opts2, State). + +compact_global(Opt, Val, [], Os) -> + [#config{key = Opt, value = Val}] ++ Os; +%% Traverse the list of the options already parsed +compact_global(Opt, Val, [#config{key = Opt, value = OldVal} | Os1], Os2) -> + %% If the key of a local_config matches the Opt that wants to be added + OptRec = #config{key = Opt, value = Val ++ OldVal}, + %% Then prepend the new value to the list of old values + Os2 ++ [OptRec] ++ Os1; +compact_global(Opt, Val, [O | Os1], Os2) -> + compact_global(Opt, Val, Os1, Os2 ++ [O]). + + +opt_table(Opt) -> + case is_global_option(Opt) of + true -> + config; + false -> + local_config + end. + +is_global_option(Opt) -> + lists:member(Opt, global_options()). + +global_options() -> + [ + hosts, + language, + sm_backend, + node_specific_options + ]. 
diff --git a/src/config/mongoose_config_parser_toml.erl b/src/config/mongoose_config_parser_toml.erl new file mode 100644 index 00000000000..80ad0000ac4 --- /dev/null +++ b/src/config/mongoose_config_parser_toml.erl @@ -0,0 +1,2001 @@ +%% @doc Config parsing and processing for the TOML format +-module(mongoose_config_parser_toml). + +-behaviour(mongoose_config_parser). + +-export([parse_file/1]). + +-ifdef(TEST). +-export([parse/1]). +-endif. + +-include("mongoose.hrl"). +-include("ejabberd_config.hrl"). + +%% Used to create per-host config when the list of hosts is not known yet +-define(HOST_F(Expr), [fun(Host) -> Expr end]). + +%% Input: TOML parsed by tomerl +-type toml_key() :: binary(). +-type toml_value() :: tomerl:value(). +-type toml_section() :: tomerl:section(). + +%% Output: list of config records, containing key-value pairs +-type option() :: term(). % any part of a config value, which can be a complex term +-type config() :: #config{} | #local_config{} | acl:acl() | {override, atom()}. +-type config_list() :: [config() | fun((ejabberd:server()) -> [config()])]. % see HOST_F + +%% Path from the currently processed config node to the root +%% - toml_key(): key in a toml_section() +%% - item: item in a list +%% - tuple(): item in a list, tagged with data from the item, e.g. host name +-type path() :: [toml_key() | item | tuple()]. + +-spec parse_file(FileName :: string()) -> mongoose_config_parser:state(). 
+parse_file(FileName) -> + {ok, Content} = tomerl:read_file(FileName), + Config = parse(Content), + [Hosts] = lists:filtermap(fun(#config{key = hosts, value = Hosts}) -> + {true, Hosts}; + (_) -> false + end, Config), + {FOpts, Config1} = lists:partition(fun(Opt) -> is_function(Opt, 1) end, Config), + {Overrides, Opts} = lists:partition(fun({override, _}) -> true; + (_) -> false + end, Config1), + HOpts = lists:flatmap(fun(F) -> lists:flatmap(F, Hosts) end, FOpts), + lists:foldl(fun(F, StateIn) -> F(StateIn) end, + mongoose_config_parser:new_state(), + [fun(S) -> mongoose_config_parser:set_hosts(Hosts, S) end, + fun(S) -> mongoose_config_parser:set_opts(Opts ++ HOpts, S) end, + fun mongoose_config_parser:dedup_state_opts/1, + fun mongoose_config_parser:add_dep_modules/1, + fun(S) -> set_overrides(Overrides, S) end]). + +%% Config processing functions are annotated with TOML paths +%% Path syntax: dotted, like TOML keys with the following additions: +%% - '[]' denotes an element in a list +%% - '( ... )' encloses an optional prefix +%% - '*' is a wildcard for names - usually that name is passed as an argument +%% If the path is the same as for the previous function, it is not repeated. +%% +%% Example: (host_config[].)access.* +%% Meaning: either a key in the 'access' section, e.g. +%% [access] +%% local = ... +%% or the same, but prefixed for a specific host, e.g. +%% [[host_config]] +%% host = "myhost" +%% host_config.access +%% local = ... + +%% root path +-spec parse(toml_section()) -> config_list(). +parse(Content) -> + parse_section([], Content). + +%% path: * +-spec process_section(path(), toml_section() | [toml_section()]) -> config_list(). 
+process_section([<<"listen">>] = Path, Content) -> + Listeners = parse_section(Path, Content), + [#local_config{key = listen, value = Listeners}]; +process_section([<<"auth">>|_] = Path, Content) -> + AuthOpts = parse_section(Path, Content), + ?HOST_F(partition_auth_opts(AuthOpts, Host)); +process_section([<<"outgoing_pools">>] = Path, Content) -> + Pools = parse_section(Path, Content), + [#local_config{key = outgoing_pools, value = Pools}]; +process_section([<<"services">>] = Path, Content) -> + Services = parse_section(Path, Content), + [#local_config{key = services, value = Services}]; +process_section([<<"modules">>|_] = Path, Content) -> + Mods = parse_section(Path, Content), + ?HOST_F([#local_config{key = {modules, Host}, value = Mods}]); +process_section([<<"host_config">>] = Path, Content) -> + parse_list(Path, Content); +process_section(Path, Content) -> + parse_section(Path, Content). + +%% path: (host_config[].)general.* +-spec process_general(path(), toml_value()) -> [config()]. 
+process_general([<<"loglevel">>|_], V) -> + [#local_config{key = loglevel, value = b2a(V)}]; +process_general([<<"hosts">>|_] = Path, Hosts) -> + [#config{key = hosts, value = parse_list(Path, Hosts)}]; +process_general([<<"registration_timeout">>|_], V) -> + [#local_config{key = registration_timeout, value = int_or_infinity(V)}]; +process_general([<<"language">>|_], V) -> + [#config{key = language, value = V}]; +process_general([<<"all_metrics_are_global">>|_], V) -> + [#local_config{key = all_metrics_are_global, value = V}]; +process_general([<<"sm_backend">>|_], V) -> + [#config{key = sm_backend, value = {b2a(V), []}}]; +process_general([<<"max_fsm_queue">>|_], V) -> + [#local_config{key = max_fsm_queue, value = V}]; +process_general([<<"http_server_name">>|_], V) -> + [#local_config{key = cowboy_server_name, value = b2l(V)}]; +process_general([<<"rdbms_server_type">>|_], V) -> + [#local_config{key = rdbms_server_type, value = b2a(V)}]; +process_general([<<"override">>|_] = Path, Value) -> + parse_list(Path, Value); +process_general([<<"pgsql_users_number_estimate">>|_], V) -> + ?HOST_F([#local_config{key = {pgsql_users_number_estimate, Host}, value = V}]); +process_general([<<"route_subdomains">>|_], V) -> + ?HOST_F([#local_config{key = {route_subdomains, Host}, value = b2a(V)}]); +process_general([<<"mongooseimctl_access_commands">>|_] = Path, Rules) -> + [#local_config{key = mongooseimctl_access_commands, value = parse_section(Path, Rules)}]; +process_general([<<"routing_modules">>|_] = Path, Mods) -> + [#local_config{key = routing_modules, value = parse_list(Path, Mods)}]; +process_general([<<"replaced_wait_timeout">>|_], V) -> + ?HOST_F([#local_config{key = {replaced_wait_timeout, Host}, value = V}]); +process_general([<<"hide_service_name">>|_], V) -> + ?HOST_F([#local_config{key = {hide_service_name, Host}, value = V}]). + +-spec process_host(path(), toml_value()) -> [option()]. +process_host(_Path, Val) -> + [jid:nodeprep(Val)]. 
+ +-spec process_override(path(), toml_value()) -> [option()]. +process_override(_Path, Override) -> + [{override, b2a(Override)}]. + +-spec ctl_access_rule(path(), toml_section()) -> [option()]. +ctl_access_rule([Rule|_] = Path, Section) -> + limit_keys([<<"commands">>, <<"argument_restrictions">>], Section), + [{b2a(Rule), + parse_kv(Path, <<"commands">>, Section), + parse_kv(Path, <<"argument_restrictions">>, Section, #{})}]. + +-spec ctl_access_commands(path(), toml_value()) -> option(). +ctl_access_commands(_Path, <<"all">>) -> all; +ctl_access_commands(Path, Commands) -> parse_list(Path, Commands). + +-spec ctl_access_arg_restriction(path(), toml_value()) -> [option()]. +ctl_access_arg_restriction([Key|_], Value) -> + [{b2a(Key), b2l(Value)}]. + +%% path: listen.*[] +-spec process_listener(path(), toml_section()) -> [option()]. +process_listener([_, Type|_] = Path, Content) -> + Options = maps:without([<<"port">>, <<"ip_address">>], Content), + PortIP = listener_portip(Content), + Opts = parse_section(Path, Options), + {Port, IPT, _, _, Proto, OptsClean} = + ejabberd_listener:parse_listener_portip(PortIP, Opts), + [{{Port, IPT, Proto}, listener_module(Type), OptsClean}]. + +-spec listener_portip(toml_section()) -> option(). +listener_portip(#{<<"port">> := Port, <<"ip_address">> := Addr}) -> {Port, b2l(Addr)}; +listener_portip(#{<<"port">> := Port}) -> Port. + +-spec listener_module(toml_key()) -> option(). +listener_module(<<"http">>) -> ejabberd_cowboy; +listener_module(<<"c2s">>) -> ejabberd_c2s; +listener_module(<<"s2s">>) -> ejabberd_s2s_in; +listener_module(<<"service">>) -> ejabberd_service. + +%% path: listen.http[].* +-spec http_listener_opt(path(), toml_value()) -> [option()]. 
+http_listener_opt([<<"tls">>|_] = Path, Opts) -> + [{ssl, parse_section(Path, Opts)}]; +http_listener_opt([<<"transport">>|_] = Path, Opts) -> + [{transport_options, parse_section(Path, Opts)}]; +http_listener_opt([<<"protocol">>|_] = Path, Opts) -> + [{protocol_options, parse_section(Path, Opts)}]; +http_listener_opt([<<"handlers">>|_] = Path, Handlers) -> + [{modules, parse_section(Path, Handlers)}]; +http_listener_opt(P, V) -> listener_opt(P, V). + +%% path: listen.c2s[].* +-spec c2s_listener_opt(path(), toml_value()) -> [option()]. +c2s_listener_opt([<<"access">>|_], V) -> [{access, b2a(V)}]; +c2s_listener_opt([<<"shaper">>|_], V) -> [{shaper, b2a(V)}]; +c2s_listener_opt([<<"xml_socket">>|_], V) -> [{xml_socket, V}]; +c2s_listener_opt([<<"zlib">>|_], V) -> [{zlib, V}]; +c2s_listener_opt([<<"max_fsm_queue">>|_], V) -> [{max_fsm_queue, V}]; +c2s_listener_opt([{tls, _}|_] = P, V) -> listener_tls_opts(P, V); +c2s_listener_opt(P, V) -> xmpp_listener_opt(P, V). + +%% path: listen.s2s[].* +-spec s2s_listener_opt(path(), toml_value()) -> [option()]. +s2s_listener_opt([<<"shaper">>|_], V) -> [{shaper, b2a(V)}]; +s2s_listener_opt([<<"tls">>|_] = P, V) -> parse_section(P, V); +s2s_listener_opt(P, V) -> xmpp_listener_opt(P, V). + +%% path: listen.service[].*, +%% listen.http[].handlers.mod_websockets[].service.* +-spec service_listener_opt(path(), toml_value()) -> [option()]. +service_listener_opt([<<"access">>|_], V) -> [{access, b2a(V)}]; +service_listener_opt([<<"shaper_rule">>|_], V) -> [{shaper_rule, b2a(V)}]; +service_listener_opt([<<"check_from">>|_], V) -> [{service_check_from, V}]; +service_listener_opt([<<"hidden_components">>|_], V) -> [{hidden_components, V}]; +service_listener_opt([<<"conflict_behaviour">>|_], V) -> [{conflict_behaviour, b2a(V)}]; +service_listener_opt([<<"password">>|_], V) -> [{password, b2l(V)}]; +service_listener_opt([<<"max_fsm_queue">>|_], V) -> [{max_fsm_queue, V}]; +service_listener_opt(P, V) -> xmpp_listener_opt(P, V). 
+ +%% path: listen.c2s[].*, listen.s2s[].*, listen.service[].* +-spec xmpp_listener_opt(path(), toml_value()) -> [option()]. +xmpp_listener_opt([<<"hibernate_after">>|_], V) -> [{hibernate_after, V}]; +xmpp_listener_opt([<<"max_stanza_size">>|_], V) -> [{max_stanza_size, V}]; +xmpp_listener_opt([<<"backlog">>|_], N) -> [{backlog, N}]; +xmpp_listener_opt([<<"proxy_protocol">>|_], V) -> [{proxy_protocol, V}]; +xmpp_listener_opt([<<"num_acceptors">>|_], V) -> [{acceptors_num, V}]; +xmpp_listener_opt(Path, V) -> listener_opt(Path, V). + +%% path: listen.*[].* +-spec listener_opt(path(), toml_value()) -> [option()]. +listener_opt([<<"proto">>|_], Proto) -> [{proto, b2a(Proto)}]; +listener_opt([<<"ip_version">>|_], 6) -> [inet6]; +listener_opt([<<"ip_version">>|_], 4) -> [inet]. + +%% path: listen.http[].tls.* +-spec https_option(path(), toml_value()) -> [option()]. +https_option([<<"verify_mode">>|_], Value) -> [{verify_mode, b2a(Value)}]; +https_option(Path, Value) -> tls_option(Path, Value). + +%% path: listen.c2s[].tls.* +-spec c2s_tls_option(path(), toml_value()) -> option(). +c2s_tls_option([<<"mode">>|_], V) -> [b2a(V)]; +c2s_tls_option([<<"verify_peer">>|_], V) -> [verify_peer(V)]; +c2s_tls_option([<<"protocol_options">>, {tls, fast_tls}|_] = Path, V) -> + [{protocol_options, parse_list(Path, V)}]; +c2s_tls_option([_, {tls, fast_tls}|_] = Path, V) -> fast_tls_option(Path, V); +c2s_tls_option([<<"verify_mode">>, {tls, just_tls}|_], V) -> b2a(V); +c2s_tls_option([<<"disconnect_on_failure">>, {tls, just_tls}|_], V) -> V; +c2s_tls_option([<<"crl_files">>, {tls, just_tls}|_] = Path, V) -> [{crlfiles, parse_list(Path, V)}]; +c2s_tls_option([_, {tls, just_tls}|_] = Path, V) -> tls_option(Path, V). + +%% path: listen.s2s[].tls.* +-spec s2s_tls_option(path(), toml_value()) -> [option()]. 
+s2s_tls_option([<<"protocol_options">>|_] = Path, V) -> + [{protocol_options, parse_list(Path, V)}]; +s2s_tls_option([Opt|_] = Path, Val) when Opt =:= <<"cacertfile">>; + Opt =:= <<"dhfile">>; + Opt =:= <<"ciphers">> -> + fast_tls_option(Path, Val). + +%% path: listen.http[].transport.* +-spec cowboy_transport_opt(path(), toml_value()) -> [option()]. +cowboy_transport_opt([<<"num_acceptors">>|_], N) -> [{num_acceptors, N}]; +cowboy_transport_opt([<<"max_connections">>|_], N) -> [{max_connections, int_or_infinity(N)}]. + +%% path: listen.http[].protocol.* +-spec cowboy_protocol_opt(path(), toml_value()) -> [option()]. +cowboy_protocol_opt([<<"compress">>|_], V) -> [{compress, V}]. + +%% path: listen.http[].handlers.*[] +-spec cowboy_module(path(), toml_section()) -> [option()]. +cowboy_module([_, Type|_] = Path, #{<<"host">> := Host, <<"path">> := ModPath} = Options) -> + Opts = maps:without([<<"host">>, <<"path">>], Options), + ModuleOpts = cowboy_module_options(Path, Opts), + [{b2l(Host), b2l(ModPath), b2a(Type), ModuleOpts}]. + +-spec cowboy_module_options(path(), toml_section()) -> [option()]. 
+cowboy_module_options([_, <<"mod_websockets">>|_] = Path, Opts) -> + parse_section(Path, Opts); +cowboy_module_options([_, <<"lasse_handler">>|_], Opts) -> + limit_keys([<<"module">>], Opts), + #{<<"module">> := Module} = Opts, + [b2a(Module)]; +cowboy_module_options([_, <<"cowboy_static">>|_], Opts) -> + limit_keys([<<"type">>, <<"app">>, <<"content_path">>], Opts), + #{<<"type">> := Type, + <<"app">> := App, + <<"content_path">> := Path} = Opts, + {b2a(Type), b2a(App), b2l(Path), [{mimetypes, cow_mimetypes, all}]}; +cowboy_module_options([_, <<"cowboy_swagger_redirect_handler">>|_], Opts) -> + Opts = #{}; +cowboy_module_options([_, <<"cowboy_swagger_json_handler">>|_], Opts) -> + Opts = #{}; +cowboy_module_options([_, <<"mongoose_api">>|_] = Path, Opts) -> + #{<<"handlers">> := _} = Opts, + parse_section(Path, Opts); +cowboy_module_options([_, <<"mongoose_api_admin">>|_], + #{<<"username">> := User, <<"password">> := Pass}) -> + [{auth, {User, Pass}}]; +cowboy_module_options([_, <<"mongoose_api_admin">>|_], #{}) -> + []; +cowboy_module_options([_, <<"mongoose_api_client">>|_], #{}) -> + []; +cowboy_module_options(_, Opts) -> + limit_keys([], Opts), + []. + +%% path: listen.http[].handlers.mod_websockets[].* +-spec websockets_option(path(), toml_value()) -> [option()]. +websockets_option([<<"timeout">>|_], V) -> + [{timeout, int_or_infinity(V)}]; +websockets_option([<<"ping_rate">>|_], <<"none">>) -> + [{ping_rate, none}]; +websockets_option([<<"ping_rate">>|_], V) -> + [{ping_rate, V}]; +websockets_option([<<"max_stanza_size">>|_], V) -> + [{max_stanza_size, int_or_infinity(V)}]; +websockets_option([<<"service">>|_] = Path, Value) -> + [{ejabberd_service, parse_section(Path, Value)}]. + +%% path: listen.http[].handlers.mongoose_api[].* +-spec mongoose_api_option(path(), toml_value()) -> [option()]. +mongoose_api_option([<<"handlers">>|_] = Path, Value) -> + [{handlers, parse_list(Path, Value)}]. 
+ +%% path: listen.c2s[].tls +-spec listener_tls_opts(path(), toml_section()) -> [option()]. +listener_tls_opts([{tls, just_tls}|_] = Path, M) -> + VM = just_tls_verify_fun(Path, M), + Common = maps:with([<<"mode">>, <<"verify_peer">>, <<"crl_files">>], M), + OptsM = maps:without([<<"module">>, + <<"mode">>, <<"verify_peer">>, <<"crl_files">>, + <<"verify_mode">>, <<"disconnect_on_failure">>], M), + SSLOpts = case VM ++ parse_section(Path, OptsM) of + [] -> []; + Opts -> [{ssl_options, Opts}] + end, + [{tls_module, just_tls}] ++ SSLOpts ++ parse_section(Path, Common); +listener_tls_opts([{tls, fast_tls}|_] = Path, M) -> + parse_section(Path, maps:without([<<"module">>], M)). + +-spec just_tls_verify_fun(path(), toml_section()) -> [option()]. +just_tls_verify_fun(Path, #{<<"verify_mode">> := _} = M) -> + VMode = parse_kv(Path, <<"verify_mode">>, M), + Disconnect = parse_kv(Path, <<"disconnect_on_failure">>, M, true), + [{verify_fun, {VMode, Disconnect}}]; +just_tls_verify_fun(_, _) -> []. + +%% path: (host_config[].)auth.* +-spec auth_option(path(), toml_value()) -> [option()]. +auth_option([<<"methods">>|_] = Path, Methods) -> + [{auth_method, parse_list(Path, Methods)}]; +auth_option([<<"password">>|_] = Path, #{<<"hash">> := Hashes}) -> + [{password_format, {scram, parse_list([<<"hash">> | Path], Hashes)}}]; +auth_option([<<"password">>|_], #{<<"format">> := V}) -> + [{password_format, b2a(V)}]; +auth_option([<<"scram_iterations">>|_], V) -> + [{scram_iterations, V}]; +auth_option([<<"sasl_external">>|_] = Path, V) -> + [{cyrsasl_external, parse_list(Path, V)}]; +auth_option([<<"sasl_mechanisms">>|_] = Path, V) -> + [{sasl_mechanisms, parse_list(Path, V)}]; +auth_option([<<"jwt">>|_] = Path, V) -> + ensure_keys([<<"secret">>, <<"algorithm">>, <<"username_key">>], V), + parse_section(Path, V); +auth_option(Path, V) -> + parse_section(Path, V). 
+ +%% path: (host_config[].)auth.anonymous.* +auth_anonymous_option([<<"allow_multiple_connections">>|_], V) -> + [{allow_multiple_connections, V}]; +auth_anonymous_option([<<"protocol">>|_], V) -> + [{anonymous_protocol, b2a(V)}]. + +%% path: (host_config[].)auth.ldap.* +-spec auth_ldap_option(path(), toml_section()) -> [option()]. +auth_ldap_option([<<"pool_tag">>|_], V) -> + [{ldap_pool_tag, b2a(V)}]; +auth_ldap_option([<<"bind_pool_tag">>|_], V) -> + [{ldap_bind_pool_tag, b2a(V)}]; +auth_ldap_option([<<"base">>|_], V) -> + [{ldap_base, b2l(V)}]; +auth_ldap_option([<<"uids">>|_] = Path, V) -> + [{ldap_uids, parse_list(Path, V)}]; +auth_ldap_option([<<"filter">>|_], V) -> + [{ldap_filter, b2l(V)}]; +auth_ldap_option([<<"dn_filter">>|_] = Path, V) -> + Opts = parse_section(Path, V), + {_, Filter} = proplists:lookup(filter, Opts), + {_, Attrs} = proplists:lookup(attributes, Opts), + [{ldap_dn_filter, {Filter, Attrs}}]; +auth_ldap_option([<<"local_filter">>|_] = Path, V) -> + Opts = parse_section(Path, V), + {_, Op} = proplists:lookup(operation, Opts), + {_, Attribute} = proplists:lookup(attribute, Opts), + {_, Values} = proplists:lookup(values, Opts), + [{ldap_local_filter, {Op, {Attribute, Values}}}]; +auth_ldap_option([<<"deref">>|_], V) -> + [{ldap_deref, b2a(V)}]. + +-spec auth_ldap_uids(path(), toml_section()) -> [option()]. +auth_ldap_uids(_, #{<<"attr">> := Attr, <<"format">> := Format}) -> + [{b2l(Attr), b2l(Format)}]; +auth_ldap_uids(_, #{<<"attr">> := Attr}) -> + [b2l(Attr)]. + +-spec auth_ldap_dn_filter(path(), toml_value()) -> [option()]. +auth_ldap_dn_filter([<<"filter">>|_], V) -> + [{filter, b2l(V)}]; +auth_ldap_dn_filter([<<"attributes">>|_] = Path, V) -> + Attrs = parse_list(Path, V), + [{attributes, Attrs}]. + +-spec auth_ldap_local_filter(path(), toml_value()) -> [option()]. 
+auth_ldap_local_filter([<<"operation">>|_], V) -> + [{operation, b2a(V)}]; +auth_ldap_local_filter([<<"attribute">>|_], V) -> + [{attribute, b2l(V)}]; +auth_ldap_local_filter([<<"values">>|_] = Path, V) -> + Attrs = parse_list(Path, V), + [{values, Attrs}]. + +%% path: (host_config[].)auth.external.* +-spec auth_external_option(path(), toml_value()) -> [option()]. +auth_external_option([<<"instances">>|_], V) -> + [{extauth_instances, V}]; +auth_external_option([<<"program">>|_], V) -> + [{extauth_program, b2l(V)}]. + +%% path: (host_config[].)auth.http.* +-spec auth_http_option(path(), toml_value()) -> [option()]. +auth_http_option([<<"basic_auth">>|_], V) -> + [{basic_auth, b2l(V)}]. + +%% path: (host_config[].)auth.jwt.* +-spec auth_jwt_option(path(), toml_value()) -> [option()]. +auth_jwt_option([<<"secret">>|_] = Path, V) -> + [Item] = parse_section(Path, V), % expect exactly one option + [Item]; +auth_jwt_option([<<"algorithm">>|_], V) -> + [{jwt_algorithm, b2l(V)}]; +auth_jwt_option([<<"username_key">>|_], V) -> + [{jwt_username_key, b2a(V)}]. + +%% path: (host_config[].)auth.jwt.secret.* +-spec auth_jwt_secret(path(), toml_value()) -> [option()]. +auth_jwt_secret([<<"file">>|_], V) -> + [{jwt_secret_source, b2l(V)}]; +auth_jwt_secret([<<"env">>|_], V) -> + [{jwt_secret_source, {env, b2l(V)}}]; +auth_jwt_secret([<<"value">>|_], V) -> + [{jwt_secret, b2l(V)}]. + +%% path: (host_config[].)auth.riak.* +-spec auth_riak_option(path(), toml_value()) -> [option()]. +auth_riak_option([<<"bucket_type">>|_], V) -> + [{bucket_type, V}]. + +%% path: (host_config[].)auth.sasl_external[] +-spec sasl_external(path(), toml_value()) -> [option()]. +sasl_external(_, <<"standard">>) -> [standard]; +sasl_external(_, <<"common_name">>) -> [common_name]; +sasl_external(_, <<"auth_id">>) -> [auth_id]; +sasl_external(_, M) -> [{mod, b2a(M)}]. + +%% path: (host_config[].)auth.sasl_mechanism[] +%% auth.sasl_mechanisms.* +-spec sasl_mechanism(path(), toml_value()) -> [option()]. 
+sasl_mechanism(_, V) -> + [b2a(<<"cyrsasl_", V/binary>>)]. + +-spec partition_auth_opts([{atom(), any()}], ejabberd:server()) -> [config()]. +partition_auth_opts(AuthOpts, Host) -> + {InnerOpts, OuterOpts} = lists:partition(fun({K, _}) -> is_inner_auth_opt(K) end, AuthOpts), + [#local_config{key = {auth_opts, Host}, value = InnerOpts} | + [#local_config{key = {K, Host}, value = V} || {K, V} <- OuterOpts]]. + +-spec is_inner_auth_opt(atom()) -> boolean(). +is_inner_auth_opt(auth_method) -> false; +is_inner_auth_opt(allow_multiple_connections) -> false; +is_inner_auth_opt(anonymous_protocol) -> false; +is_inner_auth_opt(sasl_mechanisms) -> false; +is_inner_auth_opt(extauth_instances) -> false; +is_inner_auth_opt(_) -> true. + +%% path: outgoing_pools.*.* +-spec process_pool(path(), toml_section()) -> [option()]. +process_pool([Tag, Type|_] = Path, M) -> + Scope = pool_scope(M), + Options = parse_section(Path, maps:without([<<"scope">>, <<"host">>, <<"connection">>], M)), + ConnectionOptions = parse_kv(Path, <<"connection">>, M, #{}), + [{b2a(Type), Scope, b2a(Tag), Options, ConnectionOptions}]. + +-spec pool_scope(toml_section()) -> option(). +pool_scope(#{<<"scope">> := <<"single_host">>, <<"host">> := Host}) -> Host; +pool_scope(#{<<"scope">> := Scope}) -> b2a(Scope); +pool_scope(#{}) -> global. + +%% path: outgoing_pools.*.*.*, +%% (host_config[].)modules.mod_event_pusher.backend.push.wpool.* +-spec pool_option(path(), toml_value()) -> [option()]. +pool_option([<<"workers">>|_], V) -> [{workers, V}]; +pool_option([<<"strategy">>|_], V) -> [{strategy, b2a(V)}]; +pool_option([<<"call_timeout">>|_], V) -> [{call_timeout, V}]. + +%% path: outgoing_pools.*.connection +-spec connection_options(path(), toml_section()) -> [option()]. 
+connection_options([{connection, Driver}, _, <<"rdbms">>|_] = Path, M) -> + Options = parse_section(Path, maps:with([<<"keepalive_interval">>], M)), + ServerOptions = parse_section(Path, maps:without([<<"keepalive_interval">>], M)), + Server = rdbms_server(Driver, ServerOptions), + [{server, Server} | Options]; +connection_options([_, _, <<"riak">>|_] = Path, Options = #{<<"username">> := UserName, + <<"password">> := Password}) -> + M = maps:without([<<"username">>, <<"password">>], Options), + [{credentials, b2l(UserName), b2l(Password)} | parse_section(Path, M)]; +connection_options(Path, Options) -> + parse_section(Path, Options). + +-spec rdbms_server(atom(), [option()]) -> option(). +rdbms_server(odbc, Opts) -> + [{settings, Settings}] = Opts, + Settings; +rdbms_server(Driver, Opts) -> + {_, Host} = proplists:lookup(host, Opts), + {_, Database} = proplists:lookup(database, Opts), + {_, UserName} = proplists:lookup(username, Opts), + {_, Password} = proplists:lookup(password, Opts), + case {proplists:get_value(port, Opts, no_port), + proplists:get_value(tls, Opts, no_tls)} of + {no_port, no_tls} -> {Driver, Host, Database, UserName, Password}; + {Port, no_tls} -> {Driver, Host, Port, Database, UserName, Password}; + {no_port, TLS} -> {Driver, Host, Database, UserName, Password, TLS}; + {Port, TLS} -> {Driver, Host, Port, Database, UserName, Password, TLS} + end. + +%% path: outgoing_pools.rdbms.*.connection.* +-spec odbc_option(path(), toml_value()) -> [option()]. +odbc_option([<<"settings">>|_], V) -> [{settings, b2l(V)}]; +odbc_option(Path, V) -> rdbms_option(Path, V). + +-spec sql_server_option(path(), toml_value()) -> [option()]. 
+sql_server_option([<<"host">>|_], V) -> [{host, b2l(V)}]; +sql_server_option([<<"database">>|_], V) -> [{database, b2l(V)}]; +sql_server_option([<<"username">>|_], V) -> [{username, b2l(V)}]; +sql_server_option([<<"password">>|_], V) -> [{password, b2l(V)}]; +sql_server_option([<<"port">>|_], V) -> [{port, V}]; +sql_server_option([<<"tls">>, {connection, mysql} | _] = Path, Opts) -> + [{tls, parse_section(Path, Opts)}]; +sql_server_option([<<"tls">>, {connection, pgsql} | _] = Path, Opts) -> + % true means try to establish encryption and proceed plain if failed + % required means fail if encryption is not possible + % false would mean do not even try, but we do not let the user do it + {SSLMode, Opts1} = case maps:take(<<"required">>, Opts) of + {true, M} -> {required, M}; + {false, M} -> {true, M}; + error -> {true, Opts} + end, + SSLOpts = case parse_section(Path, Opts1) of + [] -> []; + SSLOptList -> [{ssl_opts, SSLOptList}] + end, + [{tls, [{ssl, SSLMode} | SSLOpts]}]; +sql_server_option(Path, V) -> rdbms_option(Path, V). + +-spec rdbms_option(path(), toml_value()) -> [option()]. +rdbms_option([<<"keepalive_interval">>|_], V) -> [{keepalive_interval, V}]; +rdbms_option([<<"driver">>|_], _V) -> []. + +%% path: outgoing_pools.http.*.connection.* +-spec http_option(path(), toml_value()) -> [option()]. +http_option([<<"host">>|_], V) -> [{server, b2l(V)}]; +http_option([<<"path_prefix">>|_], V) -> [{path_prefix, b2l(V)}]; +http_option([<<"request_timeout">>|_], V) -> [{request_timeout, V}]; +http_option([<<"tls">>|_] = Path, Options) -> [{http_opts, parse_section(Path, Options)}]. + +%% path: outgoing_pools.redis.*.connection.* +-spec redis_option(path(), toml_value()) -> [option()]. +redis_option([<<"host">>|_], Host) -> [{host, b2l(Host)}]; +redis_option([<<"port">>|_], Port) -> [{port, Port}]; +redis_option([<<"database">>|_], Database) -> [{database, Database}]; +redis_option([<<"password">>|_], Password) -> [{password, b2l(Password)}]. 
+ +%% path: outgoing_pools.ldap.*.connection.* +-spec ldap_option(path(), toml_value()) -> [option()]. +ldap_option([<<"host">>|_], Host) -> [{host, b2l(Host)}]; +ldap_option([<<"port">>|_], Port) -> [{port, Port}]; +ldap_option([<<"rootdn">>|_], RootDN) -> [{rootdn, b2l(RootDN)}]; +ldap_option([<<"password">>|_], Password) -> [{password, b2l(Password)}]; +ldap_option([<<"encrypt">>|_], <<"tls">>) -> [{encrypt, tls}]; +ldap_option([<<"encrypt">>|_], <<"none">>) -> [{encrypt, none}]; +ldap_option([<<"servers">>|_] = Path, V) -> [{servers, parse_list(Path, V)}]; +ldap_option([<<"connect_interval">>|_], V) -> [{connect_interval, V}]; +ldap_option([<<"tls">>|_] = Path, Options) -> [{tls_options, parse_section(Path, Options)}]. + +%% path: outgoing_pools.riak.*.connection.* +-spec riak_option(path(), toml_value()) -> [option()]. +riak_option([<<"address">>|_], Addr) -> [{address, b2l(Addr)}]; +riak_option([<<"port">>|_], Port) -> [{port, Port}]; +riak_option([<<"credentials">>|_] = Path, V) -> + Creds = parse_section(Path, V), + {_, User} = proplists:lookup(user, Creds), + {_, Pass} = proplists:lookup(password, Creds), + [{credentials, User, Pass}]; +riak_option([<<"cacertfile">>|_], Path) -> [{cacertfile, b2l(Path)}]; +riak_option([<<"certfile">>|_], Path) -> [{certfile, b2l(Path)}]; +riak_option([<<"keyfile">>|_], Path) -> [{keyfile, b2l(Path)}]; +riak_option([<<"tls">>|_] = Path, Options) -> + Ssl = parse_section(Path, Options), + {RootOpts, SslOpts} = proplists:split(Ssl, [cacertfile, certfile, keyfile]), + case SslOpts of + [] ->lists:flatten(RootOpts); + _ -> [{ssl_opts, SslOpts} | lists:flatten(RootOpts)] + end. + +%% path: outgoing_pools.riak.*.connection.credentials.* +-spec riak_credentials(path(), toml_value()) -> [option()]. +riak_credentials([<<"user">>|_], V) -> [{user, b2l(V)}]; +riak_credentials([<<"password">>|_], V) -> [{password, b2l(V)}]. + +%% path: outgoing_pools.cassandra.*.connnection.* +-spec cassandra_option(path(), toml_value()) -> [option()]. 
+cassandra_option([<<"servers">>|_] = Path, V) -> [{servers, parse_list(Path, V)}]; +cassandra_option([<<"keyspace">>|_], KeySpace) -> [{keyspace, b2l(KeySpace)}]; +cassandra_option([<<"tls">>|_] = Path, Options) -> [{ssl, parse_section(Path, Options)}]; +cassandra_option([<<"auth">>|_] = Path, Options) -> + [AuthConfig] = parse_section(Path, Options), + [{auth, AuthConfig}]; +cassandra_option([<<"plain">>|_], #{<<"username">> := User, <<"password">> := Pass}) -> + [{cqerl_auth_plain_handler, [{User, Pass}]}]. + +%% path: outgoing_pools.cassandra.*.connection.servers[] +-spec cassandra_server(path(), toml_section()) -> [option()]. +cassandra_server(_, #{<<"ip_address">> := IPAddr, <<"port">> := Port}) -> [{b2l(IPAddr), Port}]; +cassandra_server(_, #{<<"ip_address">> := IPAddr}) -> [b2l(IPAddr)]. + +%% path: outgoing_pools.elastic.*.connection.* +-spec elastic_option(path(), toml_value()) -> [option()]. +elastic_option([<<"host">>|_], Host) -> [{host, b2l(Host)}]; +elastic_option([<<"port">>|_], Port) -> [{port, Port}]. + +%% path: outgoing_pools.rabbit.*.connection.* +-spec rabbit_option(path(), toml_value()) -> [option()]. +rabbit_option([<<"amqp_host">>|_], V) -> [{amqp_host, b2l(V)}]; +rabbit_option([<<"amqp_port">>|_], V) -> [{amqp_port, V}]; +rabbit_option([<<"amqp_username">>|_], V) -> [{amqp_username, b2l(V)}]; +rabbit_option([<<"amqp_password">>|_], V) -> [{amqp_password, b2l(V)}]; +rabbit_option([<<"confirms_enabled">>|_], V) -> [{confirms_enabled, V}]; +rabbit_option([<<"max_worker_queue_len">>|_], V) -> [{max_worker_queue_len, int_or_infinity(V)}]. + +%% path: services.* +-spec process_service(path(), toml_section()) -> [option()]. +process_service([S|_] = Path, Opts) -> + [{b2a(S), parse_section(Path, Opts)}]. + +%% path: services.*.* +-spec service_opt(path(), toml_value()) -> [option()]. 
+service_opt([<<"submods">>, <<"service_admin_extra">>|_] = Path, V) -> + List = parse_list(Path, V), + [{submods, List}]; +service_opt([<<"initial_report">>, <<"service_mongoose_system_metrics">>|_], V) -> + [{initial_report, V}]; +service_opt([<<"periodic_report">>, <<"service_mongoose_system_metrics">>|_], V) -> + [{periodic_report, V}]; +service_opt([<<"report">>, <<"service_mongoose_system_metrics">>|_], true) -> + [report]; +service_opt([<<"report">>, <<"service_mongoose_system_metrics">>|_], false) -> + [no_report]; +service_opt([<<"tracking_id">>, <<"service_mongoose_system_metrics">>|_], V) -> + [{tracking_id, b2l(V)}]. + +%% path: (host_config[].)modules.* +-spec process_module(path(), toml_section()) -> [option()]. +process_module([Mod|_] = Path, Opts) -> + %% Sort option keys to ensure options could be matched in tests + post_process_module(b2a(Mod), parse_section(Path, Opts)). + +post_process_module(mod_mam_meta, Opts) -> + %% Disable the archiving by default + [{mod_mam_meta, lists:sort(defined_or_false(muc, defined_or_false(pm, Opts)))}]; +post_process_module(Mod, Opts) -> + [{Mod, lists:sort(Opts)}]. + +%% path: (host_config[].)modules.*.* +-spec module_opt(path(), toml_value()) -> [option()]. 
+%% Per-module option parser. Dispatches on [OptionKey, ModuleName | _] paths.
+%% Clause order matters: module-specific clauses MUST precede the generic
+%% 'iqdisc'/'backend'/'ldap_*'/'riak' fallback clauses at the end.
+module_opt([<<"report_commands_node">>, <<"mod_adhoc">>|_], V) ->
+    [{report_commands_node, V}];
+module_opt([<<"validity_period">>, <<"mod_auth_token">>|_] = Path, V) ->
+    parse_list(Path, V);
+module_opt([<<"inactivity">>, <<"mod_bosh">>|_], V) ->
+    [{inactivity, int_or_infinity(V)}];
+module_opt([<<"max_wait">>, <<"mod_bosh">>|_], V) ->
+    [{max_wait, int_or_infinity(V)}];
+module_opt([<<"server_acks">>, <<"mod_bosh">>|_], V) ->
+    [{server_acks, V}];
+module_opt([<<"backend">>, <<"mod_bosh">>|_], V) ->
+    [{backend, b2a(V)}];
+module_opt([<<"maxpause">>, <<"mod_bosh">>|_], V) ->
+    [{maxpause, V}];
+module_opt([<<"cache_size">>, <<"mod_caps">>|_], V) ->
+    [{cache_size, V}];
+module_opt([<<"cache_life_time">>, <<"mod_caps">>|_], V) ->
+    [{cache_life_time, V}];
+module_opt([<<"buffer_max">>, <<"mod_csi">>|_], V) ->
+    [{buffer_max, int_or_infinity(V)}];
+module_opt([<<"extra_domains">>, <<"mod_disco">>|_] = Path, V) ->
+    Domains = parse_list(Path, V),
+    [{extra_domains, Domains}];
+module_opt([<<"server_info">>, <<"mod_disco">>|_] = Path, V) ->
+    Info = parse_list(Path, V),
+    [{server_info, Info}];
+module_opt([<<"users_can_see_hidden_services">>, <<"mod_disco">>|_], V) ->
+    [{users_can_see_hidden_services, V}];
+module_opt([<<"backend">>, <<"mod_event_pusher">>|_] = Path, V) ->
+    Backends = parse_section(Path, V),
+    [{backends, Backends}];
+module_opt([<<"service">>, <<"mod_extdisco">>|_] = Path, V) ->
+    parse_list(Path, V);
+module_opt([<<"host">>, <<"mod_http_upload">>|_], V) ->
+    [{host, b2l(V)}];
+module_opt([<<"backend">>, <<"mod_http_upload">>|_], V) ->
+    [{backend, b2a(V)}];
+module_opt([<<"expiration_time">>, <<"mod_http_upload">>|_], V) ->
+    [{expiration_time, V}];
+module_opt([<<"token_bytes">>, <<"mod_http_upload">>|_], V) ->
+    [{token_bytes, V}];
+module_opt([<<"max_file_size">>, <<"mod_http_upload">>|_], V) ->
+    [{max_file_size, V}];
+module_opt([<<"s3">>, <<"mod_http_upload">>|_] = Path, V) ->
+    S3Opts = parse_section(Path, V),
+    [{s3, S3Opts}];
+module_opt([<<"backend">>, <<"mod_inbox">>|_], V) ->
+    [{backend, b2a(V)}];
+module_opt([<<"reset_markers">>, <<"mod_inbox">>|_] = Path, V) ->
+    Markers = parse_list(Path, V),
+    [{reset_markers, Markers}];
+module_opt([<<"groupchat">>, <<"mod_inbox">>|_] = Path, V) ->
+    GChats = parse_list(Path, V),
+    [{groupchat, GChats}];
+module_opt([<<"aff_changes">>, <<"mod_inbox">>|_], V) ->
+    [{aff_changes, V}];
+module_opt([<<"remove_on_kicked">>, <<"mod_inbox">>|_], V) ->
+    [{remove_on_kicked, V}];
+module_opt([<<"global_host">>, <<"mod_global_distrib">>|_], V) ->
+    [{global_host, b2l(V)}];
+module_opt([<<"local_host">>, <<"mod_global_distrib">>|_], V) ->
+    [{local_host, b2l(V)}];
+module_opt([<<"message_ttl">>, <<"mod_global_distrib">>|_], V) ->
+    [{message_ttl, V}];
+module_opt([<<"connections">>, <<"mod_global_distrib">>|_] = Path, V) ->
+    Conns = parse_section(Path, V),
+    [{connections, Conns}];
+module_opt([<<"cache">>, <<"mod_global_distrib">>|_] = Path, V) ->
+    Cache = parse_section(Path, V),
+    [{cache, Cache}];
+%% 'bounce = false' disables bouncing; any other value is a nested section.
+module_opt([<<"bounce">>, <<"mod_global_distrib">>|_], false) ->
+    [{bounce, false}];
+module_opt([<<"bounce">>, <<"mod_global_distrib">>|_] = Path, V) ->
+    Bounce = parse_section(Path, V),
+    [{bounce, Bounce}];
+module_opt([<<"redis">>, <<"mod_global_distrib">>|_] = Path, V) ->
+    Redis = parse_section(Path, V),
+    [{redis, Redis}];
+module_opt([<<"hosts_refresh_interval">>, <<"mod_global_distrib">>|_], V) ->
+    [{hosts_refresh_interval, V}];
+module_opt([<<"proxy_host">>, <<"mod_jingle_sip">>|_], V) ->
+    [{proxy_host, b2l(V)}];
+module_opt([<<"proxy_port">>, <<"mod_jingle_sip">>|_], V) ->
+    [{proxy_port, V}];
+module_opt([<<"listen_port">>, <<"mod_jingle_sip">>|_], V) ->
+    [{listen_port, V}];
+module_opt([<<"local_host">>, <<"mod_jingle_sip">>|_], V) ->
+    [{local_host, b2l(V)}];
+module_opt([<<"sdp_origin">>, <<"mod_jingle_sip">>|_], V) ->
+    [{sdp_origin, b2l(V)}];
+module_opt([<<"ram_key_size">>, <<"mod_keystore">>|_], V) ->
+    [{ram_key_size, V}];
+module_opt([<<"keys">>, <<"mod_keystore">>|_] = Path, V) ->
+    Keys = parse_list(Path, V),
+    [{keys, Keys}];
+%% mod_mam_meta: 'pm' and 'muc' are nested sections; any other key is a
+%% plain MAM option handled by mod_mam_opts/2 (catch-all clause below).
+module_opt([<<"pm">>, <<"mod_mam_meta">>|_] = Path, V) ->
+    PM = parse_section(Path, V),
+    [{pm, PM}];
+module_opt([<<"muc">>, <<"mod_mam_meta">>|_] = Path, V) ->
+    Muc = parse_section(Path, V),
+    [{muc, Muc}];
+module_opt([_, <<"mod_mam_meta">>|_] = Path, V) ->
+    mod_mam_opts(Path, V);
+module_opt([<<"host">>, <<"mod_muc">>|_], V) ->
+    [{host, b2l(V)}];
+module_opt([<<"access">>, <<"mod_muc">>|_], V) ->
+    [{access, b2a(V)}];
+module_opt([<<"access_create">>, <<"mod_muc">>|_], V) ->
+    [{access_create, b2a(V)}];
+module_opt([<<"access_admin">>, <<"mod_muc">>|_], V) ->
+    [{access_admin, b2a(V)}];
+module_opt([<<"access_persistent">>, <<"mod_muc">>|_], V) ->
+    [{access_persistent, b2a(V)}];
+module_opt([<<"history_size">>, <<"mod_muc">>|_], V) ->
+    [{history_size, V}];
+module_opt([<<"room_shaper">>, <<"mod_muc">>|_], V) ->
+    [{room_shaper, b2a(V)}];
+module_opt([<<"max_room_id">>, <<"mod_muc">>|_], V) ->
+    [{max_room_id, int_or_infinity(V)}];
+module_opt([<<"max_room_name">>, <<"mod_muc">>|_], V) ->
+    [{max_room_name, int_or_infinity(V)}];
+module_opt([<<"max_room_desc">>, <<"mod_muc">>|_], V) ->
+    [{max_room_desc, int_or_infinity(V)}];
+module_opt([<<"min_message_interval">>, <<"mod_muc">>|_], V) ->
+    [{min_message_interval, V}];
+module_opt([<<"min_presence_interval">>, <<"mod_muc">>|_], V) ->
+    [{min_presence_interval, V}];
+module_opt([<<"max_users">>, <<"mod_muc">>|_], V) ->
+    [{max_users, V}];
+module_opt([<<"max_users_admin_threshold">>, <<"mod_muc">>|_], V) ->
+    [{max_users_admin_threshold, V}];
+module_opt([<<"user_message_shaper">>, <<"mod_muc">>|_], V) ->
+    [{user_message_shaper, b2a(V)}];
+module_opt([<<"user_presence_shaper">>, <<"mod_muc">>|_], V) ->
+    [{user_presence_shaper, b2a(V)}];
+module_opt([<<"max_user_conferences">>, <<"mod_muc">>|_], V) ->
+    [{max_user_conferences, V}];
+module_opt([<<"http_auth_pool">>, <<"mod_muc">>|_], V) ->
+    [{http_auth_pool, b2a(V)}];
+module_opt([<<"load_permanent_rooms_at_startup">>, <<"mod_muc">>|_], V) ->
+    [{load_permanent_rooms_at_startup, V}];
+module_opt([<<"hibernate_timeout">>, <<"mod_muc">>|_], V) ->
+    [{hibernate_timeout, V}];
+module_opt([<<"hibernated_room_check_interval">>, <<"mod_muc">>|_], V) ->
+    [{hibernated_room_check_interval, int_or_infinity(V)}];
+module_opt([<<"hibernated_room_timeout">>, <<"mod_muc">>|_], V) ->
+    [{hibernated_room_timeout, int_or_infinity(V)}];
+%% Note the key rename: TOML 'default_room' -> internal 'default_room_options'.
+module_opt([<<"default_room">>, <<"mod_muc">>|_] = Path, V) ->
+    Defaults = parse_section(Path, V),
+    [{default_room_options, Defaults}];
+module_opt([<<"outdir">>, <<"mod_muc_log">>|_], V) ->
+    [{outdir, b2l(V)}];
+module_opt([<<"access_log">>, <<"mod_muc_log">>|_], V) ->
+    [{access_log, b2a(V)}];
+module_opt([<<"dirtype">>, <<"mod_muc_log">>|_], V) ->
+    [{dirtype, b2a(V)}];
+module_opt([<<"dirname">>, <<"mod_muc_log">>|_], V) ->
+    [{dirname, b2a(V)}];
+module_opt([<<"file_format">>, <<"mod_muc_log">>|_], V) ->
+    [{file_format, b2a(V)}];
+%% The string "false" (not a TOML boolean) disables the CSS file;
+%% otherwise the raw binary value is kept. Key rename: css_file -> cssfile.
+module_opt([<<"css_file">>, <<"mod_muc_log">>|_], <<"false">>) ->
+    [{cssfile, false}];
+module_opt([<<"css_file">>, <<"mod_muc_log">>|_], V) ->
+    [{cssfile, V}];
+module_opt([<<"timezone">>, <<"mod_muc_log">>|_], V) ->
+    [{timezone, b2a(V)}];
+%% top_link is parsed as a section and collapsed into a tuple, cf.
+%% mod_muc_log_top_link/2.
+module_opt([<<"top_link">>, <<"mod_muc_log">>|_] = Path, V) ->
+    Link = list_to_tuple(parse_section(Path, V)),
+    [{top_link, Link}];
+module_opt([<<"spam_prevention">>, <<"mod_muc_log">>|_], V) ->
+    [{spam_prevention, V}];
+module_opt([<<"host">>, <<"mod_muc_light">>|_], V) ->
+    [{host, b2l(V)}];
+module_opt([<<"equal_occupants">>, <<"mod_muc_light">>|_], V) ->
+    [{equal_occupants, V}];
+module_opt([<<"legacy_mode">>, <<"mod_muc_light">>|_], V) ->
+    [{legacy_mode, V}];
+module_opt([<<"rooms_per_user">>, <<"mod_muc_light">>|_], V) ->
+    [{rooms_per_user, int_or_infinity(V)}];
+module_opt([<<"blocking">>, <<"mod_muc_light">>|_], V) ->
+    [{blocking, V}];
+module_opt([<<"all_can_configure">>, <<"mod_muc_light">>|_], V) ->
+    [{all_can_configure, V}];
+module_opt([<<"all_can_invite">>, <<"mod_muc_light">>|_], V) ->
+    [{all_can_invite, V}];
+module_opt([<<"max_occupants">>, <<"mod_muc_light">>|_], V) ->
+    [{max_occupants, int_or_infinity(V)}];
+module_opt([<<"rooms_per_page">>, <<"mod_muc_light">>|_], V) ->
+    [{rooms_per_page, int_or_infinity(V)}];
+module_opt([<<"rooms_in_rosters">>, <<"mod_muc_light">>|_], V) ->
+    [{rooms_in_rosters, V}];
+module_opt([<<"config_schema">>, <<"mod_muc_light">>|_] = Path, V) ->
+    Configs = parse_list(Path, V),
+    [{config_schema, Configs}];
+module_opt([<<"access_max_user_messages">>, <<"mod_offline">>|_], V) ->
+    [{access_max_user_messages, b2a(V)}];
+module_opt([<<"send_pings">>, <<"mod_ping">>|_], V) ->
+    [{send_pings, V}];
+module_opt([<<"ping_interval">>, <<"mod_ping">>|_], V) ->
+    [{ping_interval, V}];
+module_opt([<<"timeout_action">>, <<"mod_ping">>|_], V) ->
+    [{timeout_action, b2a(V)}];
+module_opt([<<"ping_req_timeout">>, <<"mod_ping">>|_], V) ->
+    [{ping_req_timeout, V}];
+module_opt([<<"host">>, <<"mod_pubsub">>|_], V) ->
+    [{host, b2l(V)}];
+module_opt([<<"access_createnode">>, <<"mod_pubsub">>|_], V) ->
+    [{access_createnode, b2a(V)}];
+module_opt([<<"max_items_node">>, <<"mod_pubsub">>|_], V) ->
+    [{max_items_node, V}];
+%% The string "infinity" yields no option at all - presumably the module
+%% default; confirm against mod_pubsub before relying on it.
+module_opt([<<"max_subscriptions_node">>, <<"mod_pubsub">>|_], <<"infinity">>) ->
+    [];
+module_opt([<<"max_subscriptions_node">>, <<"mod_pubsub">>|_], V) ->
+    [{max_subscriptions_node, V}];
+module_opt([<<"nodetree">>, <<"mod_pubsub">>|_], V) ->
+    [{nodetree, V}];
+module_opt([<<"ignore_pep_from_offline">>, <<"mod_pubsub">>|_], V) ->
+    [{ignore_pep_from_offline, V}];
+module_opt([<<"last_item_cache">>, <<"mod_pubsub">>|_], false) ->
+    [{last_item_cache, false}];
+module_opt([<<"last_item_cache">>, <<"mod_pubsub">>|_], V) ->
+    [{last_item_cache, b2a(V)}];
+module_opt([<<"plugins">>, <<"mod_pubsub">>|_] = Path, V) ->
+    Plugs = parse_list(Path, V),
+    [{plugins, Plugs}];
+module_opt([<<"pep_mapping">>, <<"mod_pubsub">>|_] = Path, V) ->
+    Mappings = parse_list(Path, V),
+    [{pep_mapping, Mappings}];
+module_opt([<<"default_node_config">>, <<"mod_pubsub">>|_] = Path, V) ->
+    Config = parse_section(Path, V),
+    [{default_node_config, Config}];
+module_opt([<<"item_publisher">>, <<"mod_pubsub">>|_], V) ->
+    [{item_publisher, V}];
+module_opt([<<"sync_broadcast">>, <<"mod_pubsub">>|_], V) ->
+    [{sync_broadcast, V}];
+module_opt([<<"pool_name">>, <<"mod_push_service_mongoosepush">>|_], V) ->
+    [{pool_name, b2a(V)}];
+module_opt([<<"api_version">>, <<"mod_push_service_mongoosepush">>|_], V) ->
+    [{api_version, b2l(V)}];
+module_opt([<<"max_http_connections">>, <<"mod_push_service_mongoosepush">>|_], V) ->
+    [{max_http_connections, V}];
+module_opt([<<"access">>, <<"mod_register">>|_], V) ->
+    [{access, b2a(V)}];
+module_opt([<<"registration_watchers">>, <<"mod_register">>|_] = Path, V) ->
+    [{registration_watchers, parse_list(Path, V)}];
+module_opt([<<"password_strength">>, <<"mod_register">>|_], V) ->
+    [{password_strength, V}];
+module_opt([<<"ip_access">>, <<"mod_register">>|_] = Path, V) ->
+    Rules = parse_list(Path, V),
+    [{ip_access, Rules}];
+%% Missing subject/body default to the empty string.
+module_opt([<<"welcome_message">>, <<"mod_register">>|_] = Path, V) ->
+    Props = parse_section(Path, V),
+    Subject = proplists:get_value(subject, Props, ""),
+    Body = proplists:get_value(body, Props, ""),
+    [{welcome_message, {Subject, Body}}];
+module_opt([<<"routes">>, <<"mod_revproxy">>|_] = Path, V) ->
+    Routes = parse_list(Path, V),
+    [{routes, Routes}];
+module_opt([<<"versioning">>, <<"mod_roster">>|_], V) ->
+    [{versioning, V}];
+module_opt([<<"store_current_id">>, <<"mod_roster">>|_], V) ->
+    [{store_current_id, V}];
+module_opt([<<"ldap_useruid">>, <<"mod_shared_roster_ldap">>|_], V) ->
+    [{ldap_useruid, b2l(V)}];
+module_opt([<<"ldap_groupattr">>, <<"mod_shared_roster_ldap">>|_], V) ->
+    [{ldap_groupattr, b2l(V)}];
+module_opt([<<"ldap_groupdesc">>, <<"mod_shared_roster_ldap">>|_], V) ->
+    [{ldap_groupdesc, b2l(V)}];
+module_opt([<<"ldap_userdesc">>, <<"mod_shared_roster_ldap">>|_], V) ->
+    [{ldap_userdesc, b2l(V)}];
+module_opt([<<"ldap_userid">>, <<"mod_shared_roster_ldap">>|_], V) ->
+    [{ldap_userid, b2l(V)}];
+module_opt([<<"ldap_memberattr">>, <<"mod_shared_roster_ldap">>|_], V) ->
+    [{ldap_memberattr, b2l(V)}];
+module_opt([<<"ldap_memberattr_format">>, <<"mod_shared_roster_ldap">>|_], V) ->
+    [{ldap_memberattr_format, b2l(V)}];
+module_opt([<<"ldap_memberattr_format_re">>, <<"mod_shared_roster_ldap">>|_], V) ->
+    [{ldap_memberattr_format_re, b2l(V)}];
+module_opt([<<"ldap_auth_check">>, <<"mod_shared_roster_ldap">>|_], V) ->
+    [{ldap_auth_check, V}];
+module_opt([<<"ldap_user_cache_validity">>, <<"mod_shared_roster_ldap">>|_], V) ->
+    [{ldap_user_cache_validity, V}];
+module_opt([<<"ldap_group_cache_validity">>, <<"mod_shared_roster_ldap">>|_], V) ->
+    [{ldap_group_cache_validity, V}];
+module_opt([<<"ldap_user_cache_size">>, <<"mod_shared_roster_ldap">>|_], V) ->
+    [{ldap_user_cache_size, V}];
+module_opt([<<"ldap_group_cache_size">>, <<"mod_shared_roster_ldap">>|_], V) ->
+    [{ldap_group_cache_size, V}];
+module_opt([<<"ldap_rfilter">>, <<"mod_shared_roster_ldap">>|_], V) ->
+    [{ldap_rfilter, b2l(V)}];
+module_opt([<<"ldap_gfilter">>, <<"mod_shared_roster_ldap">>|_], V) ->
+    [{ldap_gfilter, b2l(V)}];
+module_opt([<<"ldap_ufilter">>, <<"mod_shared_roster_ldap">>|_], V) ->
+    [{ldap_ufilter, b2l(V)}];
+%% String sentinels "no_buffer"/"never" map to bare atoms; they must come
+%% before the generic clauses for the same keys.
+module_opt([<<"buffer_max">>, <<"mod_stream_management">>|_], <<"no_buffer">>) ->
+    [{buffer_max, no_buffer}];
+module_opt([<<"buffer_max">>, <<"mod_stream_management">>|_], V) ->
+    [{buffer_max, int_or_infinity(V)}];
+module_opt([<<"ack_freq">>, <<"mod_stream_management">>|_], <<"never">>) ->
+    [{ack_freq, never}];
+module_opt([<<"ack_freq">>, <<"mod_stream_management">>|_], V) ->
+    [{ack_freq, V}];
+module_opt([<<"resume_timeout">>, <<"mod_stream_management">>|_], V) ->
+    [{resume_timeout, V}];
+module_opt([<<"stale_h">>, <<"mod_stream_management">>|_] = Path, V) ->
+    Stale = parse_section(Path, V),
+    [{stale_h, Stale}];
+module_opt([<<"host">>, <<"mod_vcard">>|_], V) ->
+    [{host, b2l(V)}];
+module_opt([<<"search">>, <<"mod_vcard">>|_], V) ->
+    [{search, V}];
+module_opt([<<"matches">>, <<"mod_vcard">>|_], V) ->
+    [{matches, int_or_infinity(V)}];
+module_opt([<<"ldap_vcard_map">>, <<"mod_vcard">>|_] = Path, V) ->
+    Maps = parse_list(Path, V),
+    [{ldap_vcard_map, Maps}];
+module_opt([<<"ldap_uids">>, <<"mod_vcard">>|_] = Path, V) ->
+    List = parse_list(Path, V),
+    [{ldap_uids, List}];
+module_opt([<<"ldap_search_fields">>, <<"mod_vcard">>|_] = Path, V) ->
+    Fields = parse_list(Path, V),
+    [{ldap_search_fields, Fields}];
+module_opt([<<"ldap_search_reported">>, <<"mod_vcard">>|_] = Path, V) ->
+    Reported = parse_list(Path, V),
+    [{ldap_search_reported, Reported}];
+module_opt([<<"ldap_search_operator">>, <<"mod_vcard">>|_], V) ->
+    [{ldap_search_operator, b2a(V)}];
+module_opt([<<"ldap_binary_search_fields">>, <<"mod_vcard">>|_] = Path, V) ->
+    List = parse_list(Path, V),
+    [{ldap_binary_search_fields, List}];
+module_opt([<<"os_info">>, <<"mod_version">>|_], V) ->
+    [{os_info, V}];
+%% General options (any module)
+%% 'iqdisc' is a table with a mandatory 'type' key; remaining keys are
+%% validated by iqdisc_value/2.
+module_opt([<<"iqdisc">>|_], V) ->
+    {Type, Opts} = maps:take(<<"type">>, V),
+    [{iqdisc, iqdisc_value(b2a(Type), Opts)}];
+module_opt([<<"backend">>|_], V) ->
+    [{backend, b2a(V)}];
+%% LDAP-specific options
+module_opt([<<"ldap_pool_tag">>|_], V) ->
+    [{ldap_pool_tag, b2a(V)}];
+module_opt([<<"ldap_base">>|_], V) ->
+    [{ldap_base, b2l(V)}];
+module_opt([<<"ldap_filter">>|_], V) ->
+    [{ldap_filter, b2l(V)}];
+module_opt([<<"ldap_deref">>|_], V) ->
+    [{ldap_deref, b2a(V)}];
+%% Backend-specific options
+module_opt([<<"riak">>|_] = Path, V) ->
+    parse_section(Path, V).
+
+%% path: (host_config[].)modules.*.riak.*
+-spec riak_opts(path(), toml_section()) -> [option()].
+riak_opts([<<"defaults_bucket_type">>|_], V) ->
+    [{defaults_bucket_type, V}];
+riak_opts([<<"names_bucket_type">>|_], V) ->
+    [{names_bucket_type, V}];
+riak_opts([<<"version_bucket_type">>|_], V) ->
+    [{version_bucket_type, V}];
+riak_opts([<<"bucket_type">>|_], V) ->
+    [{bucket_type, V}];
+riak_opts([<<"search_index">>|_], V) ->
+    [{search_index, V}].
+
+%% One item of mod_register's ip_access list: {Policy, AddressString}.
+-spec mod_register_ip_access_rule(path(), toml_section()) -> [option()].
+mod_register_ip_access_rule(_, #{<<"address">> := Addr, <<"policy">> := Policy}) ->
+    [{b2a(Policy), b2l(Addr)}].
+
+%% One item of mod_auth_token's validity_period list.
+-spec mod_auth_token_validity_periods(path(), toml_section()) -> [option()].
+mod_auth_token_validity_periods(_,
+        #{<<"token">> := Token, <<"value">> := Value, <<"unit">> := Unit}) ->
+    [{{validity_period, b2a(Token)}, {Value, b2a(Unit)}}].
+
+%% One item of mod_disco's server_info list. The string "all" applies the
+%% entry to all modules; otherwise 'module' is itself a list.
+-spec mod_disco_server_info(path(), toml_section()) -> [option()].
+mod_disco_server_info(Path, #{<<"module">> := <<"all">>, <<"name">> := Name, <<"urls">> := Urls}) ->
+    URLList = parse_list([<<"urls">> | Path], Urls),
+    [{all, b2l(Name), URLList}];
+mod_disco_server_info(Path, #{<<"module">> := Modules, <<"name">> := Name, <<"urls">> := Urls}) ->
+    Mods = parse_list([<<"module">> | Path], Modules),
+    URLList = parse_list([<<"urls">> | Path], Urls),
+    [{Mods, b2l(Name), URLList}].
+
+%% mod_event_pusher backend wrappers: each parses its section and tags it.
+-spec mod_event_pusher_backend_sns(path(), toml_section()) -> [option()].
+mod_event_pusher_backend_sns(Path, Opts) ->
+    SnsOpts = parse_section(Path, Opts),
+    [{sns, SnsOpts}].
+
+-spec mod_event_pusher_backend_push(path(), toml_section()) -> [option()].
+mod_event_pusher_backend_push(Path, Opts) ->
+    PushOpts = parse_section(Path, Opts),
+    [{push, PushOpts}].
+
+-spec mod_event_pusher_backend_http(path(), toml_section()) -> [option()].
+mod_event_pusher_backend_http(Path, Opts) ->
+    HttpOpts = parse_section(Path, Opts),
+    [{http, HttpOpts}].
+
+-spec mod_event_pusher_backend_rabbit(path(), toml_section()) -> [option()].
+mod_event_pusher_backend_rabbit(Path, Opts) ->
+    ROpts = parse_section(Path, Opts),
+    [{rabbit, ROpts}].
+
+-spec mod_event_pusher_backend_sns_opts(path(), toml_value()) -> [option()].
+mod_event_pusher_backend_sns_opts([<<"presence_updates_topic">>|_], V) ->
+    [{presence_updates_topic, b2l(V)}];
+mod_event_pusher_backend_sns_opts([<<"pm_messages_topic">>|_], V) ->
+    [{pm_messages_topic, b2l(V)}];
+mod_event_pusher_backend_sns_opts([<<"muc_messages_topic">>|_], V) ->
+    [{muc_messages_topic, b2l(V)}];
+mod_event_pusher_backend_sns_opts([<<"plugin_module">>|_], V) ->
+    [{plugin_module, b2a(V)}];
+mod_event_pusher_backend_sns_opts([<<"muc_host">>|_], V) ->
+    [{muc_host, b2l(V)}];
+mod_event_pusher_backend_sns_opts([<<"sns_host">>|_], V) ->
+    [{sns_host, b2l(V)}];
+mod_event_pusher_backend_sns_opts([<<"region">>|_], V) ->
+    [{region, b2l(V)}];
+mod_event_pusher_backend_sns_opts([<<"access_key_id">>|_], V) ->
+    [{access_key_id, b2l(V)}];
+mod_event_pusher_backend_sns_opts([<<"secret_access_key">>|_], V) ->
+    [{secret_access_key, b2l(V)}];
+mod_event_pusher_backend_sns_opts([<<"account_id">>|_], V) ->
+    [{account_id, b2l(V)}];
+mod_event_pusher_backend_sns_opts([<<"pool_size">>|_], V) ->
+    [{pool_size, V}];
+mod_event_pusher_backend_sns_opts([<<"publish_retry_count">>|_], V) ->
+    [{publish_retry_count, V}];
+mod_event_pusher_backend_sns_opts([<<"publish_retry_time_ms">>|_], V) ->
+    [{publish_retry_time_ms, V}].
+
+-spec mod_event_pusher_backend_push_opts(path(), toml_value()) -> [option()].
+mod_event_pusher_backend_push_opts([<<"backend">>|_], V) ->
+    [{backend, b2a(V)}];
+mod_event_pusher_backend_push_opts([<<"wpool">>|_] = Path, V) ->
+    WpoolOpts = parse_section(Path, V),
+    [{wpool, WpoolOpts}];
+mod_event_pusher_backend_push_opts([<<"plugin_module">>|_], V) ->
+    [{plugin_module, b2a(V)}];
+mod_event_pusher_backend_push_opts([<<"virtual_pubsub_hosts">> |_] = Path, V) ->
+    VPH = parse_list(Path, V),
+    [{virtual_pubsub_hosts, VPH}].
+
+-spec mod_event_pusher_backend_http_opts(path(), toml_value()) -> [option()].
+mod_event_pusher_backend_http_opts([<<"pool_name">>|_], V) ->
+    [{pool_name, b2a(V)}];
+mod_event_pusher_backend_http_opts([<<"path">>|_], V) ->
+    [{path, b2l(V)}];
+mod_event_pusher_backend_http_opts([<<"callback_module">>|_], V) ->
+    [{callback_module, b2a(V)}].
+
+-spec mod_event_pusher_backend_rabbit_opts(path(), toml_value()) -> [option()].
+mod_event_pusher_backend_rabbit_opts([<<"presence_exchange">>|_] = Path, V) ->
+    [{presence_exchange, parse_section(Path, V)}];
+mod_event_pusher_backend_rabbit_opts([<<"chat_msg_exchange">>|_] = Path, V) ->
+    [{chat_msg_exchange, parse_section(Path, V)}];
+mod_event_pusher_backend_rabbit_opts([<<"groupchat_msg_exchange">>|_] = Path, V) ->
+    [{groupchat_msg_exchange, parse_section(Path, V)}].
+
+%% Exchange values are kept as raw binaries (no b2l/b2a conversion).
+-spec mod_event_pusher_rabbit_presence_ex(path(), toml_value()) -> [option()].
+mod_event_pusher_rabbit_presence_ex([<<"name">>|_], V) ->
+    [{name, V}];
+mod_event_pusher_rabbit_presence_ex([<<"type">>|_], V) ->
+    [{type, V}].
+
+-spec mod_event_pusher_rabbit_msg_ex(path(), toml_value()) -> [option()].
+mod_event_pusher_rabbit_msg_ex([<<"name">>|_], V) ->
+    [{name, V}];
+mod_event_pusher_rabbit_msg_ex([<<"type">>|_], V) ->
+    [{type, V}];
+mod_event_pusher_rabbit_msg_ex([<<"sent_topic">>|_], V) ->
+    [{sent_topic, V}];
+mod_event_pusher_rabbit_msg_ex([<<"recv_topic">>|_], V) ->
+    [{recv_topic, V}].
+
+%% First clause handles a whole service item (path ends in [_, "service"|_]);
+%% the remaining clauses handle the keys inside one item.
+-spec mod_extdisco_service(path(), toml_value()) -> [option()].
+mod_extdisco_service([_, <<"service">>|_] = Path, V) ->
+    [parse_section(Path, V)];
+mod_extdisco_service([<<"type">>|_], V) ->
+    [{type, b2a(V)}];
+mod_extdisco_service([<<"host">>|_], V) ->
+    [{host, b2l(V)}];
+mod_extdisco_service([<<"port">>|_], V) ->
+    [{port, V}];
+mod_extdisco_service([<<"transport">>|_], V) ->
+    [{transport, b2l(V)}];
+mod_extdisco_service([<<"username">>|_], V) ->
+    [{username, b2l(V)}];
+mod_extdisco_service([<<"password">>|_], V) ->
+    [{password, b2l(V)}].
+
+-spec mod_http_upload_s3(path(), toml_value()) -> [option()].
+mod_http_upload_s3([<<"bucket_url">>|_], V) ->
+    [{bucket_url, b2l(V)}];
+mod_http_upload_s3([<<"add_acl">>|_], V) ->
+    [{add_acl, V}];
+mod_http_upload_s3([<<"region">>|_], V) ->
+    [{region, b2l(V)}];
+mod_http_upload_s3([<<"access_key_id">>|_], V) ->
+    [{access_key_id, b2l(V)}];
+mod_http_upload_s3([<<"secret_access_key">>|_], V) ->
+    [{secret_access_key, b2l(V)}].
+
+-spec mod_global_distrib_connections(path(), toml_value()) -> [option()].
+mod_global_distrib_connections([<<"endpoints">>|_] = Path, V) ->
+    Endpoints = parse_list(Path, V),
+    [{endpoints, Endpoints}];
+%% 'advertised_endpoints = false' disables advertising; otherwise a list.
+mod_global_distrib_connections([<<"advertised_endpoints">>|_], false) ->
+    [{advertised_endpoints, false}];
+mod_global_distrib_connections([<<"advertised_endpoints">>|_] = Path, V) ->
+    Endpoints = parse_list(Path, V),
+    [{advertised_endpoints, Endpoints}];
+mod_global_distrib_connections([<<"connections_per_endpoint">>|_], V) ->
+    [{connections_per_endpoint, V}];
+mod_global_distrib_connections([<<"endpoint_refresh_interval">>|_], V) ->
+    [{endpoint_refresh_interval, V}];
+mod_global_distrib_connections([<<"endpoint_refresh_interval_when_empty">>|_], V) ->
+    [{endpoint_refresh_interval_when_empty, V}];
+mod_global_distrib_connections([<<"disabled_gc_interval">>|_], V) ->
+    [{disabled_gc_interval, V}];
+%% Key rename: TOML 'tls' -> internal 'tls_opts'; false disables TLS.
+mod_global_distrib_connections([<<"tls">>|_] = _Path, false) ->
+    [{tls_opts, false}];
+mod_global_distrib_connections([<<"tls">>|_] = Path, V) ->
+    TLSOpts = parse_section(Path, V),
+    [{tls_opts, TLSOpts}].
+
+-spec mod_global_distrib_cache(path(), toml_value()) -> [option()].
+mod_global_distrib_cache([<<"cache_missed">>|_], V) ->
+    [{cache_missed, V}];
+mod_global_distrib_cache([<<"domain_lifetime_seconds">>|_], V) ->
+    [{domain_lifetime_seconds, V}];
+mod_global_distrib_cache([<<"jid_lifetime_seconds">>|_], V) ->
+    [{jid_lifetime_seconds, V}];
+mod_global_distrib_cache([<<"max_jids">>|_], V) ->
+    [{max_jids, V}].
+
+-spec mod_global_distrib_redis(path(), toml_value()) -> [option()].
+mod_global_distrib_redis([<<"pool">>|_], V) ->
+    [{pool, b2a(V)}];
+mod_global_distrib_redis([<<"expire_after">>|_], V) ->
+    [{expire_after, V}];
+mod_global_distrib_redis([<<"refresh_after">>|_], V) ->
+    [{refresh_after, V}].
+
+-spec mod_global_distrib_bounce(path(), toml_value()) -> [option()].
+mod_global_distrib_bounce([<<"resend_after_ms">>|_], V) ->
+    [{resend_after_ms, V}];
+mod_global_distrib_bounce([<<"max_retries">>|_], V) ->
+    [{max_retries, V}].
+
+%% One endpoint item: {HostString, Port}.
+-spec mod_global_distrib_connections_endpoints(path(), toml_section()) -> [option()].
+mod_global_distrib_connections_endpoints(_, #{<<"host">> := Host, <<"port">> := Port}) ->
+    [{b2l(Host), Port}].
+
+-spec mod_global_distrib_connections_advertised_endpoints(path(), toml_section()) -> [option()].
+mod_global_distrib_connections_advertised_endpoints(_, #{<<"host">> := Host, <<"port">> := Port}) ->
+    [{b2l(Host), Port}].
+
+%% One mod_keystore key: 'ram' keys need no path, 'file' keys require one.
+-spec mod_keystore_keys(path(), toml_section()) -> [option()].
+mod_keystore_keys(_, #{<<"name">> := Name, <<"type">> := <<"ram">>}) ->
+    [{b2a(Name), ram}];
+mod_keystore_keys(_, #{<<"name">> := Name, <<"type">> := <<"file">>, <<"path">> := Path}) ->
+    [{b2a(Name), {file, b2l(Path)}}].
+
+-spec mod_mam_opts(path(), toml_value()) -> [option()].
+%% Plain MAM options shared by mod_mam_meta and its pm/muc sub-sections.
+mod_mam_opts([<<"backend">>|_], V) ->
+    [{backend, b2a(V)}];
+mod_mam_opts([<<"no_stanzaid_element">>|_], V) ->
+    [{no_stanzaid_element, V}];
+mod_mam_opts([<<"is_archivable_message">>|_], V) ->
+    [{is_archivable_message, b2a(V)}];
+mod_mam_opts([<<"message_retraction">>|_], V) ->
+    [{message_retraction, V}];
+%% false disables the prefs store; otherwise the store backend name.
+mod_mam_opts([<<"user_prefs_store">>|_], false) ->
+    [{user_prefs_store, false}];
+mod_mam_opts([<<"user_prefs_store">>|_], V) ->
+    [{user_prefs_store, b2a(V)}];
+mod_mam_opts([<<"full_text_search">>|_], V) ->
+    [{full_text_search, V}];
+mod_mam_opts([<<"cache_users">>|_], V) ->
+    [{cache_users, V}];
+mod_mam_opts([<<"rdbms_message_format">>|_], V) ->
+    [{rdbms_message_format, b2a(V)}];
+mod_mam_opts([<<"async_writer">>|_], V) ->
+    [{async_writer, V}];
+mod_mam_opts([<<"flush_interval">>|_], V) ->
+    [{flush_interval, V}];
+mod_mam_opts([<<"max_batch_size">>|_], V) ->
+    [{max_batch_size, V}];
+mod_mam_opts([<<"default_result_limit">>|_], V) ->
+    [{default_result_limit, V}];
+mod_mam_opts([<<"max_result_limit">>|_], V) ->
+    [{max_result_limit, V}];
+mod_mam_opts([<<"archive_chat_markers">>|_], V) ->
+    [{archive_chat_markers, V}];
+mod_mam_opts([<<"archive_groupchats">>|_], V) ->
+    [{archive_groupchats, V}];
+mod_mam_opts([<<"async_writer_rdbms_pool">>|_], V) ->
+    [{async_writer_rdbms_pool, b2a(V)}];
+mod_mam_opts([<<"db_jid_format">>|_], V) ->
+    [{db_jid_format, b2a(V)}];
+mod_mam_opts([<<"db_message_format">>|_], V) ->
+    [{db_message_format, b2a(V)}];
+mod_mam_opts([<<"simple">>|_], V) ->
+    [{simple, V}];
+mod_mam_opts([<<"host">>|_], V) ->
+    [{host, b2l(V)}];
+mod_mam_opts([<<"extra_lookup_params">>|_], V) ->
+    [{extra_lookup_params, b2a(V)}];
+mod_mam_opts([<<"riak">>|_] = Path, V) ->
+    parse_section(Path, V).
+
+-spec mod_muc_default_room(path(), toml_value()) -> [option()].
+%% Options of mod_muc's default_room section; most values pass through
+%% unchanged (binaries/booleans/integers as parsed from TOML).
+mod_muc_default_room([<<"title">>|_], V) ->
+    [{title, V}];
+mod_muc_default_room([<<"description">>|_], V) ->
+    [{description, V}];
+mod_muc_default_room([<<"allow_change_subj">>|_], V) ->
+    [{allow_change_subj, V}];
+mod_muc_default_room([<<"allow_query_users">>|_], V) ->
+    [{allow_query_users, V}];
+mod_muc_default_room([<<"allow_private_messages">>|_], V) ->
+    [{allow_private_messages, V}];
+mod_muc_default_room([<<"allow_visitor_status">>|_], V) ->
+    [{allow_visitor_status, V}];
+mod_muc_default_room([<<"allow_visitor_nickchange">>|_], V) ->
+    [{allow_visitor_nickchange, V}];
+mod_muc_default_room([<<"public">>|_], V) ->
+    [{public, V}];
+mod_muc_default_room([<<"public_list">>|_], V) ->
+    [{public_list, V}];
+mod_muc_default_room([<<"persistent">>|_], V) ->
+    [{persistent, V}];
+mod_muc_default_room([<<"moderated">>|_], V) ->
+    [{moderated, V}];
+mod_muc_default_room([<<"members_by_default">>|_], V) ->
+    [{members_by_default, V}];
+mod_muc_default_room([<<"members_only">>|_], V) ->
+    [{members_only, V}];
+mod_muc_default_room([<<"allow_user_invites">>|_], V) ->
+    [{allow_user_invites, V}];
+mod_muc_default_room([<<"allow_multiple_sessions">>|_], V) ->
+    [{allow_multiple_sessions, V}];
+mod_muc_default_room([<<"password_protected">>|_], V) ->
+    [{password_protected, V}];
+mod_muc_default_room([<<"password">>|_], V) ->
+    [{password, V}];
+mod_muc_default_room([<<"anonymous">>|_], V) ->
+    [{anonymous, V}];
+mod_muc_default_room([<<"max_users">>|_], V) ->
+    [{max_users, V}];
+mod_muc_default_room([<<"logging">>|_], V) ->
+    [{logging, V}];
+mod_muc_default_room([<<"maygetmemberlist">>|_] = Path, V) ->
+    List = parse_list(Path, V),
+    [{maygetmemberlist, List}];
+mod_muc_default_room([<<"affiliations">>|_] = Path, V) ->
+    Affs = parse_list(Path, V),
+    [{affiliations, Affs}];
+mod_muc_default_room([<<"subject">>|_], V) ->
+    [{subject, V}];
+mod_muc_default_room([<<"subject_author">>|_], V) ->
+    [{subject_author, V}].
+
+%% One affiliation item: {{User, Server, Resource}, Affiliation}.
+%% JID parts are kept as binaries; only the affiliation becomes an atom.
+-spec mod_muc_default_room_affiliations(path(), toml_section()) -> [option()].
+mod_muc_default_room_affiliations(_, #{<<"user">> := User, <<"server">> := Server,
+                                       <<"resource">> := Resource, <<"affiliation">> := Aff}) ->
+    [{{User, Server, Resource}, b2a(Aff)}].
+
+%% 'target' and 'text' each contribute one bare string; module_opt/2
+%% collapses the resulting two-element list into a tuple for 'top_link'.
+-spec mod_muc_log_top_link(path(), toml_value()) -> [option()].
+mod_muc_log_top_link([<<"target">>|_], V) ->
+    [b2l(V)];
+mod_muc_log_top_link([<<"text">>|_], V) ->
+    [b2l(V)].
+
+%% One config_schema item: the 4-field form keeps 'value' as-is (its type
+%% depends on 'type'); the 2-field form converts it to a string.
+-spec mod_muc_light_config_schema(path(), toml_section()) -> [option()].
+mod_muc_light_config_schema(_, #{<<"field">> := Field, <<"value">> := Val,
+                                 <<"internal_key">> := Key, <<"type">> := Type}) ->
+    [{b2l(Field), Val, b2a(Key), b2a(Type)}];
+mod_muc_light_config_schema(_, #{<<"field">> := Field, <<"value">> := Val}) ->
+    [{b2l(Field), b2l(Val)}].
+
+%% One pep_mapping item: {NamespaceString, NodeString}.
+-spec mod_pubsub_pep_mapping(path(), toml_section()) -> [option()].
+mod_pubsub_pep_mapping(_, #{<<"namespace">> := Name, <<"node">> := Node}) ->
+    [{b2l(Name), b2l(Node)}].
+
+-spec mod_pubsub_default_node_config(path(), toml_section()) -> [option()].
+mod_pubsub_default_node_config([<<"access_model">>|_], Value) ->
+    [{access_model, b2a(Value)}];
+mod_pubsub_default_node_config([<<"deliver_notifications">>|_], Value) ->
+    [{deliver_notifications, Value}];
+mod_pubsub_default_node_config([<<"deliver_payloads">>|_], Value) ->
+    [{deliver_payloads, Value}];
+mod_pubsub_default_node_config([<<"max_items">>|_], Value) ->
+    [{max_items, Value}];
+mod_pubsub_default_node_config([<<"max_payload_size">>|_], Value) ->
+    [{max_payload_size, Value}];
+mod_pubsub_default_node_config([<<"node_type">>|_], Value) ->
+    [{node_type, b2a(Value)}];
+mod_pubsub_default_node_config([<<"notification_type">>|_], Value) ->
+    [{notification_type, b2a(Value)}];
+mod_pubsub_default_node_config([<<"notify_config">>|_], Value) ->
+    [{notify_config, Value}];
+mod_pubsub_default_node_config([<<"notify_delete">>|_], Value) ->
+    [{notify_delete, Value}];
+mod_pubsub_default_node_config([<<"notify_retract">>|_], Value) ->
+    [{notify_retract, Value}];
+mod_pubsub_default_node_config([<<"persist_items">>|_], Value) ->
+    [{persist_items, Value}];
+mod_pubsub_default_node_config([<<"presence_based_delivery">>|_], Value) ->
+    [{presence_based_delivery, Value}];
+mod_pubsub_default_node_config([<<"publish_model">>|_], Value) ->
+    [{publish_model, b2a(Value)}];
+mod_pubsub_default_node_config([<<"purge_offline">>|_], Value) ->
+    [{purge_offline, Value}];
+mod_pubsub_default_node_config([<<"roster_groups_allowed">>|_] = Path, Value) ->
+    Groups = parse_list(Path, Value),
+    [{roster_groups_allowed, Groups}];
+mod_pubsub_default_node_config([<<"send_last_published_item">>|_], Value) ->
+    [{send_last_published_item, b2a(Value)}];
+mod_pubsub_default_node_config([<<"subscribe">>|_], Value) ->
+    [{subscribe, Value}].
+
+%% One roster_groups_allowed item, kept verbatim.
+%% NOTE(review): presumably dispatched to by parse_list/2 for the
+%% roster_groups_allowed path above - confirm against the dispatch table.
+mod_pubsub_roster_groups_allowed(_, Value) ->
+    [Value].
+
+-spec mod_revproxy_routes(path(), toml_section()) -> [option()].
+%% One mod_revproxy route; 'method' is optional (3-tuple form without it).
+mod_revproxy_routes(_, #{<<"host">> := Host, <<"path">> := Path, <<"method">> := Method,
+                         <<"upstream">> := Upstream}) ->
+    [{b2l(Host), b2l(Path), b2l(Method), b2l(Upstream)}];
+mod_revproxy_routes(_, #{<<"host">> := Host, <<"path">> := Path, <<"upstream">> := Upstream}) ->
+    [{b2l(Host), b2l(Path), b2l(Upstream)}].
+
+%% Note the key renames: TOML 'repeat_after'/'geriatric' become
+%% 'stale_h_repeat_after'/'stale_h_geriatric' internally.
+-spec mod_stream_management_stale_h(path(), toml_value()) -> [option()].
+mod_stream_management_stale_h([<<"enabled">>|_], V) ->
+    [{enabled, V}];
+mod_stream_management_stale_h([<<"repeat_after">>|_], V) ->
+    [{stale_h_repeat_after, V}];
+mod_stream_management_stale_h([<<"geriatric">>|_], V) ->
+    [{stale_h_geriatric, V}].
+
+%% One ldap_uids item: with a format it is a pair, otherwise a bare attr.
+-spec mod_vcard_ldap_uids(path(), toml_section()) -> [option()].
+mod_vcard_ldap_uids(_, #{<<"attr">> := Attr, <<"format">> := Format}) ->
+    [{b2l(Attr), b2l(Format)}];
+mod_vcard_ldap_uids(_, #{<<"attr">> := Attr}) ->
+    [b2l(Attr)].
+
+
+%% One ldap_vcard_map item; values stay binaries, ldap_field is wrapped
+%% in a single-element list.
+-spec mod_vcard_ldap_vcard_map(path(), toml_section()) -> [option()].
+mod_vcard_ldap_vcard_map(_, #{<<"vcard_field">> := VF, <<"ldap_pattern">> := LP,
+                              <<"ldap_field">> := LF}) ->
+    [{VF, LP, [LF]}].
+
+-spec mod_vcard_ldap_search_fields(path(), toml_section()) -> [option()].
+mod_vcard_ldap_search_fields(_, #{<<"search_field">> := SF, <<"ldap_field">> := LF}) ->
+    [{SF, LF}].
+
+-spec mod_vcard_ldap_search_reported(path(), toml_section()) -> [option()].
+mod_vcard_ldap_search_reported(_, #{<<"search_field">> := SF, <<"vcard_field">> := VF}) ->
+    [{SF, VF}].
+
+-spec mod_vcard_ldap_binary_search_fields(path(), toml_section()) -> [option()].
+mod_vcard_ldap_binary_search_fields(_, V) ->
+    [V].
+
+%% Converts a parsed iqdisc type + leftover keys into the internal value.
+%% NOTE(review): limit_keys/2 is defined elsewhere in this file; it appears
+%% to reject unexpected keys in the section - confirm.
+-spec iqdisc_value(atom(), toml_section()) -> option().
+iqdisc_value(queues, #{<<"workers">> := Workers} = V) ->
+    limit_keys([<<"workers">>], V),
+    {queues, Workers};
+iqdisc_value(Type, V) ->
+    limit_keys([], V),
+    Type.
+
+%% One submods item: submodule name as an atom.
+-spec service_admin_extra_submods(path(), toml_value()) -> [option()].
+service_admin_extra_submods(_, V) ->
+    [b2a(V)].
+
+%% path: modules.mod_register.welcome_message.*
+%% Subject and body are stored as strings.
+welcome_message([<<"subject">>|_], Value) ->
+    [{subject, b2l(Value)}];
+welcome_message([<<"body">>|_], Value) ->
+    [{body, b2l(Value)}].
+
+%% path: (host_config[].)shaper.*
+%% A shaper section currently supports only "max_rate"; the key encodes the
+%% shaper name and (via host/1) whether it is global or host-specific.
+-spec process_shaper(path(), toml_section()) -> [config()].
+process_shaper([Name, _|Path], #{<<"max_rate">> := MaxRate}) ->
+    [#config{key = {shaper, b2a(Name), host(Path)}, value = {maxrate, MaxRate}}].
+
+%% path: (host_config[].)acl.*
+-spec process_acl(path(), toml_value()) -> [config()].
+process_acl([item, ACLName, _|Path], Content) ->
+    [acl:to_record(host(Path), b2a(ACLName), acl_data(Content))].
+
+%% Converts one ACL section into the internal ACL spec term.
+%% "all"/"none" matches are special-cased; otherwise the exact set of keys
+%% present in the section selects the ACL type from acl_keys/0.
+-spec acl_data(toml_value()) -> option().
+acl_data(#{<<"match">> := <<"all">>}) -> all;
+acl_data(#{<<"match">> := <<"none">>}) -> none;
+acl_data(M) ->
+    {AclName, AclKeys} = find_acl(M, lists:sort(maps:keys(M)), acl_keys()),
+    list_to_tuple([AclName | lists:map(fun(K) -> maps:get(K, M) end, AclKeys)]).
+
+%% Finds the first acl_keys/0 entry whose key set equals the section's keys.
+%% No clause for the empty list: an unknown key combination crashes with
+%% function_clause, surfaced by handle/2 as a parse error.
+find_acl(M, SortedMapKeys, [{AclName, AclKeys}|Rest]) ->
+    case lists:sort(AclKeys) of
+        SortedMapKeys -> {AclName, AclKeys};
+        _ -> find_acl(M, SortedMapKeys, Rest)
+    end.
+
+%% Known ACL types and, in order, the TOML keys whose values become the
+%% elements of the resulting ACL tuple. More specific key sets come first.
+acl_keys() ->
+    [{user, [<<"user">>, <<"server">>]},
+     {user, [<<"user">>]},
+     {server, [<<"server">>]},
+     {resource, [<<"resource">>]},
+     {user_regexp, [<<"user_regexp">>, <<"server">>]},
+     {node_regexp, [<<"user_regexp">>, <<"server_regexp">>]},
+     {user_regexp, [<<"user_regexp">>]},
+     {server_regexp, [<<"server_regexp">>]},
+     {resource_regexp, [<<"resource_regexp">>]},
+     {user_glob, [<<"user_glob">>, <<"server">>]},
+     {node_glob, [<<"user_glob">>, <<"server_glob">>]},
+     {user_glob, [<<"user_glob">>]},
+     {server_glob, [<<"server_glob">>]},
+     {resource_glob, [<<"resource_glob">>]}
+    ].
+
+%% path: (host_config[].)access.*
+-spec process_access_rule(path(), toml_value()) -> [config()].
+process_access_rule([Name, _|HostPath] = Path, Contents) ->
+    Rules = parse_list(Path, Contents),
+    [#config{key = {access, b2a(Name), host(HostPath)}, value = Rules}].
+
+%% path: (host_config[].)access.*[]
+-spec process_access_rule_item(path(), toml_section()) -> [option()].
+process_access_rule_item(_, #{<<"acl">> := ACL, <<"value">> := Value}) ->
+    [{access_rule_value(Value), b2a(ACL)}].
+
+%% Resolves the host from the tail of a reversed path: [] means the global
+%% section, [{host, Host}, _] is a host_config[] subsection for Host.
+host([]) -> global;
+host([{host, Host}, _]) -> Host.
+
+%% Access rule values may be atoms (encoded as TOML strings) or integers.
+-spec access_rule_value(toml_value()) -> option().
+access_rule_value(B) when is_binary(B) -> b2a(B);
+access_rule_value(V) -> V.
+
+%% path: (host_config[].)s2s.*
+%% Options wrapped in ?HOST_F are expanded per configured host later.
+-spec process_s2s_option(path(), toml_value()) -> config_list().
+process_s2s_option([<<"dns">>|_] = Path, V) ->
+    [#local_config{key = s2s_dns_options, value = parse_section(Path, V)}];
+process_s2s_option([<<"outgoing">>|_] = Path, V) ->
+    parse_section(Path, V);
+process_s2s_option([<<"use_starttls">>|_], V) ->
+    [#local_config{key = s2s_use_starttls, value = b2a(V)}];
+process_s2s_option([<<"certfile">>|_], V) ->
+    [#local_config{key = s2s_certfile, value = b2l(V)}];
+process_s2s_option([<<"default_policy">>|_], V) ->
+    ?HOST_F([#local_config{key = {s2s_default_policy, Host}, value = b2a(V)}]);
+process_s2s_option([<<"host_policy">>|_] = Path, V) ->
+    parse_list(Path, V);
+process_s2s_option([<<"address">>|_] = Path, V) ->
+    parse_list(Path, V);
+process_s2s_option([<<"ciphers">>|_], V) ->
+    [#local_config{key = s2s_ciphers, value = b2l(V)}];
+process_s2s_option([<<"domain_certfile">>|_] = Path, V) ->
+    parse_list(Path, V);
+process_s2s_option([<<"shared">>|_], V) ->
+    ?HOST_F([#local_config{key = {s2s_shared, Host}, value = V}]);
+process_s2s_option([<<"max_retry_delay">>|_], V) ->
+    ?HOST_F([#local_config{key = {s2s_max_retry_delay, Host}, value = V}]).
+
+%% path: s2s.dns.*
+-spec s2s_dns_opt(path(), toml_value()) -> [option()].
+s2s_dns_opt([<<"timeout">>|_], Value) -> [{timeout, Value}];
+s2s_dns_opt([<<"retries">>|_], Value) -> [{retries, Value}].
+
+%% path: s2s.outgoing.*
+-spec outgoing_s2s_opt(path(), toml_value()) -> [config()].
+outgoing_s2s_opt([<<"port">>|_], Value) ->
+    [#local_config{key = outgoing_s2s_port, value = Value}];
+outgoing_s2s_opt([<<"ip_versions">>|_] = Path, Value) ->
+    [#local_config{key = outgoing_s2s_families, value = parse_list(Path, Value)}];
+outgoing_s2s_opt([<<"connection_timeout">>|_], Value) ->
+    %% accepts an integer or the string "infinity"
+    [#local_config{key = outgoing_s2s_timeout, value = int_or_infinity(Value)}].
+
+%% path: s2s.outgoing.ip_versions[]
+-spec s2s_address_family(path(), toml_value()) -> [option()].
+s2s_address_family(_, 4) -> [ipv4];
+s2s_address_family(_, 6) -> [ipv6].
+
+%% path: s2s.host_policy[]
+%% "S2SHost" is deliberately not called "Host": ?HOST_F binds 'Host' to the
+%% local vhost, while S2SHost is the remote domain the policy applies to.
+-spec s2s_host_policy(path(), toml_section()) -> config_list().
+s2s_host_policy(Path, M) ->
+    Opts = parse_section(Path, M),
+    {_, S2SHost} = proplists:lookup(host, Opts),
+    {_, Policy} = proplists:lookup(policy, Opts),
+    ?HOST_F([#local_config{key = {{s2s_host, S2SHost}, Host}, value = Policy}]).
+
+%% path: s2s.host_policy[].*
+-spec s2s_host_policy_opt(path(), toml_value()) -> [option()].
+s2s_host_policy_opt([<<"host">>|_], V) -> [{host, V}];
+s2s_host_policy_opt([<<"policy">>|_], V) -> [{policy, b2a(V)}].
+
+%% path: s2s.address[]
+%% A static address override; the port is optional.
+-spec s2s_address(path(), toml_section()) -> [config()].
+s2s_address(Path, M) ->
+    Opts = parse_section(Path, M),
+    {_, Host} = proplists:lookup(host, Opts),
+    {_, IPAddress} = proplists:lookup(ip_address, Opts),
+    Addr = case proplists:lookup(port, Opts) of
+               {_, Port} -> {IPAddress, Port};
+               none -> IPAddress
+           end,
+    [#local_config{key = {s2s_addr, Host}, value = Addr}].
+
+%% path: s2s.address[].*
+-spec s2s_addr_opt(path(), toml_value()) -> [option()].
+s2s_addr_opt([<<"host">>|_], V) -> [{host, V}];
+s2s_addr_opt([<<"ip_address">>|_], V) -> [{ip_address, b2l(V)}];
+s2s_addr_opt([<<"port">>|_], V) -> [{port, V}].
+
+%% path: s2s.domain_certfile[]
+-spec s2s_domain_cert(path(), toml_section()) -> [config()].
+s2s_domain_cert(_, #{<<"domain">> := Dom, <<"certfile">> := Cert}) ->
+    [#local_config{key = {domain_certfile, b2l(Dom)}, value = b2l(Cert)}].
+
+%% path: host_config[]
+%% The "host" key was consumed into the path by item_key/2; strip it here and
+%% parse the remaining per-host sections.
+-spec process_host_item(path(), toml_section()) -> config_list().
+process_host_item(Path, M) ->
+    {_Host, Sections} = maps:take(<<"host">>, M),
+    parse_section(Path, Sections).
+
+%% path: listen.http[].tls.*,
+%%       listen.c2s[].tls.*,
+%%       outgoing_pools.rdbms.connection.tls.*,
+%%       outgoing_pools.ldap.connection.tls.*,
+%%       outgoing_pools.riak.connection.tls.*,
+%%       outgoing_pools.cassandra.connection.tls.*
+%% Options for just_tls / OTP ssl-based listeners and pools.
+-spec tls_option(path(), toml_value()) -> [option()].
+tls_option([<<"verify_peer">>|_], V) -> [{verify, verify_peer(V)}];
+tls_option([<<"certfile">>|_], V) -> [{certfile, b2l(V)}];
+tls_option([<<"cacertfile">>|_], V) -> [{cacertfile, b2l(V)}];
+tls_option([<<"dhfile">>|_], V) -> [{dhfile, b2l(V)}];
+tls_option([<<"keyfile">>|_], V) -> [{keyfile, b2l(V)}];
+tls_option([<<"password">>|_], V) -> [{password, b2l(V)}];
+tls_option([<<"server_name_indication">>|_], false) -> [{server_name_indication, disable}];
+tls_option([<<"ciphers">>|_] = Path, L) -> [{ciphers, parse_list(Path, L)}];
+tls_option([<<"versions">>|_] = Path, L) -> [{versions, parse_list(Path, L)}].
+
+%% path: listen.http[].tls.*,
+%%       listen.c2s[].tls.*,
+%%       (host_config[].)modules.mod_global_distrib.connections.tls.*
+%% fast_tls uses slightly different option names than tls_option/2 above,
+%% e.g. "cacertfile" maps to 'cafile' and ciphers are a single string.
+-spec fast_tls_option(path(), toml_value()) -> [option()].
+fast_tls_option([<<"certfile">>|_], V) -> [{certfile, b2l(V)}];
+fast_tls_option([<<"cacertfile">>|_], V) -> [{cafile, b2l(V)}];
+fast_tls_option([<<"dhfile">>|_], V) -> [{dhfile, b2l(V)}];
+fast_tls_option([<<"ciphers">>|_], V) -> [{ciphers, b2l(V)}].
+
+%% Maps the boolean TOML flag to the ssl 'verify' option values.
+-spec verify_peer(boolean()) -> option().
+verify_peer(false) -> verify_none;
+verify_peer(true) -> verify_peer.
+
+%% A cipher is either a full suite definition (map form) or a suite name
+%% string passed through as-is.
+-spec tls_cipher(path(), toml_value()) -> [option()].
+tls_cipher(_, #{<<"key_exchange">> := KEx,
+                <<"cipher">> := Cipher,
+                <<"mac">> := MAC,
+                <<"prf">> := PRF}) ->
+    [#{key_exchange => b2a(KEx), cipher => b2a(Cipher), mac => b2a(MAC), prf => b2a(PRF)}];
+tls_cipher(_, Cipher) -> [b2l(Cipher)].
+
+%% Applies the collected 'override' directives to the parser state.
+set_overrides(Overrides, State) ->
+    lists:foldl(fun({override, Scope}, CurrentState) ->
+                        mongoose_config_parser:override(Scope, CurrentState)
+                end, State, Overrides).
+
+%% TODO replace with binary_to_existing_atom where possible, prevent atom leak
+b2a(B) -> binary_to_atom(B, utf8).
+
+b2l(B) -> binary_to_list(B).
+
+%% Accepts an integer or the literal string "infinity".
+int_or_infinity(I) when is_integer(I) -> I;
+int_or_infinity(<<"infinity">>) -> infinity.
+
+%% Asserts that Section contains no keys outside Keys: maps:with/2 drops
+%% unknown keys, so the match fails (badmatch) exactly when one is present.
+-spec limit_keys([toml_key()], toml_section()) -> any().
+limit_keys(Keys, Section) ->
+    Section = maps:with(Keys, Section).
+
+%% Asserts that every key in Keys is present in Section.
+-spec ensure_keys([toml_key()], toml_section()) -> any().
+ensure_keys(Keys, Section) ->
+    true = lists:all(fun(Key) -> maps:is_key(Key, Section) end, Keys).
+
+%% Parses key K of Section, falling back to Default when it is absent.
+-spec parse_kv(path(), toml_key(), toml_section(), option()) -> option().
+parse_kv(Path, K, Section, Default) ->
+    Value = maps:get(K, Section, Default),
+    Key = key(K, Path, Value),
+    handle([Key|Path], Value).
+
+%% Parses mandatory key K of Section; crashes (badmatch) when missing.
+-spec parse_kv(path(), toml_key(), toml_section()) -> option().
+parse_kv(Path, K, Section) ->
+    #{K := Value} = Section,
+    Key = key(K, Path, Value),
+    handle([Key|Path], Value).
+
+%% Parses every key of a TOML table; keys are sorted for a deterministic
+%% option order. Each key is pushed onto the (reversed) path.
+-spec parse_section(path(), toml_section()) -> [option()].
+parse_section(Path, M) ->
+    lists:flatmap(fun({K, V}) ->
+                          Key = key(K, Path, V),
+                          handle([Key|Path], V)
+                  end, lists:sort(maps:to_list(M))).
+
+%% Parses every element of a TOML array; item_key/2 decides the path element.
+-spec parse_list(path(), [toml_value()]) -> [option()].
+parse_list(Path, L) ->
+    lists:flatmap(fun(Elem) ->
+                          Key = item_key(Path, Elem),
+                          handle([Key|Path], Elem)
+                  end, L).
+
+%% Dispatches Value to the handler selected for Path and validates the result.
+%% Raw errors are wrapped once into a #{what => toml_parse_failed, ...} map;
+%% the guard avoids re-wrapping an already-wrapped error on the way up.
+-spec handle(path(), toml_value()) -> option().
+handle(Path, Value) ->
+    Handler = handler(Path),
+    Option = try Handler(Path, Value)
+             catch error:Error:Stacktrace
+                     when not is_map(Error); not is_map_key(path, Error) ->
+                     E = #{what => toml_parse_failed,
+                           path => Path, reason => Error},
+                     erlang:raise(error, E, Stacktrace)
+             end,
+    validate(Path, Option),
+    Option.
+
+%% Runs the TOML validator on a parsed option; like handle/2, wraps raw
+%% errors once into a #{what => toml_validate_failed, ...} map.
+validate(Path, Option) ->
+    try mongoose_config_validator_toml:validate(Path, Option)
+    catch error:Error:Stacktrace
+            when not is_map(Error); not is_map_key(path, Error) ->
+            E = #{what => toml_validate_failed,
+                  path => Path, reason => Error},
+            erlang:raise(error, E, Stacktrace)
+    end.
+
+%% Selects the parsing function for a path. Paths are REVERSED: the head is
+%% the innermost key (or a {tls, _}/{connection, _}/item marker produced by
+%% key/3 and item_key/2), the last element is the top-level section name.
+%% host_config paths fall through to the catch-all clause, which strips the
+%% host_config prefix and reuses the handler of the corresponding global path.
+-spec handler(path()) -> fun((path(), toml_value()) -> option()).
+handler([_]) -> fun process_section/2;
+
+%% general
+handler([_, <<"general">>]) -> fun process_general/2;
+handler([_, <<"hosts">>, <<"general">>]) -> fun process_host/2;
+handler([_, <<"override">>, <<"general">>]) -> fun process_override/2;
+handler([_, <<"mongooseimctl_access_commands">>, <<"general">>]) -> fun ctl_access_rule/2;
+handler([<<"commands">>, _, <<"mongooseimctl_access_commands">>, <<"general">>]) ->
+    fun ctl_access_commands/2;
+handler([_, <<"commands">>, _, <<"mongooseimctl_access_commands">>, <<"general">>]) ->
+    fun(_, Val) -> [b2l(Val)] end;
+handler([<<"argument_restrictions">>, _, <<"mongooseimctl_access_commands">>, <<"general">>]) ->
+    fun parse_section/2;
+handler([_, <<"argument_restrictions">>, _, <<"mongooseimctl_access_commands">>, <<"general">>]) ->
+    fun ctl_access_arg_restriction/2;
+handler([_, <<"routing_modules">>, <<"general">>]) ->
+    fun(_, Val) -> [b2a(Val)] end;
+
+%% listen
+handler([_, <<"listen">>]) -> fun parse_list/2;
+handler([_, _, <<"listen">>]) -> fun process_listener/2;
+handler([_, _, <<"http">>, <<"listen">>]) -> fun http_listener_opt/2;
+handler([_, _, <<"c2s">>, <<"listen">>]) -> fun c2s_listener_opt/2;
+handler([_, _, <<"s2s">>, <<"listen">>]) -> fun s2s_listener_opt/2;
+handler([_, <<"tls">>, _, <<"s2s">>, <<"listen">>]) -> fun s2s_tls_option/2;
+handler([_, _, <<"service">>, <<"listen">>]) -> fun service_listener_opt/2;
+%% {tls, just_tls} / {tls, fast_tls} markers are inserted by key/3
+handler([_, {tls, _}, _, <<"c2s">>, <<"listen">>]) -> fun c2s_tls_option/2;
+handler([_, <<"versions">>, {tls, just_tls}, _, <<"c2s">>, <<"listen">>]) ->
+    fun(_, Val) -> [b2a(Val)] end;
+handler([_, <<"ciphers">>, {tls, just_tls}, _, <<"c2s">>, <<"listen">>]) ->
+    fun tls_cipher/2;
+handler([_, <<"crl_files">>, {tls, just_tls}, _, <<"c2s">>, <<"listen">>]) ->
+    fun(_, Val) -> [b2l(Val)] end;
+handler([_, <<"protocol_options">>, _TLS, _, _, <<"listen">>]) ->
+    fun(_, Val) -> [b2l(Val)] end;
+handler([_, <<"tls">>, _, <<"http">>, <<"listen">>]) -> fun https_option/2;
+handler([_, <<"transport">>, _, <<"http">>, <<"listen">>]) -> fun cowboy_transport_opt/2;
+handler([_, <<"protocol">>, _, <<"http">>, <<"listen">>]) -> fun cowboy_protocol_opt/2;
+handler([_, <<"handlers">>, _, <<"http">>, <<"listen">>]) -> fun parse_list/2;
+handler([_, _, <<"handlers">>, _, <<"http">>, <<"listen">>]) -> fun cowboy_module/2;
+handler([_, _, <<"mongoose_api">>, <<"handlers">>, _, <<"http">>, <<"listen">>]) ->
+    fun mongoose_api_option/2;
+handler([_, <<"handlers">>, _, <<"mongoose_api">>, <<"handlers">>, _, <<"http">>, <<"listen">>]) ->
+    fun(_, Val) -> [b2a(Val)] end;
+handler([_, _, <<"mod_websockets">>, <<"handlers">>, _, <<"http">>, <<"listen">>]) ->
+    fun websockets_option/2;
+handler([_, <<"service">>, _, <<"mod_websockets">>, <<"handlers">>, _, <<"http">>, <<"listen">>]) ->
+    fun service_listener_opt/2;
+
+%% auth
+handler([_, <<"auth">>]) -> fun auth_option/2;
+handler([_, <<"anonymous">>, <<"auth">>]) -> fun auth_anonymous_option/2;
+handler([_, <<"ldap">>, <<"auth">>]) -> fun auth_ldap_option/2;
+handler([_, <<"external">>, <<"auth">>]) -> fun auth_external_option/2;
+handler([_, <<"http">>, <<"auth">>]) -> fun auth_http_option/2;
+handler([_, <<"jwt">>, <<"auth">>]) -> fun auth_jwt_option/2;
+handler([_, <<"secret">>, <<"jwt">>, <<"auth">>]) -> fun auth_jwt_secret/2;
+handler([_, <<"riak">>, <<"auth">>]) -> fun auth_riak_option/2;
+handler([_, <<"uids">>, <<"ldap">>, <<"auth">>]) -> fun auth_ldap_uids/2;
+handler([_, <<"dn_filter">>, <<"ldap">>, <<"auth">>]) -> fun auth_ldap_dn_filter/2;
+handler([_, <<"local_filter">>, <<"ldap">>, <<"auth">>]) -> fun auth_ldap_local_filter/2;
+handler([_, <<"attributes">>, _, <<"ldap">>, <<"auth">>]) -> fun(_, V) -> [b2l(V)] end;
+handler([_, <<"values">>, _, <<"ldap">>, <<"auth">>]) -> fun(_, V) -> [b2l(V)] end;
+handler([_, <<"methods">>, <<"auth">>]) -> fun(_, Val) -> [b2a(Val)] end;
+handler([_, <<"hash">>, <<"password">>, <<"auth">>]) -> fun(_, Val) -> [b2a(Val)] end;
+handler([_, <<"sasl_external">>, <<"auth">>]) -> fun sasl_external/2;
+handler([_, <<"sasl_mechanisms">>, <<"auth">>]) -> fun sasl_mechanism/2;
+
+%% outgoing_pools
+handler([_, <<"outgoing_pools">>]) -> fun parse_section/2;
+handler([_, _, <<"outgoing_pools">>]) -> fun process_pool/2;
+handler([<<"connection">>, _, _, <<"outgoing_pools">>]) -> fun connection_options/2;
+%% {connection, Driver} markers for rdbms are inserted by key/3
+handler([{connection, _}, _,
+         <<"rdbms">>, <<"outgoing_pools">>]) -> fun connection_options/2;
+handler([_, _, _, <<"outgoing_pools">>]) -> fun pool_option/2;
+handler([_, {connection, odbc}, _,
+         <<"rdbms">>, <<"outgoing_pools">>]) -> fun odbc_option/2;
+handler([_, {connection, _}, _,
+         <<"rdbms">>, <<"outgoing_pools">>]) -> fun sql_server_option/2;
+handler([_, <<"connection">>, _,
+         <<"http">>, <<"outgoing_pools">>]) -> fun http_option/2;
+handler([_, <<"connection">>, _,
+         <<"redis">>, <<"outgoing_pools">>]) -> fun redis_option/2;
+handler([_, <<"connection">>, _,
+         <<"ldap">>, <<"outgoing_pools">>]) -> fun ldap_option/2;
+handler([_, <<"servers">>, <<"connection">>, _,
+         <<"ldap">>, <<"outgoing_pools">>]) -> fun(_, V) -> [b2l(V)] end;
+handler([_, <<"connection">>, _,
+         <<"riak">>, <<"outgoing_pools">>]) -> fun riak_option/2;
+handler([_, <<"credentials">>, <<"connection">>, _,
+         <<"riak">>, <<"outgoing_pools">>]) -> fun riak_credentials/2;
+handler([_, <<"connection">>, _,
+         <<"cassandra">>, <<"outgoing_pools">>]) -> fun cassandra_option/2;
+handler([_, <<"auth">>, <<"connection">>, _,
+         <<"cassandra">>, <<"outgoing_pools">>]) -> fun cassandra_option/2;
+handler([_, <<"servers">>, <<"connection">>, _,
+         <<"cassandra">>, <<"outgoing_pools">>]) -> fun cassandra_server/2;
+handler([_, <<"connection">>, _,
+         <<"elastic">>, <<"outgoing_pools">>]) -> fun elastic_option/2;
+handler([_, <<"connection">>, _,
+         <<"rabbit">>, <<"outgoing_pools">>]) -> fun rabbit_option/2;
+handler([_, <<"tls">>, _, _, _, <<"outgoing_pools">>]) -> fun tls_option/2;
+handler([_, <<"versions">>, <<"tls">>, _, _, _, <<"outgoing_pools">>]) ->
+    fun(_, Val) -> [b2a(Val)] end;
+handler([_, <<"ciphers">>, <<"tls">>, _, _, _, <<"outgoing_pools">>]) ->
+    fun tls_cipher/2;
+
+%% services
+handler([_, <<"services">>]) -> fun process_service/2;
+handler([_, _, <<"services">>]) -> fun service_opt/2;
+
+%% modules
+handler([_, <<"modules">>]) -> fun process_module/2;
+handler([_, _, <<"modules">>]) -> fun module_opt/2;
+handler([_, <<"riak">>, _, <<"modules">>]) ->
+    fun riak_opts/2;
+handler([_, <<"ip_access">>, <<"mod_register">>, <<"modules">>]) ->
+    fun mod_register_ip_access_rule/2;
+handler([_, <<"registration_watchers">>, <<"mod_register">>, <<"modules">>]) ->
+    fun(_, V) -> [V] end;
+handler([_, <<"welcome_message">>, <<"mod_register">>, <<"modules">>]) ->
+    fun welcome_message/2;
+handler([_, <<"validity_period">>, <<"mod_auth_token">>, <<"modules">>]) ->
+    fun mod_auth_token_validity_periods/2;
+handler([_, <<"extra_domains">>, <<"mod_disco">>, <<"modules">>]) ->
+    fun(_, V) -> [V] end;
+handler([_, <<"server_info">>, <<"mod_disco">>, <<"modules">>]) ->
+    fun mod_disco_server_info/2;
+handler([_, <<"urls">>, _, <<"server_info">>, <<"mod_disco">>, <<"modules">>]) ->
+    fun(_, V) -> [b2l(V)] end;
+handler([_, <<"module">>, _, <<"server_info">>, <<"mod_disco">>, <<"modules">>]) ->
+    fun(_, V) -> [b2a(V)] end;
+handler([<<"sns">>, <<"backend">>, <<"mod_event_pusher">>, <<"modules">>]) ->
+    fun mod_event_pusher_backend_sns/2;
+handler([<<"push">>, <<"backend">>, <<"mod_event_pusher">>, <<"modules">>]) ->
+    fun mod_event_pusher_backend_push/2;
+handler([<<"http">>, <<"backend">>, <<"mod_event_pusher">>, <<"modules">>]) ->
+    fun mod_event_pusher_backend_http/2;
+handler([<<"rabbit">>, <<"backend">>, <<"mod_event_pusher">>, <<"modules">>]) ->
+    fun mod_event_pusher_backend_rabbit/2;
+handler([_, <<"sns">>, <<"backend">>, <<"mod_event_pusher">>, <<"modules">>]) ->
+    fun mod_event_pusher_backend_sns_opts/2;
+handler([_, <<"push">>, <<"backend">>, <<"mod_event_pusher">>, <<"modules">>]) ->
+    fun mod_event_pusher_backend_push_opts/2;
+handler([_, <<"http">>, <<"backend">>, <<"mod_event_pusher">>, <<"modules">>]) ->
+    fun mod_event_pusher_backend_http_opts/2;
+handler([_, <<"rabbit">>, <<"backend">>, <<"mod_event_pusher">>, <<"modules">>]) ->
+    fun mod_event_pusher_backend_rabbit_opts/2;
+handler([_,<<"wpool">>, <<"push">>, <<"backend">>, <<"mod_event_pusher">>, <<"modules">>]) ->
+    fun pool_option/2;
+handler([_,<<"virtual_pubsub_hosts">>, <<"push">>, <<"backend">>, <<"mod_event_pusher">>, <<"modules">>]) ->
+    fun (_, V) -> [b2l(V)] end;
+handler([_,<<"presence_exchange">>, <<"rabbit">>, <<"backend">>, <<"mod_event_pusher">>, <<"modules">>]) ->
+    fun mod_event_pusher_rabbit_presence_ex/2;
+handler([_,<<"chat_msg_exchange">>, <<"rabbit">>, <<"backend">>, <<"mod_event_pusher">>, <<"modules">>]) ->
+    fun mod_event_pusher_rabbit_msg_ex/2;
+handler([_,<<"groupchat_msg_exchange">>, <<"rabbit">>, <<"backend">>, <<"mod_event_pusher">>, <<"modules">>]) ->
+    fun mod_event_pusher_rabbit_msg_ex/2;
+handler([_, <<"service">>, <<"mod_extdisco">>, <<"modules">>]) ->
+    fun mod_extdisco_service/2;
+handler([_, _, <<"service">>, <<"mod_extdisco">>, <<"modules">>]) ->
+    fun mod_extdisco_service/2;
+handler([_, <<"s3">>, <<"mod_http_upload">>, <<"modules">>]) ->
+    fun mod_http_upload_s3/2;
+handler([_, <<"reset_markers">>, <<"mod_inbox">>, <<"modules">>]) ->
+    fun(_, V) -> [b2a(V)] end;
+handler([_, <<"groupchat">>, <<"mod_inbox">>, <<"modules">>]) ->
+    fun(_, V) -> [b2a(V)] end;
+handler([_, <<"connections">>, <<"mod_global_distrib">>, <<"modules">>]) ->
+    fun mod_global_distrib_connections/2;
+handler([_, <<"cache">>, <<"mod_global_distrib">>, <<"modules">>]) ->
+    fun mod_global_distrib_cache/2;
+handler([_, <<"bounce">>, <<"mod_global_distrib">>, <<"modules">>]) ->
+    fun mod_global_distrib_bounce/2;
+handler([_, <<"redis">>, <<"mod_global_distrib">>, <<"modules">>]) ->
+    fun mod_global_distrib_redis/2;
+handler([_,<<"endpoints">>, <<"connections">>, <<"mod_global_distrib">>, <<"modules">>]) ->
+    fun mod_global_distrib_connections_endpoints/2;
+handler([_,<<"advertised_endpoints">>, <<"connections">>, <<"mod_global_distrib">>, <<"modules">>]) ->
+    fun mod_global_distrib_connections_advertised_endpoints/2;
+handler([_,<<"tls">>, <<"connections">>, <<"mod_global_distrib">>, <<"modules">>]) ->
+    fun fast_tls_option/2;
+handler([_, <<"keys">>, <<"mod_keystore">>, <<"modules">>]) ->
+    fun mod_keystore_keys/2;
+handler([_, _, <<"mod_mam_meta">>, <<"modules">>]) ->
+    fun mod_mam_opts/2;
+handler([_, <<"default_room">>, <<"mod_muc">>, <<"modules">>]) ->
+    fun mod_muc_default_room/2;
+handler([_, <<"maygetmemberlist">>, <<"default_room">>, <<"mod_muc">>, <<"modules">>]) ->
+    fun (_, V) -> [b2a(V)] end;
+handler([_, <<"affiliations">>, <<"default_room">>, <<"mod_muc">>, <<"modules">>]) ->
+    fun mod_muc_default_room_affiliations/2;
+handler([_, <<"top_link">>, <<"mod_muc_log">>, <<"modules">>]) ->
+    fun mod_muc_log_top_link/2;
+handler([_, <<"config_schema">>, <<"mod_muc_light">>, <<"modules">>]) ->
+    fun mod_muc_light_config_schema/2;
+handler([_, <<"plugins">>, <<"mod_pubsub">>, <<"modules">>]) ->
+    fun(_, V) -> [V] end;
+handler([_, <<"pep_mapping">>, <<"mod_pubsub">>, <<"modules">>]) ->
+    fun mod_pubsub_pep_mapping/2;
+handler([_, <<"default_node_config">>, <<"mod_pubsub">>, <<"modules">>]) ->
+    fun mod_pubsub_default_node_config/2;
+handler([_, <<"roster_groups_allowed">>, <<"default_node_config">>, <<"mod_pubsub">>, <<"modules">>]) ->
+    fun mod_pubsub_roster_groups_allowed/2;
+handler([_, <<"routes">>, <<"mod_revproxy">>, <<"modules">>]) ->
+    fun mod_revproxy_routes/2;
+handler([_, <<"stale_h">>, <<"mod_stream_management">>, <<"modules">>]) ->
+    fun mod_stream_management_stale_h/2;
+handler([_, <<"ldap_uids">>, <<"mod_vcard">>, <<"modules">>]) ->
+    fun mod_vcard_ldap_uids/2;
+handler([_, <<"ldap_vcard_map">>, <<"mod_vcard">>, <<"modules">>]) ->
+    fun mod_vcard_ldap_vcard_map/2;
+handler([_, <<"ldap_search_fields">>, <<"mod_vcard">>, <<"modules">>]) ->
+    fun mod_vcard_ldap_search_fields/2;
+handler([_, <<"ldap_search_reported">>, <<"mod_vcard">>, <<"modules">>]) ->
+    fun mod_vcard_ldap_search_reported/2;
+handler([_, <<"ldap_binary_search_fields">>, <<"mod_vcard">>, <<"modules">>]) ->
+    fun mod_vcard_ldap_binary_search_fields/2;
+handler([_, <<"submods">>, <<"service_admin_extra">>, <<"services">>]) ->
+    fun service_admin_extra_submods/2;
+
+
+%% shaper, acl, access
+handler([_, <<"shaper">>]) -> fun process_shaper/2;
+handler([_, <<"acl">>]) -> fun parse_list/2;
+handler([_, _, <<"acl">>]) -> fun process_acl/2;
+handler([_, <<"access">>]) -> fun process_access_rule/2;
+handler([_, _, <<"access">>]) -> fun process_access_rule_item/2;
+
+%% s2s
+handler([_, <<"s2s">>]) -> fun process_s2s_option/2;
+handler([_, <<"dns">>, <<"s2s">>]) -> fun s2s_dns_opt/2;
+handler([_, <<"outgoing">>, <<"s2s">>]) -> fun outgoing_s2s_opt/2;
+handler([_, <<"ip_versions">>, <<"outgoing">>, <<"s2s">>]) -> fun s2s_address_family/2;
+handler([_, <<"host_policy">>, <<"s2s">>]) -> fun s2s_host_policy/2;
+handler([_, _, <<"host_policy">>, <<"s2s">>]) -> fun s2s_host_policy_opt/2;
+handler([_, <<"address">>, <<"s2s">>]) -> fun s2s_address/2;
+handler([_, _, <<"address">>, <<"s2s">>]) -> fun s2s_addr_opt/2;
+handler([_, <<"domain_certfile">>, <<"s2s">>]) -> fun s2s_domain_cert/2;
+
+%% host_config
+handler([_, <<"host_config">>]) -> fun process_host_item/2;
+handler([<<"auth">>, _, <<"host_config">>] = P) -> handler_for_host(P);
+handler([<<"modules">>, _, <<"host_config">>] = P) -> handler_for_host(P);
+handler([_, _, <<"host_config">>]) -> fun process_section/2;
+handler([_, <<"general">>, _, <<"host_config">>] = P) -> handler_for_host(P);
+handler([_, <<"s2s">>, _, <<"host_config">>] = P) -> handler_for_host(P);
+%% catch-all: any deeper host_config path reuses the global path's handler
+handler(Path) ->
+    [<<"host_config">>, {host, _} | Rest] = lists:reverse(Path),
+    handler(lists:reverse(Rest)).
+
+%% 1. Strip host_config, choose the handler for the remaining path
+%% 2. Wrap the handler in a fun that calls the resulting function F for the current host
+-spec handler_for_host(path()) -> fun((path(), toml_value()) -> option()).
+handler_for_host(Path) ->
+    [<<"host_config">>, {host, Host} | Rest] = lists:reverse(Path),
+    Handler = handler(lists:reverse(Rest)),
+    fun(PathArg, ValueArg) ->
+            ConfigFunctions = Handler(PathArg, ValueArg),
+            lists:flatmap(fun(F) -> F(Host) end, ConfigFunctions)
+    end.
+
+%% Computes the path element for key K of a section; usually K itself, but
+%% some keys are replaced by tuples so that handler/1 can branch on context.
+-spec key(toml_key(), path(), toml_value()) -> tuple() | toml_key().
+key(<<"tls">>, [item, <<"c2s">>, <<"listen">>], M) ->
+    %% store the tls module in path as both of them need different options
+    case maps:get(<<"module">>, M, <<"fast_tls">>) of
+        <<"just_tls">> -> {tls, just_tls};
+        <<"fast_tls">> -> {tls, fast_tls}
+    end;
+key(<<"connection">>, [_, <<"rdbms">>, <<"outgoing_pools">>], M) ->
+    %% store the db driver in path as 'odbc' and 'mysql'/'pgsql' need different options
+    Driver = maps:get(<<"driver">>, M),
+    {connection, b2a(Driver)};
+key(Key, _Path, _) -> Key.
+
+%% Computes the path element for a list item; host_config entries carry
+%% their host in the path so nested handlers can resolve it.
+-spec item_key(path(), toml_value()) -> tuple() | item.
+item_key([<<"host_config">>], #{<<"host">> := Host}) -> {host, Host};
+item_key(_, _) -> item.
+
+%% Prepends {Key, false} to Opts unless Key is already defined in it.
+defined_or_false(Key, Opts) ->
+    case proplists:is_defined(Key, Opts) of
+        true ->
+            [];
+        false ->
+            [{Key, false}]
+    end ++ Opts.
diff --git a/src/config/mongoose_config_reload.erl b/src/config/mongoose_config_reload.erl
index de271657993..74f8c31e7ad 100644
--- a/src/config/mongoose_config_reload.erl
+++ b/src/config/mongoose_config_reload.erl
@@ -13,6 +13,10 @@
 -export([states_to_reloading_context/1]).
 -export([context_to_failed_checks/1]).
 -export([context_to_changes_to_apply/1]).
+-export([check_hosts/2]).
+-export([can_be_ignored/1]).
+-export([group_host_changes/1]).
+-export([is_not_host_specific/1]).
 
 -include("mongoose.hrl").
 -include("ejabberd_config.hrl").
 
@@ -40,7 +44,7 @@
        mongoose_node => node(),
        config_file => string(),
        loaded_categorized_options => categorized_options(),
-       ondisc_config_terms => list(),
+       ondisc_config_state => state(),
        missing_files => list(file:filename()),
        required_files => list(file:filename())}.
 
@@ -458,15 +462,14 @@ node_values(Key, NodeStates) ->
 %% mongoose_node => node(),
 %% config_file => string(),
 %% loaded_categorized_options => categorized_options(),
-%% ondisc_config_terms => list()
+%% ondisc_config_state => state()
 extend_node_states(NodeStates) ->
     lists:map(fun(NodeState) -> extend_node_state(NodeState) end, NodeStates).
 
 extend_node_state(NodeState=#{
         loaded_categorized_options := LoadedCatOptions,
-        ondisc_config_terms := OndiscTerms}) ->
-    OndiscState = mongoose_config_parser:parse_terms(OndiscTerms),
+        ondisc_config_state := OndiscState}) ->
     OndiscCatOptions = state_to_categorized_options(OndiscState),
     NodeSpecificPatterns = mongoose_config_parser:state_to_global_opt(node_specific_options, OndiscState, []),
     LoadedFlatGlobalOptions = categorize_options_to_flat_global_config_opts(LoadedCatOptions),
@@ -540,3 +543,29 @@ subtract_lists(List, Except) ->
     SetList = ordsets:from_list(List),
     SetExcept = ordsets:from_list(Except),
     ordsets:subtract(SetList, SetExcept).
+
+%% @doc Diff two host lists using set semantics (duplicates and order are
+%% ignored). NOTE the result order: {HostsToDelete, HostsToAdd} - i.e. the
+%% first element relates to OldHosts even though NewHosts is the first
+%% argument.
+-spec check_hosts([jid:server()], [jid:server()]) ->
+    {[jid:server()], [jid:server()]}.
+check_hosts(NewHosts, OldHosts) ->
+    Old = sets:from_list(OldHosts),
+    New = sets:from_list(NewHosts),
+    ListToAdd = sets:to_list(sets:subtract(New, Old)),
+    ListToDel = sets:to_list(sets:subtract(Old, New)),
+    {ListToDel, ListToAdd}.
+
+%% @doc True for option keys whose changes may be skipped during reload.
+-spec can_be_ignored(Key :: atom() | tuple()) -> boolean().
+can_be_ignored(Key) when is_atom(Key);
+    is_tuple(Key) ->
+    L = [domain_certfile, s2s, all_metrics_are_global, rdbms],
+    lists:member(Key, L).
+
+%% @doc True when an option key does not target a single host.
+%% Bare atoms and {Key, PoolType, PoolName} triples are global;
+%% {Key, Host} pairs are host-specific. Other shapes crash (let-it-crash).
+-spec is_not_host_specific(atom()
+                           | {atom(), jid:server()}
+                           | {atom(), atom(), atom()}) -> boolean().
+is_not_host_specific(Key) when is_atom(Key) ->
+    true;
+is_not_host_specific({Key, Host}) when is_atom(Key), is_binary(Host) ->
+    false;
+is_not_host_specific({Key, PoolType, PoolName})
+    when is_atom(Key), is_atom(PoolType), is_atom(PoolName) ->
+    true.
diff --git a/src/config/mongoose_config_terms.erl b/src/config/mongoose_config_terms.erl
new file mode 100644
index 00000000000..0f48ec249f8
--- /dev/null
+++ b/src/config/mongoose_config_terms.erl
@@ -0,0 +1,147 @@
+%%% @doc Parsing of the 'cfg' file to Erlang terms
+-module(mongoose_config_terms).
+
+-export([get_plain_terms_file/1]).
+
+-include("mongoose.hrl").
+-include("ejabberd_config.hrl").
+
+%% @doc Read an ejabberd configuration file and return the terms.
+%% Input is an absolute or relative path to an ejabberd config file.
+%% Returns a list of plain terms,
+%% in which the options 'include_config_file' were parsed
+%% and the terms in those files were included.
+%% On any read/parse error the problem is logged and
+%% mongoose_config_utils:exit_or_halt/1 terminates the node.
+-spec get_plain_terms_file(string()) -> [term()].
+get_plain_terms_file(File1) ->
+    File = mongoose_config_utils:get_absolute_path(File1),
+    case file:consult(File) of
+        {ok, Terms} ->
+            include_config_files(Terms);
+        {error, {LineNumber, erl_parse, _ParseMessage} = Reason} ->
+            %% syntax error: the parser supplies a line number for context
+            ExitText = describe_config_problem(File, Reason, LineNumber),
+            ?LOG_ERROR(#{what => ejabberd_config_file_loading_failed,
+                         file => File, line => LineNumber, reason => Reason}),
+            mongoose_config_utils:exit_or_halt(ExitText);
+        {error, Reason} ->
+            %% I/O error (e.g. enoent) - no line number available
+            ExitText = describe_config_problem(File, Reason),
+            ?LOG_ERROR(#{what => mim_config_file_loading_failed,
+                         file => File, reason => Reason}),
+            mongoose_config_utils:exit_or_halt(ExitText)
+    end.
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%%% Support for 'include_config_file'
+
+%% @doc Include additional configuration files in the list of terms.
+-spec include_config_files([term()]) -> [term()].
+%% Recursively loads every included file first, then splices its terms in.
+include_config_files(Terms) ->
+    Filenames = config_filenames_to_include(Terms),
+    Configs = lists:map(fun(Filename) ->
+                                {Filename, get_plain_terms_file(Filename)}
+                        end, Filenames),
+    include_config_files(Terms, Configs).
+
+%% Collects the filenames referenced by include_config_file terms
+%% (with or without options).
+config_filenames_to_include([{include_config_file, Filename} | Terms]) ->
+    [Filename|config_filenames_to_include(Terms)];
+config_filenames_to_include([{include_config_file, Filename, _Options} | Terms]) ->
+    [Filename|config_filenames_to_include(Terms)];
+config_filenames_to_include([_Other | Terms]) ->
+    config_filenames_to_include(Terms);
+config_filenames_to_include([]) ->
+    [].
+
+include_config_files(Terms, Configs) ->
+    include_config_files(Terms, Configs, []).
+
+%% Replaces each include_config_file term with the (filtered) terms of the
+%% referenced file; 'disallow' and 'allow_only' options restrict which
+%% included terms survive. Order of the surrounding terms is preserved.
+include_config_files([], _Configs, Res) ->
+    Res;
+include_config_files([{include_config_file, Filename} | Terms], Configs, Res) ->
+    %% normalize the two-element form to the three-element form
+    include_config_files([{include_config_file, Filename, []} | Terms],
+                         Configs, Res);
+include_config_files([{include_config_file, Filename, Options} | Terms],
+                     Configs, Res) ->
+    IncludedTerms = find_plain_terms_for_file(Filename, Configs),
+    Disallow = proplists:get_value(disallow, Options, []),
+    IncludedTerms2 = delete_disallowed(Disallow, IncludedTerms),
+    AllowOnly = proplists:get_value(allow_only, Options, all),
+    IncludedTerms3 = keep_only_allowed(AllowOnly, IncludedTerms2),
+    include_config_files(Terms, Configs, Res ++ IncludedTerms3);
+include_config_files([Term | Terms], Configs, Res) ->
+    include_config_files(Terms, Configs, Res ++ [Term]).
+
+find_plain_terms_for_file(Filename, Configs) ->
+    case lists:keyfind(Filename, 1, Configs) of
+        false ->
+            %% Terms were not provided by caller for this file
+            erlang:error({config_not_found, Filename});
+        {Filename, Terms} ->
+            Terms
+    end.
+
+%% @doc Filter from the list of terms the disallowed.
+%% Returns a sublist of Terms without the ones which first element is
+%% included in Disallowed.
+-spec delete_disallowed(Disallowed :: [atom()],
+                        Terms :: [term()]) -> [term()].
+delete_disallowed(Disallowed, Terms) ->
+    lists:foldl(
+      fun(Dis, Ldis) ->
+              delete_disallowed2(Dis, Ldis)
+      end,
+      Terms,
+      Disallowed).
+
+%% Removes (and logs) every term tagged with one disallowed key.
+delete_disallowed2(Disallowed, [H | T]) ->
+    case element(1, H) of
+        Disallowed ->
+            ?LOG_WARNING(#{what => ignore_disallowed_option, option => Disallowed}),
+            delete_disallowed2(Disallowed, T);
+        _ ->
+            [H | delete_disallowed2(Disallowed, T)]
+    end;
+delete_disallowed2(_, []) ->
+    [].
+
+%% @doc Keep from the list only the allowed terms.
+%% Returns a sublist of Terms with only the ones which first element is
+%% included in Allowed.
+-spec keep_only_allowed(Allowed :: [atom()],
+                        Terms :: [term()]) -> [term()].
+keep_only_allowed(all, Terms) ->
+    Terms;
+keep_only_allowed(Allowed, Terms) ->
+    {As, NAs} = lists:partition(
+                  fun(Term) ->
+                          lists:member(element(1, Term), Allowed)
+                  end,
+                  Terms),
+    [?LOG_WARNING(#{what => ignore_disallowed_option, option => NA})
+     || NA <- NAs],
+    As.
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%%% Errors reading the config file
+
+%% TODO: refine this spec - cover file:posix() and {Line, Mod, Term} parse errors
+-type config_problem() :: atom() | {integer(), atom() | tuple(), _}.
+
+-spec describe_config_problem(Filename :: string(),
+                              Reason :: config_problem()) -> string().
+describe_config_problem(Filename, Reason) ->
+    Text1 = lists:flatten("Problem loading MongooseIM config file " ++ Filename),
+    Text2 = lists:flatten(" : " ++ file:format_error(Reason)),
+    ExitText = Text1 ++ Text2,
+    ExitText.
+
+
+-spec describe_config_problem(Filename :: string(),
+                              Reason :: config_problem(),
+                              Line :: pos_integer()) -> string().
%% @doc Like describe_config_problem/2, but also reports the approximate
%% line number and logs the surrounding configuration lines for context.
describe_config_problem(Filename, Reason, LineNumber) ->
    %% Use the same product name as describe_config_problem/2 (the original
    %% clause inconsistently said "ejabberd config file").
    Text1 = lists:flatten("Problem loading MongooseIM config file " ++ Filename),
    %% The original text said "approximately in the line" but never appended
    %% LineNumber — only file:format_error(Reason). Include both.
    Text2 = lists:flatten(" approximately in the line "
                          ++ integer_to_list(LineNumber)
                          ++ " : " ++ file:format_error(Reason)),
    ExitText = Text1 ++ Text2,
    Lines = mongoose_config_utils:get_config_lines(Filename, LineNumber, 10, 3),
    ?LOG_ERROR(#{what => mim_config_file_loading_failed, lines => Lines,
                 text => <<"The following lines from your configuration file might be relevant to the error">>}),
    ExitText.

%% ===================================================================
%% new file (from the patch): src/config/mongoose_config_validator_toml.erl
%% diff --git a/src/config/mongoose_config_validator_toml.erl
%%      b/src/config/mongoose_config_validator_toml.erl
%% new file mode 100644  index 00000000000..642359d1355
%% --- /dev/null  +++ b/src/config/mongoose_config_validator_toml.erl
%% @@ -0,0 +1,1874 @@
%% ===================================================================
-module(mongoose_config_validator_toml).

-export([validate/2]).

-include("mongoose.hrl").
-include("ejabberd_config.hrl").
-include_lib("jid/include/jid.hrl").

%% Placeholder host atom substituted into host-dependent option values.
-define(HOST, 'HOST').

-spec validate(mongoose_config_parser_toml:path(),
               mongoose_config_parser_toml:option() | mongoose_config_parser_toml:config_list()) ->
          any().
+validate(Path, [F]) when is_function(F, 1) -> + validate(Path, F(?HOST)); + +%% general +validate([<<"loglevel">>, <<"general">>], + [#local_config{value = Val}]) -> validate_loglevel(Val); +validate([item, <<"hosts">>, <<"general">>], + [Value]) -> + validate_non_empty_binary(Value); +validate([<<"hosts">>, <<"general">>], + [#config{value = Val}]) -> + validate_hosts(Val); +validate([<<"registration_timeout">>, <<"general">>], + [#local_config{value = Val}]) -> + validate_timeout(Val); +validate([<<"language">>, <<"general">>], + [#config{value = Value}]) -> + validate_non_empty_binary(Value); +validate([<<"all_metrics_are_global">>, <<"general">>], + [#local_config{value = Val}]) -> + validate_boolean(Val); +validate([<<"sm_backend">>, <<"general">>], + [#config{value = {Backend, []}}]) -> + validate_module(list_to_atom("ejabberd_sm_" ++ atom_to_list(Backend))); +validate([<<"max_fsm_queue">>, <<"general">>], + [#local_config{value = Value}]) -> + validate_positive_integer(Value); +validate([<<"rdbms_server_type">>, <<"general">>], + [#local_config{value = Value}]) -> + validate_enum(Value, [mssql, pgsql]); +validate([item, <<"override">>, <<"general">>], + [{override, Value}]) -> + validate_enum(Value, [local, global, acls]); +validate([<<"override">>, <<"general">>], + Items) -> + validate_unique_items(Items); +validate([<<"pgsql_users_number_estimate">>, <<"general">>|Path], + [#local_config{value = Value}]) -> + validate_root_or_host_config(Path), + validate_boolean(Value); +validate([<<"route_subdomains">>, <<"general">>|Path], + [#local_config{value = Value}]) -> + validate_root_or_host_config(Path), + validate_enum(Value, [s2s]); +validate([item, <<"routing_modules">>, <<"general">>], + [Value]) -> + validate_module(Value); +validate([<<"replaced_wait_timeout">>, <<"general">>|Path], + [#local_config{value = Value}]) -> + validate_root_or_host_config(Path), + validate_positive_integer(Value); +validate([<<"hide_service_name">>, <<"general">>|Path], + 
[#local_config{value = Value}]) -> + validate_root_or_host_config(Path), + validate_boolean(Value); + +%% listen +validate([item, _Type, <<"listen">>], + [{{Port, _IPT, _Proto}, _Module, _Opts}]) -> + validate_port(Port); +validate([<<"backlog">>, item, _Type, <<"listen">>], + [{backlog, Value}]) -> + validate_non_negative_integer(Value); +validate([<<"proxy_protocol">>, item, _Type, <<"listen">>], + [{proxy_protocol, Value}]) -> + validate_boolean(Value); +validate([<<"num_acceptors">>, item, _Type, <<"listen">>], + [{acceptors_num, Value}]) -> + validate_positive_integer(Value); +validate([<<"access">>, item, _Type, <<"listen">>], + [{access, Value}]) -> + validate_non_empty_atom(Value); +validate([<<"shaper">>, item, _Type, <<"listen">>], + [{shaper, Value}]) -> + validate_non_empty_atom(Value); +validate([<<"shaper_rule">>, item, <<"service">>, <<"listen">>], + [{shaper_rule, Value}]) -> + validate_non_empty_atom(Value); +validate([<<"xml_socket">>, item, <<"c2s">>, <<"listen">>], + [{xml_socket, Value}]) -> + validate_boolean(Value); +validate([<<"zlib">>, item, <<"c2s">>, <<"listen">>], + [{zlib, Value}]) -> + validate_positive_integer(Value); +validate([<<"hibernate_after">>, item, _, <<"listen">>], + [{hibernate_after, Value}]) -> + validate_non_negative_integer(Value); +validate([<<"mode">>, {tls, _}, item, <<"c2s">>, <<"listen">>], + [Value]) -> + validate_enum(Value, [tls, starttls, starttls_required]); +validate([<<"verify_mode">>, {tls, just_tls}, item, <<"c2s">>, <<"listen">>], + Value) -> + validate_enum(Value, [peer, selfsigned_peer, none]); +validate([<<"disconnect_on_failure">>, {tls, just_tls}, item, <<"c2s">>, <<"listen">>], + Value) -> + validate_boolean(Value); +validate([item, <<"crl_files">>, {tls, just_tls}, item, <<"c2s">>, <<"listen">>], + [Value]) -> + validate_non_empty_string(Value); +validate([item, <<"protocol_options">>, _TLS, item, _Type, <<"listen">>], + [Value]) -> + validate_non_empty_string(Value); +validate([FileType, _TLS, 
item, _Type, <<"listen">>], + [{_, Value}]) when FileType =:= <<"certfile">>; + FileType =:= <<"cacertfile">>; + FileType =:= <<"dhfile">> -> + validate_non_empty_string(Value); +validate([<<"max_stanza_size">>, item, _Type, <<"listen">>], + [{max_stanza_size, Value}]) -> + validate_positive_integer(Value); +validate([<<"max_fsm_queue">>, item, _Type, <<"listen">>], + [{max_fsm_queue, Value}]) -> + validate_positive_integer(Value); +validate([<<"check_from">>, item, <<"service">>, <<"listen">>], + [{service_check_from, Value}]) -> + validate_boolean(Value); +validate([<<"hidden_components">>, item, <<"service">>, <<"listen">>], + [{hidden_components, Value}]) -> + validate_boolean(Value); +validate([<<"conflict_behaviour">>, item, <<"service">>, <<"listen">>], + [{conflict_behaviour, Value}]) -> + validate_enum(Value, [kick_old, disconnect]); +validate([<<"password">>, item, <<"service">>, <<"listen">>], + [{password, Value}]) -> + validate_non_empty_string(Value); +validate([<<"verify_mode">>, <<"tls">>, item, <<"http">>, <<"listen">>], + [{verify_mode, Value}]) -> + validate_enum(Value, [peer, selfsigned_peer, none]); +validate([<<"num_acceptors">>, <<"transport">>, item, <<"http">>, <<"listen">>], + [{num_acceptors, Value}]) -> + validate_positive_integer(Value); +validate([<<"max_connections">>, <<"transport">>, item, <<"http">>, <<"listen">>], + [{max_connections, Value}]) -> + validate_non_negative_integer_or_infinity(Value); +validate([<<"compress">>, <<"protocol">>, item, <<"http">>, <<"listen">>], + [{compress, Value}]) -> + validate_boolean(Value); +validate([item, <<"lasse_handler">>, <<"handlers">>, item, <<"http">>, <<"listen">>], + [{Host, _Path, lasse_handler, Opts}]) -> + validate_non_empty_string(Host), + [Module] = Opts, + validate_module(Module); +validate([item, <<"handlers">>, + item, <<"mongoose_api">>, <<"handlers">>, item, <<"http">>, <<"listen">>], + [Value]) -> + validate_module(Value); +validate([item, _TypeBin, <<"handlers">>, item, 
<<"http">>, <<"listen">>], + [{Host, _Path, Type, _Opts}]) -> + validate_non_empty_string(Host), + validate_module(Type); + +%% auth +validate([item, <<"methods">>, <<"auth">>|Path], + [Value]) -> + validate_root_or_host_config(Path), + validate_module(list_to_atom("ejabberd_auth_" ++ atom_to_list(Value))); +validate([<<"password">>, <<"auth">>|Path], + [{password_format, Value}]) -> + validate_root_or_host_config(Path), + validate_password_format(Value); +validate([item, <<"hash">>, <<"password">>, <<"auth">>|Path], + [Value]) -> + validate_root_or_host_config(Path), + validate_enum(Value, [sha, sha224, sha256, sha384, sha512]); +validate([<<"scram_iterations">>, <<"auth">>|Path], + [{scram_iterations, Value}]) -> + validate_root_or_host_config(Path), + validate_positive_integer(Value); +validate([item, <<"sasl_external">>, <<"auth">>|Path], + [{mod, Module}]) -> + validate_root_or_host_config(Path), + validate_module(Module); +validate([<<"allow_multiple_connections">>, <<"anonymous">>, <<"auth">>|Path], + [{allow_multiple_connections, Value}]) -> + validate_root_or_host_config(Path), + validate_boolean(Value); +validate([<<"protocol">>, <<"anonymous">>, <<"auth">>|Path], + [{anonymous_protocol, Value}]) -> + validate_root_or_host_config(Path), + validate_enum(Value, [sasl_anon, login_anon, both]); +validate([Pool, <<"ldap">>, <<"auth">>|Path], + [{_, Value}]) when Pool =:= <<"pool_tag">>; + Pool =:= <<"bind_pool_tag">> -> + validate_root_or_host_config(Path), + validate_non_empty_atom(Value); +validate([<<"operation">>, <<"local_filter">>, <<"ldap">>, <<"auth">>|Path], + [{operation, Value}]) -> + validate_root_or_host_config(Path), + validate_enum(Value, [equal, not_equal]); +validate([<<"attribute">>, <<"local_filter">>, <<"ldap">>, <<"auth">>|Path], + [{attribute, Value}]) -> + validate_root_or_host_config(Path), + validate_non_empty_string(Value); +validate([<<"values">>, <<"local_filter">>, <<"ldap">>, <<"auth">>|Path], + [{values, Value}]) -> + 
validate_root_or_host_config(Path), + validate_non_empty_list(Value); +validate([<<"deref">>, <<"ldap">>, <<"auth">>|Path], + [{ldap_deref, Value}]) -> + validate_root_or_host_config(Path), + validate_enum(Value, [never, always, finding, searching]); +validate([item, <<"sasl_mechanisms">>, <<"auth">>|Path], + [Value]) -> + validate_root_or_host_config(Path), + validate_module(Value); +validate([<<"instances">>, <<"external">>, <<"auth">>|Path], + [{extauth_instances, Value}]) -> + validate_root_or_host_config(Path), + validate_positive_integer(Value); +validate([<<"program">>, <<"external">>, <<"auth">>|Path], + [{extauth_program, Value}]) -> + validate_root_or_host_config(Path), + validate_non_empty_string(Value); +validate([<<"file">>, <<"secret">>, <<"jwt">>, <<"auth">>|Path], + [{jwt_secret_source, Value}]) -> + validate_root_or_host_config(Path), + validate_non_empty_string(Value); +validate([<<"env">>, <<"secret">>, <<"jwt">>, <<"auth">>|Path], + [{jwt_secret_source, {env, Value}}]) -> + validate_root_or_host_config(Path), + validate_non_empty_string(Value); +validate([<<"algorithm">>, <<"jwt">>, <<"auth">>|Path], + [{jwt_algorithm, Value}]) -> + validate_root_or_host_config(Path), + validate_enum(Value, ["HS256", "RS256", "ES256", "HS386", "RS386", "ES386", + "HS512", "RS512", "ES512"]); +validate([<<"username_key">>, <<"jwt">>, <<"auth">>|Path], + [{jwt_username_key, Value}]) -> + validate_root_or_host_config(Path), + validate_non_empty_atom(Value); +validate([<<"bucket_type">>, <<"riak">>, <<"auth">>|Path], + [{bucket_type, Value}]) -> + validate_root_or_host_config(Path), + validate_non_empty_binary(Value); + +%% outgoing_pools +validate([_Tag, _Type, <<"outgoing_pools">>], + [{TypeAtom, Scope, TagAtom, _Options, _ConnectionOptions}]) -> + validate_enum(TypeAtom, [redis, riak, http, rdbms, cassandra, elastic, generic, rabbit, ldap]), + validate_pool_scope(Scope), + validate_non_empty_atom(TagAtom); +validate([<<"workers">>, _Tag, _Type, 
<<"outgoing_pools">>], + [{workers, Value}]) -> + validate_positive_integer(Value); +validate([<<"strategy">>, _Tag, _Type, <<"outgoing_pools">>], + [{strategy, Value}]) -> + validate_wpool_strategy(Value); +validate([<<"call_timeout">>, _Tag, _Type, <<"outgoing_pools">>], + [{call_timeout, Value}]) -> + validate_positive_integer(Value); +validate([<<"keepalive_interval">>, _Conn, _Tag, <<"rdbms">>, <<"outgoing_pools">>], + [{keepalive_interval, Value}]) -> + validate_positive_integer(Value); +validate([{connection, Driver}, _Tag, <<"rdbms">>, <<"outgoing_pools">>], + [_Value]) -> + validate_enum(Driver, [odbc, pgsql, mysql]); +validate([Key, {connection, _}, _Tag, <<"rdbms">>, <<"outgoing_pools">>], + [{_, Value}]) when Key =:= <<"host">>; + Key =:= <<"database">>; + Key =:= <<"username">>; + Key =:= <<"password">> -> + validate_non_empty_string(Value); +validate([<<"port">>, {connection, _}, _Tag, <<"rdbms">>, <<"outgoing_pools">>], + [{port, Value}]) -> + validate_port(Value); +validate([<<"host">>, _Conn, _Tag, <<"http">>, <<"outgoing_pools">>], + [{server, Value}]) -> + validate_non_empty_string(Value); +validate([<<"path_prefix">>, _Conn, _Tag, <<"http">>, <<"outgoing_pools">>], + [{path_prefix, Value}]) -> + validate_non_empty_string(Value); +validate([<<"request_timeout">>, _Conn, _Tag, <<"http">>, <<"outgoing_pools">>], + [{request_timeout, Value}]) -> + validate_non_negative_integer(Value); +validate([<<"host">>, _Conn, _Tag, <<"redis">>, <<"outgoing_pools">>], + [{host, Value}]) -> + validate_non_empty_string(Value); +validate([<<"port">>, _Conn, _Tag, <<"redis">>, <<"outgoing_pools">>], + [{port, Value}]) -> + validate_port(Value); +validate([<<"database">>, _Conn, _Tag, <<"redis">>, <<"outgoing_pools">>], + [{database, Value}]) -> + validate_non_negative_integer(Value); +validate([<<"password">>, _Conn, _Tag, <<"redis">>, <<"outgoing_pools">>], + [{host, Value}]) -> + validate_string(Value); +validate([<<"address">>, _Conn, _Tag, <<"riak">>, 
<<"outgoing_pools">>], + [{address, Value}]) -> + validate_non_empty_string(Value); +validate([<<"port">>, _Conn, _Tag, <<"riak">>, <<"outgoing_pools">>], + [{port, Value}]) -> + validate_port(Value); +validate([<<"credentials">>, _Conn, _Tag, <<"riak">>, <<"outgoing_pools">>], + [{credentials, User, Password}]) -> + validate_non_empty_string(User), + validate_non_empty_string(Password); +validate([<<"cacertfile">>, _Conn, _Tag, <<"riak">>, <<"outgoing_pools">>], + [{cacertfile, Value}]) -> + validate_non_empty_string(Value); +validate([<<"certfile">>, _Conn, _Tag, <<"riak">>, <<"outgoing_pools">>], + [{certfile, Value}]) -> + validate_non_empty_string(Value); +validate([<<"keyfile">>, _Conn, _Tag, <<"riak">>, <<"outgoing_pools">>], + [{keyfile, Value}]) -> + validate_non_empty_string(Value); +validate([<<"servers">>, _Conn, _Tag, <<"cassandra">>, <<"outgoing_pools">>], + [{servers, Value}]) -> + [{validate_non_empty_string(Host), validate_port(Port)} || {Host, Port} <- Value]; +validate([<<"keyspace">>, _Conn, _Tag, <<"cassandra">>, <<"outgoing_pools">>], + [{keyspace, Value}]) -> + validate_non_empty_string(Value); +validate([<<"host">>, _Conn, _Tag, <<"elastic">>, <<"outgoing_pools">>], + [{host, Value}]) -> + validate_non_empty_string(Value); +validate([<<"port">>, _Conn, _Tag, <<"elastic">>, <<"outgoing_pools">>], + [{host, Value}]) -> + validate_port(Value); +validate([<<"amqp_host">>, _Conn, _Tag, <<"rabbit">>, <<"outgoing_pools">>], + [{amqp_host, Value}]) -> + validate_non_empty_string(Value); +validate([<<"amqp_port">>, _Conn, _Tag, <<"rabbit">>, <<"outgoing_pools">>], + [{amqp_port, Value}]) -> + validate_port(Value); +validate([<<"amqp_username">>, _Conn, _Tag, <<"rabbit">>, <<"outgoing_pools">>], + [{amqp_username, Value}]) -> + validate_non_empty_string(Value); +validate([<<"amqp_password">>, _Conn, _Tag, <<"rabbit">>, <<"outgoing_pools">>], + [{amqp_password, Value}]) -> + validate_non_empty_string(Value); +validate([<<"confirms_enabled">>, _Conn, 
_Tag, <<"rabbit">>, <<"outgoing_pools">>], + [{confirms_enabled, Value}]) -> + validate_boolean(Value); +validate([<<"max_worker_queue_len">>, _Conn, _Tag, <<"rabbit">>, <<"outgoing_pools">>], + [{max_worker_queue_len, Value}]) -> + validate_non_negative_integer_or_infinity(Value); +validate([<<"host">>, _Conn, _Tag, <<"ldap">>, <<"outgoing_pools">>], + [{host, Value}]) -> + validate_non_empty_string(Value); +validate([<<"port">>, _Conn, _Tag, <<"ldap">>, <<"outgoing_pools">>], + [{port, Value}]) -> + validate_port(Value); +validate([<<"servers">>, _Conn, _Tag, <<"ldap">>, <<"outgoing_pools">>], + [{servers, Value}]) -> + [validate_non_empty_string(Server) || Server <- Value]; +validate([<<"encrypt">>, _Conn, _Tag, <<"ldap">>, <<"outgoing_pools">>], + [{encrypt, Value}]) -> + validate_enum(Value, [tls, none]); +validate([<<"rootdn">>, _Conn, _Tag, <<"ldap">>, <<"outgoing_pools">>], + [{rootdn, Value}]) -> + validate_string(Value); +validate([<<"password">>, _Conn, _Tag, <<"ldap">>, <<"outgoing_pools">>], + [{password, Value}]) -> + validate_string(Value); +validate([<<"connect_interval">>, _Conn, _Tag, <<"ldap">>, <<"outgoing_pools">>], + [{connect_interval, Value}]) -> + validate_positive_integer(Value); + +%% shaper +validate([_, <<"shaper">>|Path], + [#config{value = {maxrate, Value}}]) -> + validate_root_or_host_config(Path), + validate_positive_integer(Value); + +%% s2s +validate([<<"timeout">>, <<"dns">>, <<"s2s">>], + [{timeout, Value}]) -> + validate_positive_integer(Value); +validate([<<"retries">>, <<"dns">>, <<"s2s">>], + [{retries, Value}]) -> + validate_positive_integer(Value); +validate([<<"port">>, <<"outgoing">>, <<"s2s">>], + [#local_config{value = Value}]) -> + validate_port(Value); +validate([<<"ip_versions">>, <<"outgoing">>, <<"s2s">>], + [#local_config{value = Value}]) -> + validate_non_empty_list(Value); +validate([<<"connection_timeout">>, <<"outgoing">>, <<"s2s">>], + [#local_config{value = Value}]) -> + validate_timeout(Value); 
+validate([<<"use_starttls">>, <<"s2s">>], + [#local_config{value = Value}]) -> + validate_enum(Value, [false, optional, required, required_trusted]); +validate([<<"certfile">>, <<"s2s">>], + [#local_config{value = Value}]) -> + validate_non_empty_string(Value); +validate([<<"default_policy">>, <<"s2s">>|Path], + [#local_config{value = Value}]) -> + validate_root_or_host_config(Path), + validate_enum(Value, [allow, deny]); +validate([<<"host">>, item, <<"host_policy">>, <<"s2s">>|Path], + [{host, Value}]) -> + validate_root_or_host_config(Path), + validate_non_empty_binary(Value); +validate([<<"policy">>, item, <<"host_policy">>, <<"s2s">>|Path], + [{policy, Value}]) -> + validate_root_or_host_config(Path), + validate_enum(Value, [allow, deny]); +validate([<<"host">>, item, <<"address">>, <<"s2s">>], + [{host, Value}]) -> + validate_non_empty_binary(Value); +validate([<<"ip_address">>, item, <<"address">>, <<"s2s">>], + [{ip_address, Value}]) -> + validate_ip_address(Value); +validate([<<"port">>, item, <<"address">>, <<"s2s">>], + [{port, Value}]) -> + validate_port(Value); +validate([item, <<"domain_certfile">>, <<"s2s">>], + [#local_config{key = {domain_certfile, Domain}, value = Certfile}]) -> + validate_non_empty_string(Domain), + validate_non_empty_string(Certfile); +validate([<<"shared">>, <<"s2s">>|Path], + [#local_config{value = Value}]) -> + validate_root_or_host_config(Path), + validate_non_empty_binary(Value); +validate([<<"max_retry_delay">>, <<"s2s">>|Path], + [#local_config{value = Value}]) -> + validate_root_or_host_config(Path), + validate_positive_integer(Value); + +%% Services +validate([item, <<"submods">>, <<"service_admin_extra">>, <<"services">>], + [Value]) -> + validate_backend(service_admin_extra, Value); +validate([<<"initial_report">>, + <<"service_mongoose_system_metrics">>, <<"services">>], + [{initial_report, Value}]) -> + validate_non_negative_integer(Value); +validate([<<"periodic_report">>, + <<"service_mongoose_system_metrics">>, 
<<"services">>], + [{periodic_report, Value}]) -> + validate_non_negative_integer(Value); +validate([<<"tracking_id">>, + <<"service_mongoose_system_metrics">>, <<"services">>], + [{tracking_id, Value}]) -> + validate_non_empty_string(Value); + +%% Modules +validate([<<"callback_module">>, <<"http">>, <<"backend">>, + <<"mod_event_pusher">>, <<"modules">>|_], + [{callback_module, V}]) -> + validate_module(V); +validate([<<"path">>, <<"http">>, <<"backend">>, + <<"mod_event_pusher">>, <<"modules">>|_], + [{path, V}]) -> + validate_string(V); +validate([<<"pool_name">>, <<"http">>, <<"backend">>, + <<"mod_event_pusher">>, <<"modules">>|_], + [{pool_name, V}]) -> + validate_pool_name(V); +validate([<<"backend">>, <<"push">>, <<"backend">>, + <<"mod_event_pusher">>, <<"modules">>|_], + [{backend, V}]) -> + validate_backend(mod_event_pusher_push, V); +validate([<<"plugin_module">>, <<"push">>, <<"backend">>, + <<"mod_event_pusher">>, <<"modules">>|_], + [{plugin_module, V}]) -> + validate_module(V); +validate([item, <<"virtual_pubsub_hosts">>, <<"push">>, <<"backend">>, + <<"mod_event_pusher">>, <<"modules">>|_], + [V]) -> + validate_domain_template(V); +validate([<<"strategy">>, <<"wpool">>, <<"push">>, <<"backend">>, + <<"mod_event_pusher">>, <<"modules">>|_], + [{strategy, V}]) -> + validate_wpool_strategy(V); +validate([<<"workers">>, <<"wpool">>, <<"push">>, <<"backend">>, + <<"mod_event_pusher">>, <<"modules">>|_], + [{workers, V}]) -> + validate_positive_integer(V); +validate([<<"name">>, <<"chat_msg_exchange">>, <<"rabbit">>, <<"backend">>, + <<"mod_event_pusher">>, <<"modules">>|_], + [{name, V}]) -> + validate_non_empty_binary(V); +validate([<<"type">>, <<"chat_msg_exchange">>, <<"rabbit">>, <<"backend">>, + <<"mod_event_pusher">>, <<"modules">>|_], + [{type, V}]) -> + validate_non_empty_binary(V); +validate([<<"recv_topic">>, <<"chat_msg_exchange">>, <<"rabbit">>, <<"backend">>, + <<"mod_event_pusher">>, <<"modules">>|_], + [{recv_topic, V}]) -> + 
validate_non_empty_binary(V); +validate([<<"sent_topic">>, <<"chat_msg_exchange">>, <<"rabbit">>, <<"backend">>, + <<"mod_event_pusher">>, <<"modules">>|_], + [{sent_topic, V}]) -> + validate_non_empty_binary(V); +validate([<<"name">>, <<"groupchat_msg_exchange">>, <<"rabbit">>, <<"backend">>, + <<"mod_event_pusher">>, <<"modules">>|_], + [{name, V}]) -> + validate_non_empty_binary(V); +validate([<<"type">>, <<"groupchat_msg_exchange">>, <<"rabbit">>, <<"backend">>, + <<"mod_event_pusher">>, <<"modules">>|_], + [{type, V}]) -> + validate_non_empty_binary(V); +validate([<<"recv_topic">>, <<"groupchat_msg_exchange">>, <<"rabbit">>, <<"backend">>, + <<"mod_event_pusher">>, <<"modules">>|_], + [{recv_topic, V}]) -> + validate_non_empty_binary(V); +validate([<<"sent_topic">>, <<"groupchat_msg_exchange">>, <<"rabbit">>, <<"backend">>, + <<"mod_event_pusher">>, <<"modules">>|_], + [{sent_topic, V}]) -> + validate_non_empty_binary(V); +validate([<<"name">>, <<"presence_exchange">>, <<"rabbit">>, <<"backend">>, + <<"mod_event_pusher">>, <<"modules">>|_], + [{name, V}]) -> + validate_non_empty_binary(V); +validate([<<"type">>, <<"presence_exchange">>, <<"rabbit">>, <<"backend">>, + <<"mod_event_pusher">>, <<"modules">>|_], + [{type, V}]) -> + validate_non_empty_binary(V); +validate([<<"access_key_id">>, <<"sns">>, <<"backend">>, + <<"mod_event_pusher">>, <<"modules">>|_], + [{access_key_id, V}]) -> + validate_string(V); +validate([<<"account_id">>, <<"sns">>, <<"backend">>, + <<"mod_event_pusher">>, <<"modules">>|_], + [{account_id, V}]) -> + validate_string(V); +validate([<<"muc_host">>, <<"sns">>, <<"backend">>, + <<"mod_event_pusher">>, <<"modules">>|_], + [{muc_host, V}]) -> + validate_domain_template(V); +validate([<<"muc_messages_topic">>, <<"sns">>, <<"backend">>, + <<"mod_event_pusher">>, <<"modules">>|_], + [{muc_messages_topic, V}]) -> + validate_string(V); +validate([<<"plugin_module">>, <<"sns">>, <<"backend">>, + <<"mod_event_pusher">>, <<"modules">>|_], + 
[{plugin_module, V}]) -> + validate_module(V); +validate([<<"pm_messages_topic">>, <<"sns">>, <<"backend">>, + <<"mod_event_pusher">>, <<"modules">>|_], + [{pm_messages_topic, V}]) -> + validate_string(V); +validate([<<"pool_size">>, <<"sns">>, <<"backend">>, + <<"mod_event_pusher">>, <<"modules">>|_], + [{pool_size, V}]) -> + validate_non_negative_integer(V); +validate([<<"presence_updates_topic">>, <<"sns">>, <<"backend">>, + <<"mod_event_pusher">>, <<"modules">>|_], + [{presence_updates_topic, V}]) -> + validate_string(V); +validate([<<"publish_retry_count">>, <<"sns">>, <<"backend">>, + <<"mod_event_pusher">>, <<"modules">>|_], + [{publish_retry_count, V}]) -> + validate_non_negative_integer(V); +validate([<<"publish_retry_time_ms">>, <<"sns">>, <<"backend">>, + <<"mod_event_pusher">>, <<"modules">>|_], + [{publish_retry_time_ms, V}]) -> + validate_non_negative_integer(V); +validate([<<"region">>, <<"sns">>, <<"backend">>, + <<"mod_event_pusher">>, <<"modules">>|_], + [{region, V}]) -> + validate_string(V); +validate([<<"secret_access_key">>, <<"sns">>, <<"backend">>, + <<"mod_event_pusher">>, <<"modules">>|_], + [{secret_access_key, V}]) -> + validate_string(V); +validate([<<"sns_host">>, <<"sns">>, <<"backend">>, + <<"mod_event_pusher">>, <<"modules">>|_], + [{sns_host, V}]) -> + validate_string(V); +validate([<<"max_retries">>, <<"bounce">>, + <<"mod_global_distrib">>, <<"modules">>|_], + [{max_retries, V}]) -> + validate_non_negative_integer(V); +validate([<<"resend_after_ms">>, <<"bounce">>, + <<"mod_global_distrib">>, <<"modules">>|_], + [{resend_after_ms, V}]) -> + validate_non_negative_integer(V); +validate([<<"cache_missed">>, <<"cache">>, + <<"mod_global_distrib">>, <<"modules">>|_], + [{cache_missed, V}]) -> + validate_boolean(V); +validate([<<"domain_lifetime_seconds">>, <<"cache">>, + <<"mod_global_distrib">>, <<"modules">>|_], + [{domain_lifetime_seconds, V}]) -> + validate_non_negative_integer(V); +validate([<<"jid_lifetime_seconds">>, 
<<"cache">>, + <<"mod_global_distrib">>, <<"modules">>|_], + [{jid_lifetime_seconds, V}]) -> + validate_non_negative_integer(V); +validate([<<"max_jids">>, <<"cache">>, + <<"mod_global_distrib">>, <<"modules">>|_], + [{max_jids, V}]) -> + validate_non_negative_integer(V); +validate([<<"advertised_endpoints">>, <<"connections">>, + <<"mod_global_distrib">>, <<"modules">>|_], + [false]) -> + ok; +validate([<<"host">>, item, <<"advertised_endpoints">>, <<"connections">>, + <<"mod_global_distrib">>, <<"modules">>|_], + [V]) -> + validate_network_address(V); +validate([<<"port">>, item, <<"advertised_endpoints">>, <<"connections">>, + <<"mod_global_distrib">>, <<"modules">>|_], + [V]) -> + validate_network_port(V); +validate([<<"connections_per_endpoint">>, <<"connections">>, + <<"mod_global_distrib">>, <<"modules">>|_], + [{connections_per_endpoint, V}]) -> + validate_non_negative_integer(V); +validate([<<"disabled_gc_interval">>, <<"connections">>, + <<"mod_global_distrib">>, <<"modules">>|_], + [{disabled_gc_interval, V}]) -> + validate_non_negative_integer(V); +validate([<<"endpoint_refresh_interval">>, <<"connections">>, + <<"mod_global_distrib">>, <<"modules">>|_], + [{endpoint_refresh_interval, V}]) -> + validate_positive_integer(V); +validate([<<"endpoint_refresh_interval_when_empty">>, <<"connections">>, + <<"mod_global_distrib">>, <<"modules">>|_], + [{endpoint_refresh_interval_when_empty, V}]) -> + validate_positive_integer(V); +validate([<<"host">>, item, <<"endpoints">>, <<"connections">>, + <<"mod_global_distrib">>, <<"modules">>|_], + [V]) -> + validate_network_address(V); +validate([<<"port">>, item, <<"endpoints">>, <<"connections">>, + <<"mod_global_distrib">>, <<"modules">>|_], + [V]) -> + validate_network_port(V); +validate([<<"tls">>, <<"connections">>, + <<"mod_global_distrib">>, <<"modules">>|_], + [false]) -> + ok; +validate([<<"cacertfile">>, <<"tls">>, <<"connections">>, + <<"mod_global_distrib">>, <<"modules">>|_], + [{cafile, V}]) -> + 
validate_filename(V); +validate([<<"certfile">>, <<"tls">>, <<"connections">>, + <<"mod_global_distrib">>, <<"modules">>|_], + [{certfile, V}]) -> + validate_filename(V); +validate([<<"ciphers">>, <<"tls">>, <<"connections">>, + <<"mod_global_distrib">>, <<"modules">>|_], + [{ciphers, V}]) -> + validate_string(V); +validate([<<"dhfile">>, <<"tls">>, <<"connections">>, + <<"mod_global_distrib">>, <<"modules">>|_], + [{dhfile, V}]) -> + validate_filename(V); +validate([<<"global_host">>, <<"mod_global_distrib">>, <<"modules">>|_], + [{global_host, V}]) -> + validate_domain(V); +validate([<<"hosts_refresh_interval">>, <<"mod_global_distrib">>, <<"modules">>|_], + [{hosts_refresh_interval, V}]) -> + validate_non_negative_integer(V); +validate([<<"local_host">>, <<"mod_global_distrib">>, <<"modules">>|_], + [{local_host, V}]) -> + validate_domain(V); +validate([<<"message_ttl">>, <<"mod_global_distrib">>, <<"modules">>|_], + [{message_ttl, V}]) -> + validate_non_negative_integer(V); +validate([<<"expire_after">>, <<"redis">>, <<"mod_global_distrib">>, <<"modules">>|_], + [{expire_after, V}]) -> + validate_non_negative_integer(V); +validate([<<"pool">>, <<"redis">>, <<"mod_global_distrib">>, <<"modules">>|_], + [{pool, V}]) -> + validate_pool_name(V); +validate([<<"refresh_after">>, <<"redis">>, <<"mod_global_distrib">>, <<"modules">>|_], + [{refresh_after, V}]) -> + validate_non_negative_integer(V); +validate([<<"ack_freq">>, <<"mod_stream_management">>, <<"modules">>|_], + [{ack_freq, V}]) -> + validate_positive_integer_or_atom(V, never); +validate([<<"buffer_max">>, <<"mod_stream_management">>, <<"modules">>|_], + [{buffer_max, V}]) -> + validate_positive_integer_or_infinity_or_atom(V, no_buffer); +validate([<<"resume_timeout">>, <<"mod_stream_management">>, <<"modules">>|_], + [{resume_timeout, V}]) -> + validate_positive_integer(V); +validate([<<"enabled">>, <<"stale_h">>, <<"mod_stream_management">>, <<"modules">>|_], + [{enabled, V}]) -> + validate_boolean(V); 
+validate([<<"geriatric">>, <<"stale_h">>, <<"mod_stream_management">>, <<"modules">>|_], + [{stale_h_geriatric, V}]) -> + validate_positive_integer(V); +validate([<<"repeat_after">>, <<"stale_h">>, <<"mod_stream_management">>, <<"modules">>|_], + [{stale_h_repeat_after, V}]) -> + validate_positive_integer(V); +validate([<<"ldap_auth_check">>, <<"mod_shared_roster_ldap">>, <<"modules">>|_], + [{ldap_auth_check, V}]) -> + validate_boolean(V); +validate([<<"ldap_base">>, <<"mod_shared_roster_ldap">>, <<"modules">>|_], + [{ldap_base, V}]) -> + validate_string(V); +validate([<<"ldap_deref">>, <<"mod_shared_roster_ldap">>, <<"modules">>|_], + [{ldap_deref, V}]) -> + validate_enum(V, [never,always,finding,searching]); +validate([<<"ldap_filter">>, <<"mod_shared_roster_ldap">>, <<"modules">>|_], + [{ldap_filter, V}]) -> + validate_string(V); +validate([<<"ldap_gfilter">>, <<"mod_shared_roster_ldap">>, <<"modules">>|_], + [{ldap_gfilter, V}]) -> + validate_string(V); +validate([<<"ldap_group_cache_size">>, <<"mod_shared_roster_ldap">>, <<"modules">>|_], + [{ldap_group_cache_size, V}]) -> + validate_non_negative_integer(V); +validate([<<"ldap_group_cache_validity">>, <<"mod_shared_roster_ldap">>, <<"modules">>|_], + [{ldap_group_cache_validity, V}]) -> + validate_non_negative_integer(V); +validate([<<"ldap_groupattr">>, <<"mod_shared_roster_ldap">>, <<"modules">>|_], + [{ldap_groupattr, V}]) -> + validate_string(V); +validate([<<"ldap_groupdesc">>, <<"mod_shared_roster_ldap">>, <<"modules">>|_], + [{ldap_groupdesc, V}]) -> + validate_string(V); +validate([<<"ldap_memberattr">>, <<"mod_shared_roster_ldap">>, <<"modules">>|_], + [{ldap_memberattr, V}]) -> + validate_string(V); +validate([<<"ldap_memberattr_format">>, <<"mod_shared_roster_ldap">>, <<"modules">>|_], + [{ldap_memberattr_format, V}]) -> + validate_string(V); +validate([<<"ldap_memberattr_format_re">>, <<"mod_shared_roster_ldap">>, <<"modules">>|_], + [{ldap_memberattr_format_re, V}]) -> + validate_string(V); 
+validate([<<"ldap_pool_tag">>, <<"mod_shared_roster_ldap">>, <<"modules">>|_], + [{ldap_pool_tag, V}]) -> + validate_pool_name(V); +validate([<<"ldap_rfilter">>, <<"mod_shared_roster_ldap">>, <<"modules">>|_], + [{ldap_rfilter, V}]) -> + validate_string(V); +validate([<<"ldap_ufilter">>, <<"mod_shared_roster_ldap">>, <<"modules">>|_], + [{ldap_ufilter, V}]) -> + validate_string(V); +validate([<<"ldap_user_cache_size">>, <<"mod_shared_roster_ldap">>, <<"modules">>|_], + [{ldap_user_cache_size, V}]) -> + validate_non_negative_integer(V); +validate([<<"ldap_user_cache_validity">>, <<"mod_shared_roster_ldap">>, <<"modules">>|_], + [{ldap_user_cache_validity, V}]) -> + validate_non_negative_integer(V); +validate([<<"ldap_userdesc">>, <<"mod_shared_roster_ldap">>, <<"modules">>|_], + [{ldap_userdesc, V}]) -> + validate_string(V); +validate([<<"ldap_useruid">>, <<"mod_shared_roster_ldap">>, <<"modules">>|_], + [{ldap_useruid, V}]) -> + validate_string(V); +validate([<<"iqdisc">>, <<"mod_version">>, <<"modules">>|_], + [{iqdisc, V}]) -> + validate_iqdisc(V); +validate([<<"os_info">>, <<"mod_version">>, <<"modules">>|_], + [{os_info, V}]) -> + validate_boolean(V); +validate([<<"access">>, <<"mod_register">>, <<"modules">>|_], + [{access, V}]) -> + validate_non_empty_atom(V); +validate([item, <<"ip_access">>, <<"mod_register">>, <<"modules">>|_], + [V]) -> + validate_ip_access(V); +validate([<<"iqdisc">>, <<"mod_register">>, <<"modules">>|_], + [{iqdisc, V}]) -> + validate_iqdisc(V); +validate([<<"password_strength">>, <<"mod_register">>, <<"modules">>|_], + [{password_strength, V}]) -> + validate_non_negative_integer(V); +validate([item, <<"registration_watchers">>, <<"mod_register">>, <<"modules">>|_], + [V]) -> + validate_jid(V); +validate([<<"body">>, <<"welcome_message">>, <<"mod_register">>, <<"modules">>|_], + [{body, V}]) -> + validate_string(V); +validate([<<"subject">>, <<"welcome_message">>, <<"mod_register">>, <<"modules">>|_], + [{subject, V}]) -> + 
validate_string(V); +validate([<<"iqdisc">>, <<"mod_adhoc">>, <<"modules">>|_], + [{iqdisc, V}]) -> + validate_iqdisc(V); +validate([<<"report_commands_node">>, <<"mod_adhoc">>, <<"modules">>|_], + [{report_commands_node, V}]) -> + validate_boolean(V); +validate([<<"cache_life_time">>, <<"mod_caps">>, <<"modules">>|_], + [{cache_life_time, V}]) -> + validate_non_negative_integer_or_infinity(V); +validate([<<"cache_size">>, <<"mod_caps">>, <<"modules">>|_], + [{cache_size, V}]) -> + validate_non_negative_integer(V); +validate([<<"type">>, _, <<"service">>, <<"mod_extdisco">>, <<"modules">>|_], + [{type, V}]) -> + validate_non_empty_atom(V); +validate([<<"host">>, _,<<"service">>, <<"mod_extdisco">>, <<"modules">>|_], + [{host, V}]) -> + validate_non_empty_list(V); +validate([<<"port">>, _,<<"service">>, <<"mod_extdisco">>, <<"modules">>|_], + [{port, V}]) -> + validate_port(V); +validate([<<"transport">>,_, <<"service">>, <<"mod_extdisco">>, <<"modules">>|_], + [{transport, V}]) -> + validate_non_empty_list(V); +validate([<<"username">>, _,<<"service">>, <<"mod_extdisco">>, <<"modules">>|_], + [{username, V}]) -> + validate_non_empty_list(V); +validate([<<"password">>, _,<<"service">>, <<"mod_extdisco">>, <<"modules">>|_], + [{password, V}]) -> + validate_non_empty_list(V); +validate([<<"backend">>, <<"mod_http_upload">>, <<"modules">>|_], + [{backend, V}]) -> + validate_backend(mod_http_upload, V); +validate([<<"expiration_time">>, <<"mod_http_upload">>, <<"modules">>|_], + [{expiration_time, V}]) -> + validate_non_negative_integer(V); +validate([<<"host">>, <<"mod_http_upload">>, <<"modules">>|_], + [{host, V}]) -> + validate_domain_template(V); +validate([<<"iqdisc">>, <<"mod_http_upload">>, <<"modules">>|_], + [{iqdisc, V}]) -> + validate_iqdisc(V); +validate([<<"max_file_size">>, <<"mod_http_upload">>, <<"modules">>|_], + [{max_file_size, V}]) -> + validate_non_negative_integer(V); +validate([<<"access_key_id">>, <<"s3">>, <<"mod_http_upload">>, 
<<"modules">>|_], + [{access_key_id, V}]) -> + validate_string(V); +validate([<<"add_acl">>, <<"s3">>, <<"mod_http_upload">>, <<"modules">>|_], + [{add_acl, V}]) -> + validate_boolean(V); +validate([<<"bucket_url">>, <<"s3">>, <<"mod_http_upload">>, <<"modules">>|_], + [{bucket_url, V}]) -> + validate_url(V); +validate([<<"region">>, <<"s3">>, <<"mod_http_upload">>, <<"modules">>|_], + [{region, V}]) -> + validate_string(V); +validate([<<"secret_access_key">>, <<"s3">>, <<"mod_http_upload">>, <<"modules">>|_], + [{secret_access_key, V}]) -> + validate_string(V); +validate([<<"token_bytes">>, <<"mod_http_upload">>, <<"modules">>|_], + [{token_bytes, V}]) -> + validate_positive_integer(V); +validate([<<"api_version">>, <<"mod_push_service_mongoosepush">>, <<"modules">>|_], + [{api_version, V}]) -> + validate_string(V); +validate([<<"max_http_connections">>, <<"mod_push_service_mongoosepush">>, <<"modules">>|_], + [{max_http_connections, V}]) -> + validate_non_negative_integer(V); +validate([<<"pool_name">>, <<"mod_push_service_mongoosepush">>, <<"modules">>|_], + [{pool_name, V}]) -> + validate_pool_name(V); +validate([<<"backend">>, <<"mod_last">>, <<"modules">>|_], + [{backend, V}]) -> + validate_backend(mod_last, V); +validate([<<"iqdisc">>, <<"mod_last">>, <<"modules">>|_], + [{iqdisc, V}]) -> + validate_iqdisc(V); +validate([<<"bucket_type">>, <<"riak">>, <<"mod_last">>, <<"modules">>|_], + [{bucket_type, V}]) -> + validate_non_empty_binary(V); +validate([<<"iqdisc">>, <<"mod_time">>, <<"modules">>|_], + [{iqdisc, V}]) -> + validate_iqdisc(V); +validate([item, <<"routes">>, <<"mod_revproxy">>, <<"modules">>|_], + [V]) -> + validate_revproxy_route(V); +validate([<<"backend">>, <<"mod_privacy">>, <<"modules">>|_], + [{backend, V}]) -> + validate_backend(mod_privacy, V); +validate([<<"bucket_type">>, <<"riak">>, <<"mod_privacy">>, <<"modules">>|_], + [{bucket_type, V}]) -> + validate_non_empty_binary(V); +validate([<<"defaults_bucket_type">>, <<"riak">>, 
<<"mod_privacy">>, <<"modules">>|_], + [{defaults_bucket_type, V}]) -> + validate_non_empty_binary(V); +validate([<<"names_bucket_type">>, <<"riak">>, <<"mod_privacy">>, <<"modules">>|_], + [{names_bucket_type, V}]) -> + validate_non_empty_binary(V); +validate([<<"archive_chat_markers">>, <<"mod_mam_meta">>, <<"modules">>|_], + [{archive_chat_markers, V}]) -> + validate_boolean(V); +validate([<<"archive_groupchats">>, <<"mod_mam_meta">>, <<"modules">>|_], + [{archive_groupchats, V}]) -> + validate_boolean(V); +validate([<<"async_writer">>, <<"mod_mam_meta">>, <<"modules">>|_], + [{async_writer, V}]) -> + validate_boolean(V); +validate([<<"async_writer_rdbms_pool">>, <<"mod_mam_meta">>, <<"modules">>|_], + [{async_writer_rdbms_pool, V}]) -> + validate_non_empty_atom(V); +validate([<<"backend">>, <<"mod_mam_meta">>, <<"modules">>|_], + [{backend, V}]) -> + validate_enum(V, [rdbms,riak,cassandra,elasticsearch]); +validate([<<"cache_users">>, <<"mod_mam_meta">>, <<"modules">>|_], + [{cache_users, V}]) -> + validate_boolean(V); +validate([<<"db_jid_format">>, <<"mod_mam_meta">>, <<"modules">>|_], + [{db_jid_format, V}]) -> + validate_module(V); +validate([<<"db_message_format">>, <<"mod_mam_meta">>, <<"modules">>|_], + [{db_message_format, V}]) -> + validate_module(V); +validate([<<"default_result_limit">>, <<"mod_mam_meta">>, <<"modules">>|_], + [{default_result_limit, V}]) -> + validate_non_negative_integer(V); +validate([<<"extra_lookup_params">>, <<"mod_mam_meta">>, <<"modules">>|_], + [{extra_lookup_params, V}]) -> + validate_module(V); +validate([<<"flush_interval">>, <<"mod_mam_meta">>, <<"modules">>|_], + [{flush_interval, V}]) -> + validate_non_negative_integer(V); +validate([<<"full_text_search">>, <<"mod_mam_meta">>, <<"modules">>|_], + [{full_text_search, V}]) -> + validate_boolean(V); +validate([<<"host">>, <<"mod_mam_meta">>, <<"modules">>|_], + [{host, V}]) -> + validate_domain_template(V); +validate([<<"is_archivable_message">>, <<"mod_mam_meta">>, 
<<"modules">>|_], + [{is_archivable_message, V}]) -> + validate_module(V); +validate([<<"max_batch_size">>, <<"mod_mam_meta">>, <<"modules">>|_], + [{max_batch_size, V}]) -> + validate_non_negative_integer(V); +validate([<<"max_result_limit">>, <<"mod_mam_meta">>, <<"modules">>|_], + [{max_result_limit, V}]) -> + validate_non_negative_integer(V); +validate([<<"message_retraction">>, <<"mod_mam_meta">>, <<"modules">>|_], + [{message_retraction, V}]) -> + validate_boolean(V); +validate([<<"archive_chat_markers">>, <<"muc">>, <<"mod_mam_meta">>, <<"modules">>|_], + [{archive_chat_markers, V}]) -> + validate_boolean(V); +validate([<<"archive_groupchats">>, <<"muc">>, <<"mod_mam_meta">>, <<"modules">>|_], + [{archive_groupchats, V}]) -> + validate_boolean(V); +validate([<<"async_writer">>, <<"muc">>, <<"mod_mam_meta">>, <<"modules">>|_], + [{async_writer, V}]) -> + validate_boolean(V); +validate([<<"async_writer_rdbms_pool">>, <<"muc">>, <<"mod_mam_meta">>, <<"modules">>|_], + [{async_writer_rdbms_pool, V}]) -> + validate_non_empty_atom(V); +validate([<<"backend">>, <<"muc">>, <<"mod_mam_meta">>, <<"modules">>|_], + [{backend, V}]) -> + validate_enum(V, [rdbms,riak,cassandra,elasticsearch]); +validate([<<"cache_users">>, <<"muc">>, <<"mod_mam_meta">>, <<"modules">>|_], + [{cache_users, V}]) -> + validate_boolean(V); +validate([<<"db_jid_format">>, <<"muc">>, <<"mod_mam_meta">>, <<"modules">>|_], + [{db_jid_format, V}]) -> + validate_module(V); +validate([<<"db_message_format">>, <<"muc">>, <<"mod_mam_meta">>, <<"modules">>|_], + [{db_message_format, V}]) -> + validate_module(V); +validate([<<"default_result_limit">>, <<"muc">>, <<"mod_mam_meta">>, <<"modules">>|_], + [{default_result_limit, V}]) -> + validate_non_negative_integer(V); +validate([<<"extra_lookup_params">>, <<"muc">>, <<"mod_mam_meta">>, <<"modules">>|_], + [{extra_lookup_params, V}]) -> + validate_module(V); +validate([<<"flush_interval">>, <<"muc">>, <<"mod_mam_meta">>, <<"modules">>|_], + 
[{flush_interval, V}]) -> + validate_non_negative_integer(V); +validate([<<"full_text_search">>, <<"muc">>, <<"mod_mam_meta">>, <<"modules">>|_], + [{full_text_search, V}]) -> + validate_boolean(V); +validate([<<"host">>, <<"muc">>, <<"mod_mam_meta">>, <<"modules">>|_], + [{host, V}]) -> + validate_domain_template(V); +validate([<<"is_archivable_message">>, <<"muc">>, <<"mod_mam_meta">>, <<"modules">>|_], + [{is_archivable_message, V}]) -> + validate_module(V); +validate([<<"max_batch_size">>, <<"muc">>, <<"mod_mam_meta">>, <<"modules">>|_], + [{max_batch_size, V}]) -> + validate_non_negative_integer(V); +validate([<<"max_result_limit">>, <<"muc">>, <<"mod_mam_meta">>, <<"modules">>|_], + [{max_result_limit, V}]) -> + validate_non_negative_integer(V); +validate([<<"message_retraction">>, <<"muc">>, <<"mod_mam_meta">>, <<"modules">>|_], + [{message_retraction, V}]) -> + validate_boolean(V); +validate([<<"no_stanzaid_element">>, <<"muc">>, <<"mod_mam_meta">>, <<"modules">>|_], + [{no_stanzaid_element, V}]) -> + validate_boolean(V); +validate([<<"rdbms_message_format">>, <<"muc">>, <<"mod_mam_meta">>, <<"modules">>|_], + [{rdbms_message_format, V}]) -> + validate_enum(V, [simple,internal]); +validate([<<"simple">>, <<"muc">>, <<"mod_mam_meta">>, <<"modules">>|_], + [{simple, V}]) -> + validate_boolean(V); +validate([<<"user_prefs_store">>, <<"muc">>, <<"mod_mam_meta">>, <<"modules">>|_], + [{user_prefs_store, V}]) -> + validate_enum(V, [false,rdbms,cassandra,mnesia]); +validate([<<"no_stanzaid_element">>, <<"mod_mam_meta">>, <<"modules">>|_], + [{no_stanzaid_element, V}]) -> + validate_boolean(V); +validate([<<"archive_chat_markers">>, <<"pm">>, <<"mod_mam_meta">>, <<"modules">>|_], + [{archive_chat_markers, V}]) -> + validate_boolean(V); +validate([<<"archive_groupchats">>, <<"pm">>, <<"mod_mam_meta">>, <<"modules">>|_], + [{archive_groupchats, V}]) -> + validate_boolean(V); +validate([<<"async_writer">>, <<"pm">>, <<"mod_mam_meta">>, <<"modules">>|_], + 
[{async_writer, V}]) -> + validate_boolean(V); +validate([<<"async_writer_rdbms_pool">>, <<"pm">>, <<"mod_mam_meta">>, <<"modules">>|_], + [{async_writer_rdbms_pool, V}]) -> + validate_non_empty_atom(V); +validate([<<"backend">>, <<"pm">>, <<"mod_mam_meta">>, <<"modules">>|_], + [{backend, V}]) -> + validate_enum(V, [rdbms,riak,cassandra,elasticsearch]); +validate([<<"cache_users">>, <<"pm">>, <<"mod_mam_meta">>, <<"modules">>|_], + [{cache_users, V}]) -> + validate_boolean(V); +validate([<<"db_jid_format">>, <<"pm">>, <<"mod_mam_meta">>, <<"modules">>|_], + [{db_jid_format, V}]) -> + validate_module(V); +validate([<<"db_message_format">>, <<"pm">>, <<"mod_mam_meta">>, <<"modules">>|_], + [{db_message_format, V}]) -> + validate_module(V); +validate([<<"default_result_limit">>, <<"pm">>, <<"mod_mam_meta">>, <<"modules">>|_], + [{default_result_limit, V}]) -> + validate_non_negative_integer(V); +validate([<<"extra_lookup_params">>, <<"pm">>, <<"mod_mam_meta">>, <<"modules">>|_], + [{extra_lookup_params, V}]) -> + validate_module(V); +validate([<<"flush_interval">>, <<"pm">>, <<"mod_mam_meta">>, <<"modules">>|_], + [{flush_interval, V}]) -> + validate_non_negative_integer(V); +validate([<<"full_text_search">>, <<"pm">>, <<"mod_mam_meta">>, <<"modules">>|_], + [{full_text_search, V}]) -> + validate_boolean(V); +validate([<<"host">>, <<"pm">>, <<"mod_mam_meta">>, <<"modules">>|_], + [{host, V}]) -> + validate_domain_template(V); +validate([<<"is_archivable_message">>, <<"pm">>, <<"mod_mam_meta">>, <<"modules">>|_], + [{is_archivable_message, V}]) -> + validate_module(V); +validate([<<"max_batch_size">>, <<"pm">>, <<"mod_mam_meta">>, <<"modules">>|_], + [{max_batch_size, V}]) -> + validate_non_negative_integer(V); +validate([<<"max_result_limit">>, <<"pm">>, <<"mod_mam_meta">>, <<"modules">>|_], + [{max_result_limit, V}]) -> + validate_non_negative_integer(V); +validate([<<"message_retraction">>, <<"pm">>, <<"mod_mam_meta">>, <<"modules">>|_], + [{message_retraction, 
V}]) -> + validate_boolean(V); +validate([<<"no_stanzaid_element">>, <<"pm">>, <<"mod_mam_meta">>, <<"modules">>|_], + [{no_stanzaid_element, V}]) -> + validate_boolean(V); +validate([<<"rdbms_message_format">>, <<"pm">>, <<"mod_mam_meta">>, <<"modules">>|_], + [{rdbms_message_format, V}]) -> + validate_enum(V, [simple,internal]); +validate([<<"simple">>, <<"pm">>, <<"mod_mam_meta">>, <<"modules">>|_], + [{simple, V}]) -> + validate_boolean(V); +validate([<<"user_prefs_store">>, <<"pm">>, <<"mod_mam_meta">>, <<"modules">>|_], + [{user_prefs_store, V}]) -> + validate_enum(V, [false,rdbms,cassandra,mnesia]); +validate([<<"rdbms_message_format">>, <<"mod_mam_meta">>, <<"modules">>|_], + [{rdbms_message_format, V}]) -> + validate_enum(V, [simple,internal]); +validate([<<"bucket_type">>, <<"riak">>, <<"mod_mam_meta">>, <<"modules">>|_], + [{bucket_type, V}]) -> + validate_non_empty_binary(V); +validate([<<"search_index">>, <<"riak">>, <<"mod_mam_meta">>, <<"modules">>|_], + [{search_index, V}]) -> + validate_non_empty_binary(V); +validate([<<"simple">>, <<"mod_mam_meta">>, <<"modules">>|_], + [{simple, V}]) -> + validate_boolean(V); +validate([<<"user_prefs_store">>, <<"mod_mam_meta">>, <<"modules">>|_], + [{user_prefs_store, V}]) -> + validate_enum(V, [false,rdbms,cassandra,mnesia]); +validate([<<"iqdisc">>, <<"mod_auth_token">>, <<"modules">>|_], + [{iqdisc, V}]) -> + validate_iqdisc(V); +validate([<<"validity_period">>,<<"mod_auth_token">>,<<"modules">>|_], Vs) -> + lists:foreach(fun validate_validity_period/1, Vs); +validate([<<"listen_port">>, <<"mod_jingle_sip">>, <<"modules">>|_], + [{listen_port, V}]) -> + validate_network_port(V); +validate([<<"local_host">>, <<"mod_jingle_sip">>, <<"modules">>|_], + [{local_host, V}]) -> + validate_network_address(V); +validate([<<"proxy_host">>, <<"mod_jingle_sip">>, <<"modules">>|_], + [{proxy_host, V}]) -> + validate_network_address(V); +validate([<<"proxy_port">>, <<"mod_jingle_sip">>, <<"modules">>|_], + [{proxy_port, 
V}]) -> + validate_network_port(V); +validate([<<"sdp_origin">>, <<"mod_jingle_sip">>, <<"modules">>|_], + [{sdp_origin, V}]) -> + validate_ip_address(V); +validate([<<"buffer_max">>, <<"mod_csi">>, <<"modules">>|_], + [{buffer_max, V}]) -> + validate_non_negative_integer_or_infinity(V); +validate([<<"iqdisc">>, <<"mod_sic">>, <<"modules">>|_], + [{iqdisc, V}]) -> + validate_iqdisc(V); +validate([<<"backend">>, <<"mod_roster">>, <<"modules">>|_], + [{backend, V}]) -> + validate_backend(mod_roster, V); +validate([<<"iqdisc">>, <<"mod_roster">>, <<"modules">>|_], + [{iqdisc, V}]) -> + validate_iqdisc(V); +validate([<<"bucket_type">>, <<"riak">>, <<"mod_roster">>, <<"modules">>|_], + [{bucket_type, V}]) -> + validate_non_empty_binary(V); +validate([<<"version_bucket_type">>, <<"riak">>, <<"mod_roster">>, <<"modules">>|_], + [{version_bucket_type, V}]) -> + validate_non_empty_binary(V); +validate([<<"store_current_id">>, <<"mod_roster">>, <<"modules">>|_], + [{store_current_id, V}]) -> + validate_boolean(V); +validate([<<"versioning">>, <<"mod_roster">>, <<"modules">>|_], + [{versioning, V}]) -> + validate_boolean(V); +validate([item, <<"keys">>, <<"mod_keystore">>, <<"modules">>|_], + [V]) -> + validate_keystore_key(V); +validate([<<"ram_key_size">>, <<"mod_keystore">>, <<"modules">>|_], + [{ram_key_size, V}]) -> + validate_non_negative_integer(V); +validate([<<"backend">>, <<"mod_vcard">>, <<"modules">>|_], + [{backend, V}]) -> + validate_backend(mod_vcard, V); +validate([<<"host">>, <<"mod_vcard">>, <<"modules">>|_], + [{host, V}]) -> + validate_domain_template(V); +validate([<<"iqdisc">>, <<"mod_vcard">>, <<"modules">>|_], + [{iqdisc, V}]) -> + validate_iqdisc(V); +validate([<<"ldap_base">>, <<"mod_vcard">>, <<"modules">>|_], + [{ldap_base, V}]) -> + validate_string(V); +validate([item, <<"ldap_binary_search_fields">>, <<"mod_vcard">>, <<"modules">>|_], + [V]) -> + validate_non_empty_binary(V); +validate([<<"ldap_deref">>, <<"mod_vcard">>, <<"modules">>|_], + 
[{ldap_deref, V}]) -> + validate_enum(V, [never,always,finding,searching]); +validate([<<"ldap_filter">>, <<"mod_vcard">>, <<"modules">>|_], + [{ldap_filter, V}]) -> + validate_string(V); +validate([<<"ldap_pool_tag">>, <<"mod_vcard">>, <<"modules">>|_], + [{ldap_pool_tag, V}]) -> + validate_pool_name(V); +validate([item, <<"ldap_search_fields">>, <<"mod_vcard">>, <<"modules">>|_], + [V]) -> + validate_ldap_search_field(V); +validate([<<"ldap_search_operator">>, <<"mod_vcard">>, <<"modules">>|_], + [{ldap_search_operator, V}]) -> + validate_enum(V, ['or','and']); +validate([item, <<"ldap_search_reported">>, <<"mod_vcard">>, <<"modules">>|_], + [V]) -> + validate_ldap_search_reported(V); +validate([item, <<"ldap_uids">>, <<"mod_vcard">>, <<"modules">>|_], + [V]) -> + validate_ldap_uids(V); +validate([item, <<"ldap_vcard_map">>, <<"mod_vcard">>, <<"modules">>|_], + [V]) -> + validate_ldap_vcard_map(V); +validate([<<"matches">>, <<"mod_vcard">>, <<"modules">>|_], + [{matches, V}]) -> + validate_non_negative_integer_or_infinity(V); +validate([<<"bucket_type">>, <<"riak">>, <<"mod_vcard">>, <<"modules">>|_], + [{bucket_type, V}]) -> + validate_non_empty_binary(V); +validate([<<"search_index">>, <<"riak">>, <<"mod_vcard">>, <<"modules">>|_], + [{search_index, V}]) -> + validate_non_empty_binary(V); +validate([<<"search">>, <<"mod_vcard">>, <<"modules">>|_], + [{search, V}]) -> + validate_boolean(V); +validate([<<"backend">>, <<"mod_private">>, <<"modules">>|_], + [{backend, V}]) -> + validate_backend(mod_private, V); +validate([<<"iqdisc">>, <<"mod_private">>, <<"modules">>|_], + [{iqdisc, V}]) -> + validate_iqdisc(V); +validate([<<"bucket_type">>, <<"riak">>, <<"mod_private">>, <<"modules">>|_], + [{bucket_type, V}]) -> + validate_non_empty_binary(V); +validate([<<"backend">>, <<"mod_bosh">>, <<"modules">>|_], + [{backend, V}]) -> + validate_backend(mod_bosh, V); +validate([<<"inactivity">>, <<"mod_bosh">>, <<"modules">>|_], + [{inactivity, V}]) -> + 
validate_non_negative_integer_or_infinity(V); +validate([<<"max_wait">>, <<"mod_bosh">>, <<"modules">>|_], + [{max_wait, V}]) -> + validate_non_negative_integer_or_infinity(V); +validate([<<"server_acks">>, <<"mod_bosh">>, <<"modules">>|_], + [{server_acks, V}]) -> + validate_boolean(V); +validate([<<"aff_changes">>, <<"mod_inbox">>, <<"modules">>|_], + [{aff_changes, V}]) -> + validate_boolean(V); +validate([<<"backend">>, <<"mod_inbox">>, <<"modules">>|_], + [{backend, V}]) -> + validate_backend(mod_inbox, V); +validate([item, <<"groupchat">>, <<"mod_inbox">>, <<"modules">>|_], + [V]) -> + validate_groupchat_type(V); +validate([<<"iqdisc">>, <<"mod_inbox">>, <<"modules">>|_], + [{iqdisc, V}]) -> + validate_iqdisc(V); +validate([<<"remove_on_kicked">>, <<"mod_inbox">>, <<"modules">>|_], + [{remove_on_kicked, V}]) -> + validate_boolean(V); +validate([item, <<"reset_markers">>, <<"mod_inbox">>, <<"modules">>|_], + [V]) -> + validate_chat_marker_type(V); +validate([item, <<"extra_domains">>, <<"mod_disco">>, <<"modules">>|_], + [V]) -> + validate_binary_domain(V); +validate([item, <<"module">>, item, <<"server_info">>, <<"mod_disco">>, <<"modules">>|_], + [V]) -> + validate_module(V); +validate([<<"name">>, item, <<"server_info">>, <<"mod_disco">>, <<"modules">>|_], + [V]) -> + validate_non_empty_binary(V); +validate([item, <<"urls">>, item, <<"server_info">>, <<"mod_disco">>, <<"modules">>|_], + [V]) -> + validate_url(V); +validate([item, <<"urls">>, <<"mod_disco">>, <<"modules">>|_], + [V]) -> + validate_url(V); +validate([<<"users_can_see_hidden_services">>, <<"mod_disco">>, <<"modules">>|_], + [{users_can_see_hidden_services, V}]) -> + validate_boolean(V); +validate([<<"all_can_configure">>, <<"mod_muc_light">>, <<"modules">>|_], + [{all_can_configure, V}]) -> + validate_boolean(V); +validate([<<"all_can_invite">>, <<"mod_muc_light">>, <<"modules">>|_], + [{all_can_invite, V}]) -> + validate_boolean(V); +validate([<<"backend">>, <<"mod_muc_light">>, 
<<"modules">>|_], + [{backend, V}]) -> + validate_backend(mod_muc_light_db, V); +validate([<<"blocking">>, <<"mod_muc_light">>, <<"modules">>|_], + [{blocking, V}]) -> + validate_boolean(V); +validate([item, <<"config_schema">>, <<"mod_muc_light">>, <<"modules">>|_], + [V]) -> + validate_muc_config_schema(V); +validate([<<"equal_occupants">>, <<"mod_muc_light">>, <<"modules">>|_], + [{equal_occupants, V}]) -> + validate_boolean(V); +validate([<<"host">>, <<"mod_muc_light">>, <<"modules">>|_], + [{host, V}]) -> + validate_domain_template(V); +validate([<<"legacy_mode">>, <<"mod_muc_light">>, <<"modules">>|_], + [{legacy_mode, V}]) -> + validate_boolean(V); +validate([<<"max_occupants">>, <<"mod_muc_light">>, <<"modules">>|_], + [{max_occupants, V}]) -> + validate_positive_integer_or_infinity(V); +validate([<<"rooms_in_rosters">>, <<"mod_muc_light">>, <<"modules">>|_], + [{rooms_in_rosters, V}]) -> + validate_boolean(V); +validate([<<"rooms_per_page">>, <<"mod_muc_light">>, <<"modules">>|_], + [{rooms_per_page, V}]) -> + validate_positive_integer_or_infinity(V); +validate([<<"iqdisc">>, <<"mod_carboncopy">>, <<"modules">>|_], + [{iqdisc, V}]) -> + validate_iqdisc(V); +validate([<<"access_max_user_messages">>, <<"mod_offline">>, <<"modules">>|_], + [{access_max_user_messages, V}]) -> + validate_access_rule(V); +validate([<<"backend">>, <<"mod_offline">>, <<"modules">>|_], + [{backend, V}]) -> + validate_backend(mod_offline, V); +validate([<<"bucket_type">>, <<"riak">>, <<"mod_offline">>, <<"modules">>|_], + [{bucket_type, V}]) -> + validate_non_empty_binary(V); +validate([<<"access_log">>, <<"mod_muc_log">>, <<"modules">>|_], + [{access_log, V}]) -> + validate_access_rule(V); +validate([<<"css_file">>, <<"mod_muc_log">>, <<"modules">>|_], + [{cssfile, V}]) -> + validate_maybe_css_file(V); +validate([<<"dirname">>, <<"mod_muc_log">>, <<"modules">>|_], + [{dirname, V}]) -> + validate_enum(V, [room_jid,room_name]); +validate([<<"dirtype">>, <<"mod_muc_log">>, 
<<"modules">>|_], + [{dirtype, V}]) -> + validate_enum(V, [subdirs,plain]); +validate([<<"file_format">>, <<"mod_muc_log">>, <<"modules">>|_], + [{file_format, V}]) -> + validate_enum(V, [html,plaintext]); +validate([<<"outdir">>, <<"mod_muc_log">>, <<"modules">>|_], + [{outdir, V}]) -> + validate_dirname(V); +validate([<<"spam_prevention">>, <<"mod_muc_log">>, <<"modules">>|_], + [{spam_prevention, V}]) -> + validate_boolean(V); +validate([<<"timezone">>, <<"mod_muc_log">>, <<"modules">>|_], + [{timezone, V}]) -> + validate_enum(V, [local,universal]); +validate([<<"top_link">>, <<"mod_muc_log">>, <<"modules">>|_], + [{top_link, V}]) -> + validate_top_link(V); +validate([<<"access_createnode">>, <<"mod_pubsub">>, <<"modules">>|_], + [{access_createnode, V}]) -> + validate_access_rule(V); +validate([<<"backend">>, <<"mod_pubsub">>, <<"modules">>|_], + [{backend, V}]) -> + validate_backend(mod_pubsub_db, V); +validate([<<"access_model">>, <<"default_node_config">>, + <<"mod_pubsub">>, <<"modules">>|_], + [{access_model, V}]) -> + validate_non_empty_atom(V); +validate([<<"deliver_notifications">>, <<"default_node_config">>, + <<"mod_pubsub">>, <<"modules">>|_], + [{deliver_notifications, V}]) -> + validate_boolean(V); +validate([<<"deliver_payloads">>, <<"default_node_config">>, + <<"mod_pubsub">>, <<"modules">>|_], + [{deliver_payloads, V}]) -> + validate_boolean(V); +validate([<<"max_items">>, <<"default_node_config">>, + <<"mod_pubsub">>, <<"modules">>|_], + [{max_items, V}]) -> + validate_non_negative_integer(V); +validate([<<"max_payload_size">>, <<"default_node_config">>, + <<"mod_pubsub">>, <<"modules">>|_], + [{max_payload_size, V}]) -> + validate_non_negative_integer(V); +validate([<<"node_type">>, <<"default_node_config">>, + <<"mod_pubsub">>, <<"modules">>|_], + [{node_type, V}]) -> + validate_non_empty_atom(V); +validate([<<"notification_type">>, <<"default_node_config">>, + <<"mod_pubsub">>, <<"modules">>|_], + [{notification_type, V}]) -> + 
validate_non_empty_atom(V); +validate([<<"notify_config">>, <<"default_node_config">>, + <<"mod_pubsub">>, <<"modules">>|_], + [{notify_config, V}]) -> + validate_boolean(V); +validate([<<"notify_delete">>, <<"default_node_config">>, + <<"mod_pubsub">>, <<"modules">>|_], + [{notify_delete, V}]) -> + validate_boolean(V); +validate([<<"notify_retract">>, <<"default_node_config">>, + <<"mod_pubsub">>, <<"modules">>|_], + [{notify_retract, V}]) -> + validate_boolean(V); +validate([<<"persist_items">>, <<"default_node_config">>, + <<"mod_pubsub">>, <<"modules">>|_], + [{persist_items, V}]) -> + validate_boolean(V); +validate([<<"presence_based_delivery">>, <<"default_node_config">>, + <<"mod_pubsub">>, <<"modules">>|_], + [{presence_based_delivery, V}]) -> + validate_boolean(V); +validate([<<"publish_model">>, <<"default_node_config">>, + <<"mod_pubsub">>, <<"modules">>|_], + [{publish_model, V}]) -> + validate_non_empty_atom(V); +validate([<<"purge_offline">>, <<"default_node_config">>, + <<"mod_pubsub">>, <<"modules">>|_], + [{purge_offline, V}]) -> + validate_boolean(V); +validate([item, <<"roster_groups_allowed">>, <<"default_node_config">>, + <<"mod_pubsub">>, <<"modules">>|_], + [V]) -> + validate_non_empty_binary(V); +validate([<<"send_last_published_item">>, <<"default_node_config">>, + <<"mod_pubsub">>, <<"modules">>|_], + [{send_last_published_item, V}]) -> + validate_non_empty_atom(V); +validate([<<"subscribe">>, <<"default_node_config">>, + <<"mod_pubsub">>, <<"modules">>|_], + [{subscribe, V}]) -> + validate_boolean(V); +validate([<<"host">>, <<"mod_pubsub">>, <<"modules">>|_], + [{host, V}]) -> + validate_domain_template(V); +validate([<<"ignore_pep_from_offline">>, <<"mod_pubsub">>, <<"modules">>|_], + [{ignore_pep_from_offline, V}]) -> + validate_boolean(V); +validate([<<"iqdisc">>, <<"mod_pubsub">>, <<"modules">>|_], + [{iqdisc, V}]) -> + validate_iqdisc(V); +validate([<<"item_publisher">>, <<"mod_pubsub">>, <<"modules">>|_], + [{item_publisher, V}]) -> 
+ validate_boolean(V); +validate([<<"last_item_cache">>, <<"mod_pubsub">>, <<"modules">>|_], + [{last_item_cache, V}]) -> + validate_enum(V, [mnesia,rdbms,false]); +validate([<<"max_items_node">>, <<"mod_pubsub">>, <<"modules">>|_], + [{max_items_node, V}]) -> + validate_non_negative_integer(V); +validate([<<"max_subscriptions_node">>, <<"mod_pubsub">>, <<"modules">>|_], + [{max_subscriptions_node, V}]) -> + validate_non_negative_integer(V); +validate([<<"nodetree">>, <<"mod_pubsub">>, <<"modules">>|_], + [{nodetree, V}]) -> + validate_pubsub_nodetree(V); +validate([item, <<"pep_mapping">>, <<"mod_pubsub">>, <<"modules">>|_], + [V]) -> + validate_pubsub_pep_mapping(V); +validate([item, <<"plugins">>, <<"mod_pubsub">>, <<"modules">>|_], + [V]) -> + validate_pubsub_plugin(V); +validate([<<"sync_broadcast">>, <<"mod_pubsub">>, <<"modules">>|_], + [{sync_broadcast, V}]) -> + validate_boolean(V); +validate([<<"access">>, <<"mod_muc">>, <<"modules">>|_], + [{access, V}]) -> + validate_access_rule(V); +validate([<<"access_admin">>, <<"mod_muc">>, <<"modules">>|_], + [{access_admin, V}]) -> + validate_access_rule(V); +validate([<<"access_create">>, <<"mod_muc">>, <<"modules">>|_], + [{access_create, V}]) -> + validate_access_rule(V); +validate([<<"access_persistent">>, <<"mod_muc">>, <<"modules">>|_], + [{access_persistent, V}]) -> + validate_access_rule(V); +validate([<<"backend">>, <<"mod_muc">>, <<"modules">>|_], + [{backend, V}]) -> + validate_backend(mod_muc_db, V); +validate([item, <<"affiliations">>, <<"default_room">>, + <<"mod_muc">>, <<"modules">>|_], + [V]) -> + validate_muc_affiliation_rule(V); +validate([<<"allow_change_subj">>, <<"default_room">>, + <<"mod_muc">>, <<"modules">>|_], + [{allow_change_subj, V}]) -> + validate_boolean(V); +validate([<<"allow_multiple_sessions">>, <<"default_room">>, + <<"mod_muc">>, <<"modules">>|_], + [{allow_multiple_sessions, V}]) -> + validate_boolean(V); +validate([<<"allow_private_messages">>, <<"default_room">>, + 
<<"mod_muc">>, <<"modules">>|_], + [{allow_private_messages, V}]) -> + validate_boolean(V); +validate([<<"allow_query_users">>, <<"default_room">>, + <<"mod_muc">>, <<"modules">>|_], + [{allow_query_users, V}]) -> + validate_boolean(V); +validate([<<"allow_user_invites">>, <<"default_room">>, + <<"mod_muc">>, <<"modules">>|_], + [{allow_user_invites, V}]) -> + validate_boolean(V); +validate([<<"allow_visitor_nickchange">>, <<"default_room">>, + <<"mod_muc">>, <<"modules">>|_], + [{allow_visitor_nickchange, V}]) -> + validate_boolean(V); +validate([<<"allow_visitor_status">>, <<"default_room">>, + <<"mod_muc">>, <<"modules">>|_], + [{allow_visitor_status, V}]) -> + validate_boolean(V); +validate([<<"anonymous">>, <<"default_room">>, <<"mod_muc">>, <<"modules">>|_], + [{anonymous, V}]) -> + validate_boolean(V); +validate([<<"description">>, <<"default_room">>, <<"mod_muc">>, <<"modules">>|_], + [{description, V}]) -> + validate_binary(V); +validate([<<"logging">>, <<"default_room">>, <<"mod_muc">>, <<"modules">>|_], + [{logging, V}]) -> + validate_boolean(V); +validate([<<"max_users">>, <<"default_room">>, <<"mod_muc">>, <<"modules">>|_], + [{max_users, V}]) -> + validate_positive_integer(V); +validate([item, <<"maygetmemberlist">>, <<"default_room">>, + <<"mod_muc">>, <<"modules">>|_], + [V]) -> + validate_non_empty_atom(V); +validate([<<"members_by_default">>, <<"default_room">>, + <<"mod_muc">>, <<"modules">>|_], + [{members_by_default, V}]) -> + validate_boolean(V); +validate([<<"members_only">>, <<"default_room">>, <<"mod_muc">>, <<"modules">>|_], + [{members_only, V}]) -> + validate_boolean(V); +validate([<<"moderated">>, <<"default_room">>, <<"mod_muc">>, <<"modules">>|_], + [{moderated, V}]) -> + validate_boolean(V); +validate([<<"password">>, <<"default_room">>, <<"mod_muc">>, <<"modules">>|_], + [{password, V}]) -> + validate_string(V); +validate([<<"password_protected">>, <<"default_room">>, + <<"mod_muc">>, <<"modules">>|_], + [{password_protected, V}]) 
-> + validate_boolean(V); +validate([<<"persistent">>, <<"default_room">>, <<"mod_muc">>, <<"modules">>|_], + [{persistent, V}]) -> + validate_boolean(V); +validate([<<"public">>, <<"default_room">>, <<"mod_muc">>, <<"modules">>|_], + [{public, V}]) -> + validate_boolean(V); +validate([<<"public_list">>, <<"default_room">>, <<"mod_muc">>, <<"modules">>|_], + [{public_list, V}]) -> + validate_boolean(V); +validate([<<"subject">>, <<"default_room">>, <<"mod_muc">>, <<"modules">>|_], + [{subject, V}]) -> + validate_string(V); +validate([<<"subject_author">>, <<"default_room">>, <<"mod_muc">>, <<"modules">>|_], + [{subject_author, V}]) -> + validate_string(V); +validate([<<"title">>, <<"default_room">>, <<"mod_muc">>, <<"modules">>|_], + [{title, V}]) -> + validate_string(V); +validate([<<"hibernated_room_check_interval">>, <<"mod_muc">>, <<"modules">>|_], + [{hibernated_room_check_interval, V}]) -> + validate_non_negative_integer_or_infinity(V); +validate([<<"hibernated_room_timeout">>, <<"mod_muc">>, <<"modules">>|_], + [{hibernated_room_timeout, V}]) -> + validate_non_negative_integer_or_infinity(V); +validate([<<"history_size">>, <<"mod_muc">>, <<"modules">>|_], + [{history_size, V}]) -> + validate_non_negative_integer(V); +validate([<<"host">>, <<"mod_muc">>, <<"modules">>|_], + [{host, V}]) -> + validate_domain_template(V); +validate([<<"http_auth_pool">>, <<"mod_muc">>, <<"modules">>|_], + [{http_auth_pool, V}]) -> + validate_pool_name(V); +validate([<<"load_permanent_rooms_at_startup">>, <<"mod_muc">>, <<"modules">>|_], + [{load_permanent_rooms_at_startup, V}]) -> + validate_boolean(V); +validate([<<"max_room_desc">>, <<"mod_muc">>, <<"modules">>|_], + [{max_room_desc, V}]) -> + validate_non_negative_integer_or_infinity(V); +validate([<<"max_room_id">>, <<"mod_muc">>, <<"modules">>|_], + [{max_room_id, V}]) -> + validate_non_negative_integer_or_infinity(V); +validate([<<"max_room_name">>, <<"mod_muc">>, <<"modules">>|_], + [{max_room_name, V}]) -> + 
validate_non_negative_integer_or_infinity(V); +validate([<<"max_user_conferences">>, <<"mod_muc">>, <<"modules">>|_], + [{max_user_conferences, V}]) -> + validate_non_negative_integer(V); +validate([<<"max_users">>, <<"mod_muc">>, <<"modules">>|_], + [{max_users, V}]) -> + validate_positive_integer(V); +validate([<<"max_users_admin_threshold">>, <<"mod_muc">>, <<"modules">>|_], + [{max_users_admin_threshold, V}]) -> + validate_positive_integer(V); +validate([<<"min_message_interval">>, <<"mod_muc">>, <<"modules">>|_], + [{min_message_interval, V}]) -> + validate_non_negative_integer(V); +validate([<<"min_presence_interval">>, <<"mod_muc">>, <<"modules">>|_], + [{min_presence_interval, V}]) -> + validate_non_negative_integer(V); +validate([<<"room_shaper">>, <<"mod_muc">>, <<"modules">>|_], + [{room_shaper, V}]) -> + validate_shaper_name(V); +validate([<<"user_message_shaper">>, <<"mod_muc">>, <<"modules">>|_], + [{user_message_shaper, V}]) -> + validate_shaper_name(V); +validate([<<"user_presence_shaper">>, <<"mod_muc">>, <<"modules">>|_], + [{user_presence_shaper, V}]) -> + validate_shaper_name(V); +validate([<<"iqdisc">>, <<"mod_ping">>, <<"modules">>|_], + [{iqdisc, V}]) -> + validate_iqdisc(V); +validate([<<"ping_interval">>, <<"mod_ping">>, <<"modules">>|_], + [{ping_interval, V}]) -> + validate_positive_integer(V); +validate([<<"ping_req_timeout">>, <<"mod_ping">>, <<"modules">>|_], + [{ping_req_timeout, V}]) -> + validate_positive_integer(V); +validate([<<"send_pings">>, <<"mod_ping">>, <<"modules">>|_], + [{send_pings, V}]) -> + validate_boolean(V); +validate([<<"timeout_action">>, <<"mod_ping">>, <<"modules">>|_], + [{timeout_action, V}]) -> + validate_enum(V, [none,kill]); +validate(_Path, _Value) -> + ok. + + +%% validators + +validate_loglevel(Level) -> + mongoose_logs:loglevel_keyword_to_number(Level). + +validate_non_empty_binary(Value) when is_binary(Value), Value =/= <<>> -> ok. + +validate_binary(Value) when is_binary(Value) -> ok. 
+ +validate_hosts(Hosts = [_|_]) -> + validate_unique_items(Hosts). + +validate_unique_items(Items) -> + L = sets:size(sets:from_list(Items)), + L = length(Items). + +validate_timeout(infinity) -> ok; +validate_timeout(Timeout) when is_integer(Timeout), Timeout > 0 -> ok. + +validate_boolean(Value) when is_boolean(Value) -> ok. + +validate_module(Mod) -> + case code:ensure_loaded(Mod) of + {module, _} -> + ok; + Other -> + error(#{what => module_not_found, module => Mod, reason => Other}) + end. + +validate_positive_integer(Value) when is_integer(Value), Value > 0 -> ok. + +validate_non_negative_integer(Value) when is_integer(Value), Value >= 0 -> ok. + +validate_non_negative_integer_or_infinity(Value) when is_integer(Value), Value >= 0 -> ok; +validate_non_negative_integer_or_infinity(infinity) -> ok. + +validate_positive_integer_or_infinity(Value) when is_integer(Value), Value > 0 -> ok; +validate_positive_integer_or_infinity(infinity) -> ok. + +validate_positive_integer_or_atom(Value, Atom) when is_atom(Value), Value == Atom -> ok; +validate_positive_integer_or_atom(Value, _) when is_integer(Value), Value > 0 -> ok. + +validate_positive_integer_or_infinity_or_atom(Value, _) when is_integer(Value), Value > 0 -> ok; +validate_positive_integer_or_infinity_or_atom(infinity, _) -> ok; +validate_positive_integer_or_infinity_or_atom(Value, Atom) when is_atom(Value), Value == Atom -> ok. + +validate_enum(Value, Values) -> + case lists:member(Value, Values) of + true -> + ok; + false -> + error(#{what => validate_enum_failed, + value => Value, + allowed_values => Values}) + end. + +validate_ip_address(Value) -> + {ok, _} = inet:parse_address(Value). + +validate_port(Value) when is_integer(Value), Value >= 0, Value =< 65535 -> ok. + +validate_non_empty_atom(Value) when is_atom(Value), Value =/= '' -> ok. + +validate_non_empty_string(Value) when is_list(Value), Value =/= "" -> ok. + +validate_non_empty_list(Value) when is_list(Value), Value =/= [] -> ok. 
+ +validate_password_format({scram, [_|_]}) -> ok; +validate_password_format(Value) -> validate_enum(Value, [scram, plain]). + +validate_pool_scope(Value) when is_binary(Value) -> validate_non_empty_binary(Value); +validate_pool_scope(Value) -> validate_enum(Value, [host, global]). + +validate_root_or_host_config([]) -> ok; +validate_root_or_host_config([{host, _}, <<"host_config">>]) -> ok. + +validate_jid(Jid) -> + case jid:from_binary(Jid) of + #jid{} -> + ok; + _ -> + error(#{what => validate_jid_failed, value => Jid}) + end. + +validate_iqdisc(no_queue) -> ok; +validate_iqdisc(one_queue) -> ok; +validate_iqdisc(parallel) -> ok; +validate_iqdisc({queues, N}) when is_integer(N), N > 0 -> ok. + +-spec validate_auth_token_domain(mod_auth_token:token_type()) -> ok. +validate_auth_token_domain(Type) -> + validate_enum(Type, [access, refresh, provision]). + +validate_validity_period({{validity_period, Token}, {Value, Unit}}) -> + validate_auth_token_domain(Token), + validate_non_negative_integer(Value), + validate_period_unit(Unit). + +validate_period_unit(Unit) -> + validate_enum(Unit, [days, hours, minutes, seconds]). + +validate_ip_access({Access, IPMask}) -> + validate_enum(Access, [allow, deny]), + validate_ip_mask_string(IPMask). + +validate_backend(Mod, Backend) -> + validate_module(backend_module:backend_module(Mod, Backend)). + +validate_chat_marker_type(Type) -> + validate_enum(Type, [displayed, received, acknowledged]). + +validate_groupchat_type(Type) -> + validate_enum(Type, [muc, muclight]). + +validate_domain(Domain) when is_list(Domain) -> + #jid{luser = <<>>, lresource = <<>>} = jid:from_binary(list_to_binary(Domain)), + validate_domain_res(Domain). 
+ +validate_domain_res(Domain) -> + case inet_res:gethostbyname(Domain) of + {ok, _} -> + ok; + {error,formerr} -> + error(#{what => cfg_validate_domain_failed, + reason => formerr, text => <<"Invalid domain name">>, + domain => Domain}); + {error,Reason} -> %% timeout, nxdomain + ?LOG_WARNING(#{what => cfg_validate_domain, + reason => Reason, domain => Domain, + text => <<"Couldn't resolve domain. " + "It could cause issues with production installations">>}), + ignore + end. + +validate_binary_domain(Domain) when is_binary(Domain) -> + #jid{luser = <<>>, lresource = <<>>} = jid:from_binary(Domain), + validate_domain_res(binary_to_list(Domain)). + +validate_domain_template(Domain) -> + validate_binary_domain(gen_mod:make_subhost(Domain, <<"example.com">>)). + +validate_url(Url) -> + validate_non_empty_string(Url). + +validate_string(Value) -> + is_binary(unicode:characters_to_binary(Value)). + +validate_ip_mask_string(IPMaskString) -> + validate_non_empty_string(IPMaskString), + {ok, IPMask} = mongoose_lib:parse_ip_netmask(IPMaskString), + validate_ip_mask(IPMask). + +validate_ip_mask({IP, Mask}) -> + validate_string(inet:ntoa(IP)), + case IP of + {_,_,_,_} -> + validate_ipv4_mask(Mask); + _ -> + validate_ipv6_mask(Mask) + end. + +validate_ipv4_mask(Mask) -> + validate_range(Mask, 0, 32). + +validate_ipv6_mask(Mask) -> + validate_range(Mask, 0, 128). + +validate_network_address(Value) -> + ?LOG_DEBUG(#{what => validate_network_address, + value => Value}), + validate_oneof(Value, [fun validate_domain/1, fun validate_ip_address/1]). + +validate_oneof(Value, Funs) -> + Results = [safe_call_validator(F, Value) || F <- Funs], + case lists:any(fun(R) -> R =:= ok end, Results) of + true -> + ok; + false -> + error(#{what => validate_oneof_failed, + validation_results => Results}) + end. + +safe_call_validator(F, Value) -> + try + F(Value), + ok + catch error:Reason:Stacktrace -> + #{reason => Reason, stacktrace => Stacktrace} + end. 
+ +validate_network_port(Value) -> + validate_range(Value, 0, 65535). + +validate_range(Value, Min, Max) when Value >= Min, Value =< Max -> + ok. + +validate_wpool_strategy(Value) -> + validate_enum(Value, [best_worker, random_worker, next_worker, + available_worker, next_available_worker]). + +validate_filename(Filename) -> + case file:read_file_info(Filename) of + {ok, _} -> + ok; + Reason -> + error(#{what => invalid_filename, filename => Filename, reason => Reason}) + end. + +validate_dirname(Dirname) -> + case file:list_dir(Dirname) of + {ok, _} -> + ok; + Reason -> + error(#{what => invalid_dirname, dirname => Dirname, reason => Reason}) + end. + +validate_keystore_key({Name, ram}) -> + validate_non_empty_atom(Name); +validate_keystore_key({Name, {file, Path}}) -> + validate_non_empty_atom(Name), + validate_filename(Path). + +validate_muc_affiliation_rule({{User, Server, Resource}, Affiliation}) -> + validate_non_empty_binary(User), + validate_binary_domain(Server), + validate_binary(Resource), + validate_non_empty_atom(Affiliation). + +validate_maybe_css_file(false) -> + ok; +validate_maybe_css_file(Bin) -> + validate_non_empty_binary(Bin). %% Could be more precise type + +validate_top_link({Url, Text}) -> + validate_url(Url), + validate_non_empty_string(Text). + +validate_muc_config_schema({Field, Value}) -> + validate_non_empty_string(Field), + validate_string(Value); +validate_muc_config_schema({Field, Value, InternalField, FieldType}) + when is_list(Value); is_float(Value); is_integer(Value) -> + validate_non_empty_string(Field), + validate_enum(FieldType, [binary, integer, float]), + validate_non_empty_atom(InternalField). + +validate_pubsub_nodetree(Value) -> + validate_non_empty_binary(Value), + validate_backend(nodetree, b2a(Value)). + +validate_pubsub_plugin(Value) -> + validate_non_empty_binary(Value), + validate_backend(node, b2a(Value)). 
+ +validate_pubsub_pep_mapping({Namespace, Id}) -> + validate_non_empty_string(Namespace), + validate_non_empty_string(Id). + +b2a(Bin) -> + binary_to_atom(Bin, utf8). + +validate_revproxy_route({Host, Path, Method, Upstream}) -> + validate_non_empty_string(Host), + validate_string(Path), + validate_string(Method), + validate_non_empty_string(Upstream); +validate_revproxy_route({Host, Path, Upstream}) -> + validate_non_empty_string(Host), + validate_string(Path), + validate_non_empty_string(Upstream). + +validate_ldap_vcard_map({VCardField, LDAPPattern, LDAPFields}) -> + validate_non_empty_binary(VCardField), + validate_non_empty_binary(LDAPPattern), + lists:foreach(fun validate_non_empty_binary/1, LDAPFields). + +validate_ldap_search_field({SearchField, LDAPField}) -> + validate_non_empty_binary(SearchField), + validate_non_empty_binary(LDAPField). + +validate_ldap_search_reported({SearchField, VCardField}) -> + validate_non_empty_binary(SearchField), + validate_non_empty_binary(VCardField). + +validate_ldap_uids({Attribute, Format}) -> + validate_non_empty_string(Attribute), + validate_non_empty_string(Format); +validate_ldap_uids(Attribute) -> + validate_non_empty_string(Attribute). + +validate_pool_name(V) -> + validate_non_empty_atom(V). + +validate_access_rule(V) -> + validate_non_empty_atom(V). + +validate_shaper_name(V) -> + validate_non_empty_atom(V). diff --git a/src/ejabberd_config.erl b/src/ejabberd_config.erl index 7bb509f1cfb..741db5368be 100644 --- a/src/ejabberd_config.erl +++ b/src/ejabberd_config.erl @@ -55,7 +55,7 @@ -export([other_cluster_nodes/0]). --import(mongoose_config_parser, [can_be_ignored/1]). +-import(mongoose_config_reload, [can_be_ignored/1]). -export([apply_reloading_change/1]). 
@@ -89,7 +89,7 @@ start() -> {local_content, true}, {attributes, record_info(fields, local_config)}]), mnesia:add_table_copy(local_config, node(), ram_copies), - Config = get_ejabberd_config_path(), + Config = get_config_path(), ejabberd_config:load_file(Config), %% This start time is used by mod_last: add_local_option(node_start, {node_start, erlang:system_time(second)}), @@ -97,11 +97,11 @@ start() -> %% @doc Get the filename of the ejabberd configuration file. -%% The filename can be specified with: erl -config "/path/to/mongooseim.cfg". +%% The filename can be specified with: erl -config "/path/to/mongooseim.toml". %% It can also be specified with the environtment variable EJABBERD_CONFIG_PATH. -%% If not specified, the default value 'mongooseim.cfg' is assumed. --spec get_ejabberd_config_path() -> string(). -get_ejabberd_config_path() -> +%% If not specified, the default value 'mongooseim.toml' is assumed. +-spec get_config_path() -> string(). +get_config_path() -> DefaultPath = case os:getenv("EJABBERD_CONFIG_PATH") of false -> ?CONFIG_PATH; @@ -116,78 +116,13 @@ get_ejabberd_config_path() -> %% This function will crash if finds some error in the configuration file. -spec load_file(File :: string()) -> ok. load_file(File) -> - State = parse_file(File), + State = mongoose_config_parser:parse_file(File), assert_required_files_exist(State), set_opts(State). - -%% @doc Read an ejabberd configuration file and return the terms. -%% Input is an absolute or relative path to an ejabberd config file. -%% Returns a list of plain terms, -%% in which the options 'include_config_file' were parsed -%% and the terms in those files were included. --spec get_plain_terms_file(string()) -> [term()]. 
-get_plain_terms_file(File1) -> - File = mongoose_config_utils:get_absolute_path(File1), - case file:consult(File) of - {ok, Terms} -> - include_config_files(Terms); - {error, {LineNumber, erl_parse, _ParseMessage} = Reason} -> - ExitText = describe_config_problem(File, Reason, LineNumber), - ?LOG_ERROR(#{what => ejabberd_config_file_loading_failed, - file => File, line => LineNumber, reason => Reason}), - mongoose_config_utils:exit_or_halt(ExitText); - {error, Reason} -> - ExitText = describe_config_problem(File, Reason), - ?LOG_ERROR(#{what => mim_config_file_loading_failed, - file => File, reason => Reason}), - mongoose_config_utils:exit_or_halt(ExitText) - end. - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%%% Errors reading the config file - --type config_problem() :: atom() | {integer(), atom() | tuple(), _}. % spec me better - --spec describe_config_problem(Filename :: string(), - Reason :: config_problem()) -> string(). -describe_config_problem(Filename, Reason) -> - Text1 = lists:flatten("Problem loading MongooseIM config file " ++ Filename), - Text2 = lists:flatten(" : " ++ file:format_error(Reason)), - ExitText = Text1 ++ Text2, - ExitText. - - --spec describe_config_problem(Filename :: string(), - Reason :: config_problem(), - Line :: pos_integer()) -> string(). -describe_config_problem(Filename, Reason, LineNumber) -> - Text1 = lists:flatten("Problem loading ejabberd config file " ++ Filename), - Text2 = lists:flatten(" approximately in the line " - ++ file:format_error(Reason)), - ExitText = Text1 ++ Text2, - Lines = mongoose_config_utils:get_config_lines(Filename, LineNumber, 10, 3), - ?LOG_ERROR(#{what => mim_config_file_loading_failed, lines => Lines, - text => <<"The following lines from your configuration file might be relevant to the error">>}), - ExitText. - - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%%% Support for 'include_config_file' - -%% @doc Include additional configuration files in the list of terms. 
--spec include_config_files([term()]) -> [term()]. -include_config_files(Terms) -> - Filenames = mongoose_config_parser:config_filenames_to_include(Terms), - Configs = lists:map(fun(Filename) -> - {Filename, get_plain_terms_file(Filename)} - end, Filenames), - mongoose_config_parser:include_config_files(Terms, Configs). - %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %%% Process terms - -spec set_opts(state()) -> 'ok' | none(). set_opts(State) -> case mnesia:transaction(fun() -> do_set_opts(State) end) of @@ -296,9 +231,8 @@ get_local_option_or_default(Opt, Default) -> %% @doc Return the list of hosts handled by a given module get_vh_by_auth_method(AuthMethod) -> - mnesia:dirty_select(local_config, - [{#local_config{key = {auth_method, '$1'}, - value = AuthMethod}, [], ['$1']}]). + lists:filter(fun(Host) -> lists:member(AuthMethod, ejabberd_auth:auth_methods(Host)) end, + ?MYHOSTS). handle_table_does_not_exist_error(Table) -> MnesiaDirectory = mnesia:system_info(directory), @@ -316,11 +250,6 @@ handle_table_does_not_exist_error(Table) -> %%-------------------------------------------------------------------- %% Configuration reload %%-------------------------------------------------------------------- --spec parse_file(file:name()) -> state(). -parse_file(ConfigFile) -> - Terms = get_plain_terms_file(ConfigFile), - mongoose_config_parser:parse_terms(Terms). - -spec reload_local() -> {ok, iolist()} | no_return(). reload_local() -> reload_nodes(reload_local, [node()], false). 
@@ -469,7 +398,7 @@ handle_config_del(#config{key = hosts, value = Hosts}) -> %% handle add/remove new hosts handle_config_change({hosts, OldHosts, NewHosts}) -> - {ToDel, ToAdd} = mongoose_config_parser:check_hosts(NewHosts, OldHosts), + {ToDel, ToAdd} = mongoose_config_reload:check_hosts(NewHosts, OldHosts), lists:foreach(fun remove_virtual_host/1, ToDel), lists:foreach(fun add_virtual_host/1, ToAdd); handle_config_change({language, _Old, _New}) -> @@ -589,7 +518,7 @@ get_host_local_config() -> -spec get_local_config() -> [{local_config, term(), term()}]. get_local_config() -> - Keys = lists:filter(fun mongoose_config_parser:is_not_host_specific/1, mnesia:dirty_all_keys(local_config)), + Keys = lists:filter(fun mongoose_config_reload:is_not_host_specific/1, mnesia:dirty_all_keys(local_config)), lists:flatten(lists:map(fun(Key) -> mnesia:dirty_read(local_config, Key) end, @@ -610,17 +539,17 @@ get_categorized_options() -> %% This function prepares all state data to pass into pure code part %% (i.e. mongoose_config_parser and mongoose_config_reload). config_state() -> - ConfigFile = get_ejabberd_config_path(), - Terms = get_plain_terms_file(ConfigFile), + ConfigFile = get_config_path(), + State = mongoose_config_parser:parse_file(ConfigFile), %% Performance optimization hint: %% terms_to_missing_and_required_files/1 actually parses Terms into State. #{missing_files := MissingFiles, required_files := RequiredFiles} = - terms_to_missing_and_required_files(Terms), + state_to_missing_and_required_files(State), #{mongoose_node => node(), config_file => ConfigFile, loaded_categorized_options => get_categorized_options(), - ondisc_config_terms => Terms, + ondisc_config_state => State, missing_files => MissingFiles, required_files => RequiredFiles}. @@ -641,8 +570,8 @@ config_states(Nodes) -> end. 
compute_config_file_version() -> - ConfigFile = get_ejabberd_config_path(), - State = parse_file(ConfigFile), + ConfigFile = get_config_path(), + State = mongoose_config_parser:parse_file(ConfigFile), mongoose_config_reload:compute_config_file_version(State). compute_loaded_config_version() -> @@ -677,8 +606,7 @@ assert_required_files_exist(State) -> filenames => MissingFiles}) end. -terms_to_missing_and_required_files(Terms) -> - State = mongoose_config_parser:parse_terms(Terms), +state_to_missing_and_required_files(State) -> RequiredFiles = mongoose_config_parser:state_to_required_files(State), MissingFiles = missing_files(RequiredFiles), #{missing_files => MissingFiles, required_files => RequiredFiles}. diff --git a/src/ejabberd_s2s_out.erl b/src/ejabberd_s2s_out.erl index 4aa88044cb6..5458c047751 100644 --- a/src/ejabberd_s2s_out.erl +++ b/src/ejabberd_s2s_out.erl @@ -1078,8 +1078,8 @@ outgoing_s2s_port() -> -spec outgoing_s2s_families() -> ['ipv4' | 'ipv6', ...]. outgoing_s2s_families() -> - case ejabberd_config:get_local_option(outgoing_s2s_options) of - {Families, _} when is_list(Families) -> + case ejabberd_config:get_local_option(outgoing_s2s_families) of + Families when is_list(Families) -> Families; undefined -> %% DISCUSSION: Why prefer IPv4 first? @@ -1099,10 +1099,10 @@ outgoing_s2s_families() -> -spec outgoing_s2s_timeout() -> non_neg_integer() | infinity. 
outgoing_s2s_timeout() -> - case ejabberd_config:get_local_option(outgoing_s2s_options) of - {_, Timeout} when is_integer(Timeout) -> + case ejabberd_config:get_local_option(outgoing_s2s_timeout) of + Timeout when is_integer(Timeout) -> Timeout; - {_, infinity} -> + infinity -> infinity; undefined -> %% 10 seconds diff --git a/src/ejabberd_service.erl b/src/ejabberd_service.erl index 0d5bffc5868..54597cd36e8 100644 --- a/src/ejabberd_service.erl +++ b/src/ejabberd_service.erl @@ -62,7 +62,7 @@ sockmod :: ejabberd:sockmod(), socket_monitor, streamid, - password :: binary(), + password :: string(), host :: binary() | undefined, is_subdomain :: boolean(), hidden_components = false :: boolean(), diff --git a/src/ejabberd_sm.erl b/src/ejabberd_sm.erl index 15e3e85bbe1..638535a0655 100644 --- a/src/ejabberd_sm.erl +++ b/src/ejabberd_sm.erl @@ -504,7 +504,10 @@ node_cleanup(Acc, Node) -> %%-------------------------------------------------------------------- -spec init(_) -> {ok, state()}. init([]) -> - {Backend, Opts} = ejabberd_config:get_global_option(sm_backend), + {Backend, Opts} = case ejabberd_config:get_global_option(sm_backend) of + undefined -> {mnesia, []}; + Value -> Value + end, {Mod, Code} = dynamic_compile:from_string(sm_backend(Backend)), code:load_binary(Mod, "ejabberd_sm_backend.erl", Code), diff --git a/src/gen_mod.erl b/src/gen_mod.erl index a99fd977cdf..a3587758b47 100644 --- a/src/gen_mod.erl +++ b/src/gen_mod.erl @@ -60,6 +60,7 @@ set_module_opt/4, set_module_opts/3, get_module_opts/2, + make_subhost/2, make_subhosts/2, get_opt_subhost/3, get_opt_subhost/4, diff --git a/src/mam/mod_mam_meta.erl b/src/mam/mod_mam_meta.erl index 09e763e3c0b..2280a4c9687 100644 --- a/src/mam/mod_mam_meta.erl +++ b/src/mam/mod_mam_meta.erl @@ -101,9 +101,9 @@ handle_nested_opts(Key, RootOpts, Default, Deps) -> parse_opts(Key, FullOpts, Deps) end. - -spec parse_opts(Type :: pm | muc, Opts :: proplists:proplist(), deps()) -> deps(). 
parse_opts(Type, Opts, Deps) -> + %% Opts are merged root options with options inside pm or muc section CoreMod = mam_type_to_core_mod(Type), CoreModOpts = filter_opts(Opts, valid_core_mod_opts(CoreMod)), WithCoreDeps = add_dep(CoreMod, CoreModOpts, Deps), @@ -123,30 +123,23 @@ filter_opts(Opts, ValidOpts) -> end end, ValidOpts). +%% Get a list of options to pass into the two modules. +%% They don't required to be defined in pm or muc sections, +%% the root section is enough. -spec valid_core_mod_opts(module()) -> [atom()]. valid_core_mod_opts(mod_mam) -> - [ - no_stanzaid_element, - is_archivable_message, - archive_chat_markers, - extra_lookup_params, - full_text_search, - message_retraction, - archive_groupchats, - default_result_limit, - max_result_limit - ]; + [no_stanzaid_element, archive_groupchats] ++ common_opts(); valid_core_mod_opts(mod_mam_muc) -> - [ - is_archivable_message, + [host] ++ common_opts(). + +common_opts() -> + [is_archivable_message, archive_chat_markers, - host, extra_lookup_params, full_text_search, message_retraction, default_result_limit, - max_result_limit - ]. + max_result_limit]. -spec parse_backend_opts(rdbms | cassandra | riak | elasticsearch, Type :: pm | muc, Opts :: proplists:proplist(), deps()) -> deps(). diff --git a/src/mod_extdisco.erl b/src/mod_extdisco.erl index dc02038506c..28546439564 100644 --- a/src/mod_extdisco.erl +++ b/src/mod_extdisco.erl @@ -48,7 +48,7 @@ process_iq(_From, _To, Acc, #iq{type = get, sub_el = SubEl} = IQ) -> {result, create_iq_response(RequestedServices)}; {credentials, {Type, Host}} -> Services = get_external_services(Type), - RequestedServices = lists:filter(fun({_, Opts}) -> + RequestedServices = lists:filter(fun(Opts) -> gen_mod:get_opt(host, Opts, undefined) == binary_to_list(Host) end, Services), {result, create_iq_response_credentials(RequestedServices)}; @@ -104,21 +104,22 @@ get_external_services() -> end. 
get_external_services(Type) -> - [{T, Opts} || {T, Opts} <- get_external_services(), T == Type]. + [Opts || Opts <- get_external_services(), gen_mod:get_opt(type, Opts) == Type]. prepare_services_element(Services) -> lists:reverse( lists:foldl( - fun({Type, Opts}, Acc) -> - RequiredElements = required_elements(Type, Opts), + fun(Opts, Acc) -> + RequiredElements = required_elements(Opts), OptionalElements = optional_elements(Opts), NewResult = #xmlel{name = <<"service">>, attrs = RequiredElements ++ OptionalElements}, [NewResult | Acc] end, [], Services)). -required_elements(Type, Opts) -> +required_elements(Opts) -> Host = gen_mod:get_opt(host, Opts, <<"">>), + Type = gen_mod:get_opt(type, Opts), [{<<"type">>, atom_to_binary(Type, utf8)}, {<<"host">>, Host}]. optional_elements(Opts) -> @@ -126,7 +127,7 @@ optional_elements(Opts) -> Transport = gen_mod:get_opt(transport, Opts, undefined), Password = gen_mod:get_opt(password, Opts, undefined), Username = gen_mod:get_opt(username, Opts, undefined), - Elements = [{<<"port">>, Port}, + Elements = [{<<"port">>, i2b(Port)}, {<<"transport">>, Transport}, {<<"password">>, Password}, {<<"username">>, Username}], @@ -136,3 +137,6 @@ filter_undefined_elements(Elements) -> lists:filter(fun({_, undefined}) -> false; (_) -> true end, Elements). + +i2b(X) when is_integer(X) -> integer_to_binary(X); +i2b(X) -> X. diff --git a/src/mod_muc.erl b/src/mod_muc.erl index 3ad65ec6b49..08efd100259 100644 --- a/src/mod_muc.erl +++ b/src/mod_muc.erl @@ -701,7 +701,7 @@ check_user_can_create_room(ServerHost, AccessCreate, From, RoomID) -> case acl:match_rule(ServerHost, AccessCreate, From) of allow -> (size(RoomID) =< gen_mod:get_module_opt(ServerHost, mod_muc, - max_room_id, infinite)); + max_room_id, infinity)); _ -> false end. 
diff --git a/src/mod_muc_room.erl b/src/mod_muc_room.erl index d206118db56..f4de649e874 100644 --- a/src/mod_muc_room.erl +++ b/src/mod_muc_room.erl @@ -3349,7 +3349,7 @@ is_allowed_room_name_desc_limits(XEl, StateData) -> {value, {_, [N]}} -> byte_size(N) =< gen_mod:get_module_opt(StateData#state.server_host, mod_muc, max_room_name, - infinite); + infinity); _ -> true end, @@ -3359,7 +3359,7 @@ is_allowed_room_name_desc_limits(XEl, StateData) -> {value, {_, [D]}} -> byte_size(D) =< gen_mod:get_module_opt(StateData#state.server_host, mod_muc, max_room_desc, - infinite); + infinity); _ -> true end, diff --git a/src/mod_register.erl b/src/mod_register.erl index 076736b6dd8..d371c6adb9f 100644 --- a/src/mod_register.erl +++ b/src/mod_register.erl @@ -430,9 +430,11 @@ is_strong_password(Server, Password) -> get_ip_access(Host) -> IPAccess = gen_mod:get_module_opt(Host, ?MODULE, ip_access, []), lists:flatmap( - fun({Access, S}) -> - case parse_ip_netmask(S) of - {ok, IP, Mask} -> + fun({Access, {IP, Mask}}) -> + [{Access, IP, Mask}]; + ({Access, S}) -> + case mongoose_lib:parse_ip_netmask(S) of + {ok, {IP, Mask}} -> [{Access, IP, Mask}]; error -> ?LOG_ERROR(#{what => reg_invalid_network_specification, @@ -441,38 +443,6 @@ get_ip_access(Host) -> end end, IPAccess). -parse_ip_netmask(S) -> - case string:tokens(S, "/") of - [IPStr] -> parse_ip_netmask(IPStr, undefined); - [IPStr, MaskStr] -> parse_ip_netmask(IPStr, MaskStr); - _ -> error - end. 
- -parse_ip_netmask(IPStr, undefined) -> - case inet_parse:address(IPStr) of - {ok, {_, _, _, _} = IP} -> - {ok, IP, 32}; - {ok, {_, _, _, _, _, _, _, _} = IP} -> - {ok, IP, 128}; - _ -> - error - end; -parse_ip_netmask(IPStr, MaskStr) -> - case catch list_to_integer(MaskStr) of - Mask when is_integer(Mask), - Mask >= 0 -> - case inet_parse:address(IPStr) of - {ok, {_, _, _, _} = IP} when Mask =< 32 -> - {ok, IP, Mask}; - {ok, {_, _, _, _, _, _, _, _} = IP} when Mask =< 128 -> - {ok, IP, Mask}; - _ -> - error - end; - _ -> - error - end. - check_ip_access(_Source, []) -> allow; check_ip_access({User, Server, Resource}, IPAccess) -> @@ -502,10 +472,11 @@ check_ip_access(IP, [_ | IPAccess]) -> check_ip_access(IP, IPAccess). ip_to_integer({IP1, IP2, IP3, IP4}) -> - (((((IP1 bsl 8) bor IP2) bsl 8) bor IP3) bsl 8) bor IP4; + <> = <>, + X; ip_to_integer({IP1, IP2, IP3, IP4, IP5, IP6, IP7, IP8}) -> - (((((((((((((IP1 bsl 16) bor IP2) bsl 16) bor IP3) bsl 16) bor IP4) - bsl 16) bor IP5) bsl 16) bor IP6) bsl 16) bor IP7) bsl 16) bor IP8. + <> = <>, + X. make_host_only_jid(Name) when is_binary(Name) -> jid:make(<<>>, Name, <<>>). diff --git a/src/mongoose_http_client.erl b/src/mongoose_http_client.erl index 4039daccc51..bf3db7bc369 100644 --- a/src/mongoose_http_client.erl +++ b/src/mongoose_http_client.erl @@ -14,14 +14,6 @@ %% limitations under the License. %%============================================================================== %%%------------------------------------------------------------------- -%%% @doc -%% options and defaults: -%% * server - (required) -%% * path_prefix - "" -%% * request_timeout - 2000, -%% * http_opts - [] % passed to fusco -%%% -%%% @end %%% Created : 26. Jun 2018 13:07 %%%------------------------------------------------------------------- -module(mongoose_http_client). 
diff --git a/src/mongoose_lib.erl b/src/mongoose_lib.erl index f4577039b72..7d4133f4ec5 100644 --- a/src/mongoose_lib.erl +++ b/src/mongoose_lib.erl @@ -8,6 +8,7 @@ -export([maps_or_pairs_foreach/2]). %% Busy Wait -export([wait_until/2, wait_until/3]). +-export([parse_ip_netmask/1]). %% Private, just for warning -export([deprecated_logging/1]). @@ -110,3 +111,38 @@ deprecated_logging(Location) -> Map = #{what => deprecated_logging_macro, text => <<"Deprecated logging macro is used in your code">>}, mongoose_deprecations:log(Location, Map, [{log_level, warning}]). +%% ------------------------------------------------------------------ +%% Parse IP +%% ------------------------------------------------------------------ + +parse_ip_netmask(S) -> + case string:tokens(S, "/") of + [IPStr] -> parse_ip_netmask(IPStr, undefined); + [IPStr, MaskStr] -> parse_ip_netmask(IPStr, MaskStr); + _ -> error + end. + +parse_ip_netmask(IPStr, undefined) -> + case inet_parse:address(IPStr) of + {ok, {_, _, _, _} = IP} -> + {ok, {IP, 32}}; + {ok, {_, _, _, _, _, _, _, _} = IP} -> + {ok, {IP, 128}}; + _ -> + error + end; +parse_ip_netmask(IPStr, MaskStr) -> + case catch list_to_integer(MaskStr) of + Mask when is_integer(Mask), + Mask >= 0 -> + case inet_parse:address(IPStr) of + {ok, {_, _, _, _} = IP} when Mask =< 32 -> + {ok, {IP, Mask}}; + {ok, {_, _, _, _, _, _, _, _} = IP} when Mask =< 128 -> + {ok, {IP, Mask}}; + _ -> + error + end; + _ -> + error + end. 
diff --git a/src/mongooseim.app.src b/src/mongooseim.app.src index f0a73f079a6..a9d907f1121 100644 --- a/src/mongooseim.app.src +++ b/src/mongooseim.app.src @@ -51,6 +51,7 @@ erl_csv_generator, trails, cowboy_swagger, + tomerl, flatlog ]}, {env, []}, diff --git a/src/rdbms/mongoose_rdbms.erl b/src/rdbms/mongoose_rdbms.erl index abb106b0e56..faf363adc16 100644 --- a/src/rdbms/mongoose_rdbms.erl +++ b/src/rdbms/mongoose_rdbms.erl @@ -426,6 +426,8 @@ init(Opts) -> process_flag(trap_exit, true), {server, Settings} = lists:keyfind(server, 1, Opts), KeepaliveInterval = proplists:get_value(keepalive_interval, Opts), + % retries are delayed exponentially, this param limits the delay + % so if we start with 2 and try 6 times, we have 2, 4, 8, 16, 30 MaxStartInterval = proplists:get_value(start_interval, Opts, 30), case connect(Settings, ?CONNECT_RETRIES, 2, MaxStartInterval) of {ok, DbRef} -> diff --git a/src/wpool/mongoose_wpool_http.erl b/src/wpool/mongoose_wpool_http.erl index 7af89b067d5..fc00d9b5a43 100644 --- a/src/wpool/mongoose_wpool_http.erl +++ b/src/wpool/mongoose_wpool_http.erl @@ -1,3 +1,11 @@ +%%% @doc +%% options and defaults: +%% * server - (required) +%% * path_prefix - "" +%% * request_timeout - 2000, +%% * http_opts - [] % passed to fusco +%%% +%%% @end -module(mongoose_wpool_http). -behaviour(mongoose_wpool). diff --git a/test/auth_http_SUITE.erl b/test/auth_http_SUITE.erl index c98b5749f1b..3dee9ecd665 100644 --- a/test/auth_http_SUITE.erl +++ b/test/auth_http_SUITE.erl @@ -24,7 +24,6 @@ -define(DOMAIN2, <<"localhost2">>). -define(AUTH_HOST, "http://localhost:12000"). -define(BASIC_AUTH, "softkitty:purrpurrpurr"). --define(CERT_PATH, "../../../../tools/ssl"). %%-------------------------------------------------------------------- %% Suite configuration @@ -95,14 +94,16 @@ end_per_suite(Config) -> Config. 
init_per_group(cert_auth, Config) -> + Root = small_path_helper:repo_dir(Config), + SslDir = filename:join(Root, "tools/ssl"), try - {ok, Cert1} = file:read_file(?CERT_PATH ++ "/mongooseim/cert.pem"), - {ok, Cert2} = file:read_file(?CERT_PATH ++ "/ca/cacert.pem"), + {ok, Cert1} = file:read_file(filename:join(SslDir, "mongooseim/cert.pem")), + {ok, Cert2} = file:read_file(filename:join(SslDir, "ca/cacert.pem")), [{'Certificate', DerBin, not_encrypted} | _] = public_key:pem_decode(Cert2), [{der_cert, DerBin}, {pem_cert1, Cert1}, {pem_cert2, Cert2} | Config] catch _:E -> - {skip, {E, ?CERT_PATH, element(2, file:get_cwd())}} + {skip, {E, SslDir, element(2, file:get_cwd())}} end; init_per_group(GroupName, Config) -> Config2 = lists:keystore(scram_group, 1, Config, diff --git a/test/config_parser_SUITE.erl b/test/config_parser_SUITE.erl new file mode 100644 index 00000000000..08a69ed98ee --- /dev/null +++ b/test/config_parser_SUITE.erl @@ -0,0 +1,3254 @@ +-module(config_parser_SUITE). +-compile([export_all]). + +-include_lib("common_test/include/ct.hrl"). +-include_lib("eunit/include/eunit.hrl"). + +-include("ejabberd_config.hrl"). + +-define(eq(Expected, Actual), ?assertEqual(Expected, Actual)). + +-define(err(Expr), ?assertError(_, Expr)). + +-define(HOST, <<"myhost">>). + +-define(add_loc(X), {X, #{line => ?LINE}}). + +-define(eqf(Expected, Actual), eq_host_config(Expected, Actual)). +-define(errf(Config), + begin ?err(parse_with_host(Config)), ?err(parse_host_config(Config)) end). + +%% Constructs HOF to pass into run_multi/1 function +%% It's a HOF, so it would always pass if not passed into run_multi/1 +-define(_eqf(Expected, Actual), ?add_loc(fun() -> ?eqf(Expected, Actual) end)). +-define(_errf(Config), ?add_loc(fun() -> ?errf(Config) end)). + +-import(mongoose_config_parser_toml, [parse/1]). 
%% CT callback: top-level specification - run every test group.
all() ->
    [{group, equivalence},
     {group, general},
     {group, listen},
     {group, auth},
     {group, pool},
     {group, shaper_acl_access},
     {group, s2s},
     {group, modules},
     {group, services}].

%% CT callback: one group per configuration area. The cases are independent
%% of each other, so every group runs its cases in parallel.
groups() ->
    [{equivalence, [parallel], [sample_pgsql,
                                miscellaneous,
                                s2s,
                                modules,
                                outgoing_pools]},
     {general, [parallel], [loglevel,
                            hosts,
                            registration_timeout,
                            language,
                            all_metrics_are_global,
                            sm_backend,
                            max_fsm_queue,
                            http_server_name,
                            rdbms_server_type,
                            override,
                            pgsql_users_number_estimate,
                            route_subdomains,
                            mongooseimctl_access_commands,
                            routing_modules,
                            replaced_wait_timeout,
                            hide_service_name]},
     {listen, [parallel], [listen_portip,
                           listen_proto,
                           listen_ip_version,
                           listen_backlog,
                           listen_proxy_protocol,
                           listen_num_acceptors,
                           listen_access,
                           listen_shaper,
                           listen_xml_socket,
                           listen_zlib,
                           listen_hibernate_after,
                           listen_max_fsm_queue,
                           listen_max_stanza_size,
                           listen_tls_mode,
                           listen_tls_module,
                           listen_tls_verify,
                           listen_tls_verify_mode,
                           listen_tls_crl_files,
                           listen_tls_certfile,
                           listen_tls_cacertfile,
                           listen_tls_dhfile,
                           listen_tls_ciphers,
                           listen_tls_versions,
                           listen_tls_protocol_options,
                           listen_check_from,
                           listen_hidden_components,
                           listen_conflict_behaviour,
                           listen_password,
                           listen_http_num_acceptors,
                           listen_http_max_connections,
                           listen_http_compress,
                           listen_http_handlers,
                           listen_http_handlers_websockets,
                           listen_http_handlers_lasse,
                           listen_http_handlers_static,
                           listen_http_handlers_api]},
     {auth, [parallel], [auth_methods,
                         auth_password_format,
                         auth_scram_iterations,
                         auth_sasl_external,
                         auth_allow_multiple_connections,
                         auth_anonymous_protocol,
                         auth_sasl_mechanisms,
                         auth_ldap_pool,
                         auth_ldap_bind_pool,
                         auth_ldap_base,
                         auth_ldap_uids,
                         auth_ldap_filter,
                         auth_ldap_dn_filter,
                         auth_ldap_local_filter,
                         auth_ldap_deref,
                         auth_external_instances,
                         auth_external_program,
                         auth_http_basic_auth,
                         auth_jwt,
                         auth_riak_bucket_type]},
     {pool, [parallel], [pool_type,
                         pool_tag,
                         pool_scope,
                         pool_workers,
                         pool_strategy,
                         pool_call_timeout,
                         pool_rdbms_settings,
                         pool_rdbms_keepalive_interval,
                         pool_rdbms_server,
                         pool_rdbms_port,
                         pool_rdbms_tls,
                         pool_http_host,
                         pool_http_path_prefix,
                         pool_http_request_timeout,
                         pool_http_tls,
                         pool_redis_host,
                         pool_redis_port,
                         pool_redis_database,
                         pool_redis_password,
                         pool_riak_address,
                         pool_riak_port,
                         pool_riak_credentials,
                         pool_riak_cacertfile,
                         pool_riak_certfile,
                         pool_riak_keyfile,
                         pool_riak_tls,
                         pool_cassandra_servers,
                         pool_cassandra_keyspace,
                         pool_cassandra_auth,
                         pool_cassandra_tls,
                         pool_ldap_host,
                         pool_ldap_port,
                         pool_ldap_servers,
                         pool_ldap_encrypt,
                         pool_ldap_rootdn,
                         pool_ldap_password,
                         pool_ldap_connect_interval,
                         pool_ldap_tls]},
     {shaper_acl_access, [parallel], [shaper,
                                      acl,
                                      access]},
     {s2s, [parallel], [s2s_dns_timeout,
                        s2s_dns_retries,
                        s2s_outgoing_port,
                        s2s_outgoing_ip_versions,
                        s2s_outgoing_timeout,
                        s2s_use_starttls,
                        s2s_certfile,
                        s2s_default_policy,
                        s2s_host_policy,
                        s2s_address,
                        s2s_ciphers,
                        s2s_domain_certfile,
                        s2s_shared,
                        s2s_max_retry_delay]},
     {modules, [parallel], [mod_adhoc,
                            mod_auth_token,
                            mod_bosh,
                            mod_caps,
                            mod_carboncopy,
                            mod_csi,
                            mod_disco,
                            mod_inbox,
                            mod_global_distrib,
                            mod_event_pusher,
                            mod_extdisco,
                            mod_http_upload,
                            mod_jingle_sip,
                            mod_keystore,
                            mod_last,
                            mod_mam_meta,
                            mod_muc,
                            mod_muc_log,
                            mod_muc_light,
                            mod_offline,
                            mod_ping,
                            mod_privacy,
                            mod_private,
                            mod_pubsub,
                            mod_push_service_mongoosepush,
                            mod_register,
                            mod_revproxy,
                            mod_roster,
                            mod_shared_roster_ldap,
                            mod_sic,
                            mod_stream_management,
                            mod_time,
                            mod_vcard,
                            mod_version]},
     {services, [parallel], [service_admin_extra,
                             service_mongoose_system_metrics]}
    ].

%% CT callback: the parser depends on the 'jid' application being started;
%% create_files/1 presumably generates files referenced by the test configs
%% (defined later in this suite).
init_per_suite(Config) ->
    {ok, _} = application:ensure_all_started(jid),
    create_files(Config),
    Config.
+ +end_per_suite(_Config) -> + ok. + +sample_pgsql(Config) -> + test_equivalence_between_files(Config, "mongooseim-pgsql.cfg", "mongooseim-pgsql.toml"). + +miscellaneous(Config) -> + test_equivalence_between_files(Config, "miscellaneous.cfg", "miscellaneous.toml"). + +s2s(Config) -> + test_equivalence_between_files(Config, "s2s_only.cfg", "s2s_only.toml"). + +modules(Config) -> + test_equivalence_between_files(Config, "modules.cfg", "modules.toml"). + +outgoing_pools(Config) -> + test_equivalence_between_files(Config, "outgoing_pools.cfg", "outgoing_pools.toml"). + +%% tests: general +loglevel(_Config) -> + ?eq([#local_config{key = loglevel, value = debug}], + parse(#{<<"general">> => #{<<"loglevel">> => <<"debug">>}})), + ?err(parse(#{<<"general">> => #{<<"loglevel">> => <<"bebug">>}})), + %% make sure non-host options are not accepted in host_config + ?err(parse_host_config(#{<<"general">> => #{<<"loglevel">> => <<"debug">>}})). + +hosts(_Config) -> + ?eq([#config{key = hosts, value = [<<"host1">>, <<"host2">>]}], + parse(#{<<"general">> => #{<<"hosts">> => [<<"host1">>, <<"host2">>]}})), + ?err(parse(#{<<"general">> => #{<<"hosts">> => [<<"what is this?">>]}})), + ?err(parse(#{<<"general">> => #{<<"hosts">> => [<<>>]}})), + ?err(parse(#{<<"general">> => #{<<"hosts">> => []}})), + ?err(parse(#{<<"general">> => #{<<"hosts">> => [<<"host1">>, <<"host1">>]}})). + +registration_timeout(_Config) -> + ?eq([#local_config{key = registration_timeout, value = infinity}], + parse(#{<<"general">> => #{<<"registration_timeout">> => <<"infinity">>}})), + ?eq([#local_config{key = registration_timeout, value = 300}], + parse(#{<<"general">> => #{<<"registration_timeout">> => 300}})), + ?err(parse(#{<<"general">> => #{<<"registration_timeout">> => 0}})). + +language(_Config) -> + ?eq([#config{key = language, value = <<"en">>}], + parse(#{<<"general">> => #{<<"language">> => <<"en">>}})), + ?err(parse(#{<<"general">> => #{<<"language">> => <<>>}})). 
+ +all_metrics_are_global(_Config) -> + ?eq([#local_config{key = all_metrics_are_global, value = true}], + parse(#{<<"general">> => #{<<"all_metrics_are_global">> => true}})), + ?err(parse(#{<<"general">> => #{<<"all_metrics_are_global">> => <<"true">>}})). + +sm_backend(_Config) -> + ?eq([#config{key = sm_backend, value = {mnesia, []}}], + parse(#{<<"general">> => #{<<"sm_backend">> => <<"mnesia">>}})), + ?eq([#config{key = sm_backend, value = {redis, []}}], + parse(#{<<"general">> => #{<<"sm_backend">> => <<"redis">>}})), + ?err(parse(#{<<"general">> => #{<<"sm_backend">> => <<"amnesia">>}})). + +max_fsm_queue(_Config) -> + ?eq([#local_config{key = max_fsm_queue, value = 100}], + parse(#{<<"general">> => #{<<"max_fsm_queue">> => 100}})), + ?err(parse(#{<<"general">> => #{<<"max_fsm_queue">> => -10}})). + +http_server_name(_Config) -> + ?eq([#local_config{key = cowboy_server_name, value = "my server"}], + parse(#{<<"general">> => #{<<"http_server_name">> => <<"my server">>}})), + ?err(parse(#{<<"general">> => #{<<"http_server_name">> => #{}}})). + +rdbms_server_type(_Config) -> + ?eq([#local_config{key = rdbms_server_type, value = mssql}], + parse(#{<<"general">> => #{<<"rdbms_server_type">> => <<"mssql">>}})), + ?eq([#local_config{key = rdbms_server_type, value = pgsql}], + parse(#{<<"general">> => #{<<"rdbms_server_type">> => <<"pgsql">>}})), + ?err(parse(#{<<"general">> => #{<<"rdbms_server_type">> => <<"nosql">>}})). + +override(_Config) -> + ?eq([{override, local}, {override, global}, {override, acls}], + parse(#{<<"general">> => #{<<"override">> => [<<"local">>, <<"global">>, <<"acls">>]}})), + ?err(parse(#{<<"general">> => #{<<"override">> => [<<"local">>, <<"global">>, <<"local">>]}})), + ?err(parse(#{<<"general">> => #{<<"override">> => [<<"pingpong">>]}})). 
+ +pgsql_users_number_estimate(_Config) -> + eq_host_config([#local_config{key = {pgsql_users_number_estimate, ?HOST}, value = true}], + #{<<"general">> => #{<<"pgsql_users_number_estimate">> => true}}), + err_host_config(#{<<"general">> => #{<<"pgsql_users_number_estimate">> => 1200}}). + +route_subdomains(_Config) -> + eq_host_config([#local_config{key = {route_subdomains, ?HOST}, value = s2s}], + #{<<"general">> => #{<<"route_subdomains">> => <<"s2s">>}}), + err_host_config(#{<<"general">> => #{<<"route_subdomains">> => <<"c2s">>}}). + +mongooseimctl_access_commands(_Config) -> + AccessRule = #{<<"commands">> => [<<"join_cluster">>], + <<"argument_restrictions">> => #{<<"node">> => <<"mim1@host1">>}}, + ?eq([#local_config{key = mongooseimctl_access_commands, + value = [{local, ["join_cluster"], [{node, "mim1@host1"}]}] + }], + parse(#{<<"general">> => #{<<"mongooseimctl_access_commands">> => + #{<<"local">> => AccessRule}}})), + ?eq([#local_config{key = mongooseimctl_access_commands, + value = [{local, all, []}] + }], + parse(#{<<"general">> => #{<<"mongooseimctl_access_commands">> => + #{<<"local">> => #{<<"commands">> => <<"all">>}}}})), + ?err(parse(#{<<"general">> => + #{<<"mongooseimctl_access_commands">> => + #{<<"local">> => #{<<"argument_restrictions">> => + #{<<"node">> => <<"mim1@host1">>}}} + }})), + ?err(parse(#{<<"general">> => #{<<"mongooseimctl_access_commands">> => + #{<<"local">> => #{<<"commands">> => <<"none">>}} + }})). + +routing_modules(_Config) -> + ?eq([#local_config{key = routing_modules, value = [mongoose_router_global, + mongoose_router_localdomain]}], + parse(#{<<"general">> => #{<<"routing_modules">> => [<<"mongoose_router_global">>, + <<"mongoose_router_localdomain">>]}})), + ?err(parse(#{<<"general">> => #{<<"routing_modules">> => [<<"moongoose_router_global">>]}})). 
+ +replaced_wait_timeout(_Config) -> + eq_host_config([#local_config{key = {replaced_wait_timeout, ?HOST}, value = 1000}], + #{<<"general">> => #{<<"replaced_wait_timeout">> => 1000}}), + err_host_config(#{<<"general">> => #{<<"replaced_wait_timeout">> => 0}}). + +hide_service_name(_Config) -> + eq_host_config([#local_config{key = {hide_service_name, ?HOST}, value = false}], + #{<<"general">> => #{<<"hide_service_name">> => false}}), + err_host_config(#{<<"general">> => #{<<"hide_service_name">> => []}}). + +%% tests: listen + +listen_portip(_Config) -> + ?eq(listener_config(ejabberd_c2s, []), parse_listener(<<"c2s">>, #{})), + ?eq([#local_config{key = listen, + value = [{{5222, {192, 168, 1, 16}, tcp}, ejabberd_c2s, []}]}], + parse_listener(<<"c2s">>, #{<<"ip_address">> => <<"192.168.1.16">>})), + ?eq([#local_config{key = listen, + value = [{{5222, {8193, 3512, 3, 4, 5, 6, 7, 8}, tcp}, ejabberd_c2s, []}]}], + parse_listener(<<"c2s">>, #{<<"ip_address">> => <<"2001:db8:3:4:5:6:7:8">>})), + ?err(parse_listener(<<"c2s">>, #{<<"ip_address">> => <<"192.168.1.999">>})), + ?err(parse(#{<<"listen">> => #{<<"c2s">> => [#{<<"ip_address">> => <<"192.168.1.16">>}]}})), + ?err(parse(#{<<"listen">> => #{<<"c2s">> => [#{<<"port">> => <<"5222">>}]}})), + ?err(parse(#{<<"listen">> => #{<<"c2s">> => [#{<<"port">> => 522222}]}})). + +listen_proto(_Config) -> + ?eq(listener_config(ejabberd_c2s, [{proto, tcp}]), + parse_listener(<<"c2s">>, #{<<"proto">> => <<"tcp">>})), + ?eq([#local_config{key = listen, + value = [{{5222, {0, 0, 0, 0}, udp}, ejabberd_c2s, [{proto, udp}]}]}], + parse_listener(<<"c2s">>, #{<<"proto">> => <<"udp">>})), + %% 'ejabberd_listener:normalize_proto/1' shows a warning message and falls back to 'tcp' + ?eq(listener_config(ejabberd_c2s, [{proto, pigeon}]), + parse_listener(<<"c2s">>, #{<<"proto">> => <<"pigeon">>})). 
+ +listen_ip_version(_Config) -> + ?eq(listener_config(ejabberd_c2s, [inet]), + parse_listener(<<"c2s">>, #{<<"ip_version">> => 4})), + ?eq([#local_config{key = listen, + value = [{{5222, {0, 0, 0, 0, 0, 0, 0, 0}, tcp}, ejabberd_c2s, []}]}], + parse_listener(<<"c2s">>, #{<<"ip_version">> => 6})), + ?err(parse_listener(<<"c2s">>, #{<<"ip_version">> => 7})). + +listen_backlog(_Config) -> + ?eq(listener_config(ejabberd_c2s, [{backlog, 10}]), + parse_listener(<<"c2s">>, #{<<"backlog">> => 10})), + ?err(parse_listener(<<"c2s">>, #{<<"backlog">> => -10})). + +listen_proxy_protocol(_Config) -> + ?eq(listener_config(ejabberd_c2s, [{proxy_protocol, true}]), + parse_listener(<<"c2s">>, #{<<"proxy_protocol">> => true})), + ?eq(listener_config(ejabberd_s2s_in, [{proxy_protocol, true}]), + parse_listener(<<"s2s">>, #{<<"proxy_protocol">> => true})), + ?eq(listener_config(ejabberd_service, [{proxy_protocol, true}]), + parse_listener(<<"service">>, #{<<"proxy_protocol">> => true})), + ?err(parse_listener(<<"c2s">>, #{<<"proxy_protocol">> => <<"awesome">>})). + +listen_num_acceptors(_Config) -> + ?eq(listener_config(ejabberd_c2s, [{acceptors_num, 100}]), + parse_listener(<<"c2s">>, #{<<"num_acceptors">> => 100})), + ?eq(listener_config(ejabberd_s2s_in, [{acceptors_num, 100}]), + parse_listener(<<"s2s">>, #{<<"num_acceptors">> => 100})), + ?eq(listener_config(ejabberd_service, [{acceptors_num, 100}]), + parse_listener(<<"service">>, #{<<"num_acceptors">> => 100})), + ?err(parse_listener(<<"c2s">>, #{<<"num_acceptors">> => 0})). + +listen_access(_Config) -> + ?eq(listener_config(ejabberd_c2s, [{access, rule1}]), + parse_listener(<<"c2s">>, #{<<"access">> => <<"rule1">>})), + ?eq(listener_config(ejabberd_service, [{access, rule1}]), + parse_listener(<<"service">>, #{<<"access">> => <<"rule1">>})), + ?err(parse_listener(<<"c2s">>, #{<<"access">> => <<>>})). 
+ +listen_shaper(_Config) -> + ?eq(listener_config(ejabberd_c2s, [{shaper, c2s_shaper}]), + parse_listener(<<"c2s">>, #{<<"shaper">> => <<"c2s_shaper">>})), + ?eq(listener_config(ejabberd_s2s_in, [{shaper, s2s_shaper}]), + parse_listener(<<"s2s">>, #{<<"shaper">> => <<"s2s_shaper">>})), + ?eq(listener_config(ejabberd_service, [{shaper_rule, fast}]), + parse_listener(<<"service">>, #{<<"shaper_rule">> => <<"fast">>})), + ?err(parse_listener(<<"s2s">>, #{<<"shaper">> => <<>>})). + +listen_xml_socket(_Config) -> + ?eq(listener_config(ejabberd_c2s, [{xml_socket, true}]), + parse_listener(<<"c2s">>, #{<<"xml_socket">> => true})), + ?err(parse_listener(<<"c2s">>, #{<<"xml_socket">> => 10})). + +listen_zlib(_Config) -> + ?eq(listener_config(ejabberd_c2s, [{zlib, 1024}]), + parse_listener(<<"c2s">>, #{<<"zlib">> => 1024})), + ?err(parse_listener(<<"c2s">>, #{<<"zlib">> => 0})). + +listen_hibernate_after(_Config) -> + ?eq(listener_config(ejabberd_c2s, [{hibernate_after, 10}]), + parse_listener(<<"c2s">>, #{<<"hibernate_after">> => 10})), + ?eq(listener_config(ejabberd_s2s_in, [{hibernate_after, 10}]), + parse_listener(<<"s2s">>, #{<<"hibernate_after">> => 10})), + ?eq(listener_config(ejabberd_service, [{hibernate_after, 10}]), + parse_listener(<<"service">>, #{<<"hibernate_after">> => 10})), + ?err(parse_listener(<<"c2s">>, #{<<"hibernate_after">> => -10})). + +listen_max_stanza_size(_Config) -> + ?eq(listener_config(ejabberd_c2s, [{max_stanza_size, 10000}]), + parse_listener(<<"c2s">>, #{<<"max_stanza_size">> => 10000})), + ?eq(listener_config(ejabberd_s2s_in, [{max_stanza_size, 10000}]), + parse_listener(<<"s2s">>, #{<<"max_stanza_size">> => 10000})), + ?eq(listener_config(ejabberd_service, [{max_stanza_size, 10000}]), + parse_listener(<<"service">>, #{<<"max_stanza_size">> => 10000})), + ?err(parse_listener(<<"c2s">>, #{<<"max_stanza_size">> => <<"infinity">>})). 
+ +listen_max_fsm_queue(_Config) -> + ?eq(listener_config(ejabberd_c2s, [{max_fsm_queue, 1000}]), + parse_listener(<<"c2s">>, #{<<"max_fsm_queue">> => 1000})), + ?eq(listener_config(ejabberd_service, [{max_fsm_queue, 1000}]), + parse_listener(<<"service">>, #{<<"max_fsm_queue">> => 1000})), + ?err(parse_listener(<<"s2s">>, #{<<"max_fsm_queue">> => 1000})), % only for c2s and service + ?err(parse_listener(<<"c2s">>, #{<<"max_fsm_queue">> => 0})). + +listen_tls_mode(_Config) -> + ?eq(listener_config(ejabberd_c2s, [starttls]), + parse_listener(<<"c2s">>, #{<<"tls">> => #{<<"mode">> => <<"starttls">>}})), + ?err(parse_listener(<<"c2s">>, #{<<"tls">> => #{<<"mode">> => <<"stoptls">>}})). + +listen_tls_module(_Config) -> + ?eq(listener_config(ejabberd_c2s, [{tls_module, just_tls}]), + parse_listener(<<"c2s">>, #{<<"tls">> => #{<<"module">> => <<"just_tls">>}})), + ?eq(listener_config(ejabberd_c2s, []), + parse_listener(<<"c2s">>, #{<<"tls">> => #{<<"module">> => <<"fast_tls">>}})), + ?err(parse_listener(<<"c2s">>, #{<<"tls">> => #{<<"module">> => <<"slow_tls">>}})). + +listen_tls_verify(_Config) -> + ?eq(listener_config(ejabberd_c2s, [verify_peer]), + parse_listener(<<"c2s">>, #{<<"tls">> => #{<<"verify_peer">> => true}})), + ?eq(listener_config(ejabberd_c2s, [verify_none]), + parse_listener(<<"c2s">>, #{<<"tls">> => #{<<"verify_peer">> => false}})), + ?eq(listener_config(ejabberd_c2s, [{tls_module, just_tls}, verify_peer]), + parse_listener(<<"c2s">>, #{<<"tls">> => #{<<"module">> => <<"just_tls">>, + <<"verify_peer">> => true}})), + ?eq(listener_config(ejabberd_cowboy, [{ssl, [{verify, verify_peer}]}]), + parse_listener(<<"http">>, #{<<"tls">> => #{<<"verify_peer">> => true}})), + ?eq(listener_config(ejabberd_c2s, [{tls_module, just_tls}, verify_none]), + parse_listener(<<"c2s">>, #{<<"tls">> => #{<<"module">> => <<"just_tls">>, + <<"verify_peer">> => false}})), + ?err(parse_listener(<<"c2s">>, #{<<"tls">> => #{<<"verify_peer">> => <<"maybe">>}})). 
+ +listen_tls_verify_mode(_Config) -> + ?eq(listener_config(ejabberd_c2s, [{tls_module, just_tls}, + {ssl_options, [{verify_fun, {peer, true}}]}]), + parse_listener(<<"c2s">>, #{<<"tls">> => #{<<"module">> => <<"just_tls">>, + <<"verify_mode">> => <<"peer">>}})), + ?eq(listener_config(ejabberd_c2s, [{tls_module, just_tls}, + {ssl_options, [{verify_fun, {selfsigned_peer, false}}]}]), + parse_listener(<<"c2s">>, #{<<"tls">> => #{<<"module">> => <<"just_tls">>, + <<"verify_mode">> => <<"selfsigned_peer">>, + <<"disconnect_on_failure">> => false}})), + ?eq(listener_config(ejabberd_cowboy, [{ssl, [{verify_mode, peer}]}]), + parse_listener(<<"http">>, #{<<"tls">> => #{<<"verify_mode">> => <<"peer">>}})), + ?err(parse_listener(<<"c2s">>, #{<<"tls">> => #{<<"module">> => <<"just_tls">>, + <<"verify_mode">> => <<"peer">>, + <<"disconnect_on_failure">> => <<"false">>}})), + ?err(parse_listener(<<"c2s">>, #{<<"tls">> => #{<<"module">> => <<"just_tls">>, + <<"verify_mode">> => <<"whatever">>}})), + ?err(parse_listener(<<"http">>, #{<<"tls">> => #{<<"verify_mode">> => <<"whatever">>}})). + +listen_tls_crl_files(_Config) -> + ?eq(listener_config(ejabberd_c2s, [{tls_module, just_tls}, + {crlfiles, ["file1", "file2"]}]), + parse_listener(<<"c2s">>, #{<<"tls">> => #{<<"module">> => <<"just_tls">>, + <<"crl_files">> => [<<"file1">>, + <<"file2">>]}})), + ?err(parse_listener(<<"c2s">>, #{<<"tls">> => #{<<"module">> => <<"just_tls">>, + <<"crl_files">> => [<<>>]}})), + %% only for just_tls + ?err(parse_listener(<<"c2s">>, #{<<"tls">> => #{<<"crl_files">> => [<<"file1">>, + <<"file2">>]}})). 
+ +listen_tls_certfile(_Config) -> + ?eq(listener_config(ejabberd_c2s, [{certfile, "mycert.pem"}]), + parse_listener(<<"c2s">>, #{<<"tls">> => #{<<"certfile">> => <<"mycert.pem">>}})), + ?eq(listener_config(ejabberd_c2s, [{tls_module, just_tls}, + {ssl_options, [{certfile, "mycert.pem"}]}]), + parse_listener(<<"c2s">>, #{<<"tls">> => #{<<"module">> => <<"just_tls">>, + <<"certfile">> => <<"mycert.pem">>}})), + ?eq(listener_config(ejabberd_cowboy, [{ssl, [{certfile, "mycert.pem"}]}]), + parse_listener(<<"http">>, #{<<"tls">> => #{<<"certfile">> => <<"mycert.pem">>}})), + ?err(parse_listener(<<"c2s">>, #{<<"tls">> => #{<<"certfile">> => <<>>}})). + +listen_tls_cacertfile(_Config) -> + ?eq(listener_config(ejabberd_c2s, [{cafile, "cacert.pem"}]), + parse_listener(<<"c2s">>, #{<<"tls">> => #{<<"cacertfile">> => <<"cacert.pem">>}})), + ?eq(listener_config(ejabberd_s2s_in, [{cafile, "cacert.pem"}]), + parse_listener(<<"s2s">>, #{<<"tls">> => #{<<"cacertfile">> => <<"cacert.pem">>}})), + ?eq(listener_config(ejabberd_c2s, [{tls_module, just_tls}, + {ssl_options, [{cacertfile, "cacert.pem"}]}]), + parse_listener(<<"c2s">>, #{<<"tls">> => #{<<"module">> => <<"just_tls">>, + <<"cacertfile">> => <<"cacert.pem">>}})), + ?eq(listener_config(ejabberd_cowboy, [{ssl, [{cacertfile, "cacert.pem"}]}]), + parse_listener(<<"http">>, #{<<"tls">> => #{<<"cacertfile">> => <<"cacert.pem">>}})), + ?err(parse_listener(<<"c2s">>, #{<<"tls">> => #{<<"cacertfile">> => <<>>}})). 
+ +listen_tls_dhfile(_Config) -> + ?eq(listener_config(ejabberd_c2s, [{dhfile, "dh.pem"}]), + parse_listener(<<"c2s">>, #{<<"tls">> => #{<<"dhfile">> => <<"dh.pem">>}})), + ?eq(listener_config(ejabberd_s2s_in, [{dhfile, "dh.pem"}]), + parse_listener(<<"s2s">>, #{<<"tls">> => #{<<"dhfile">> => <<"dh.pem">>}})), + ?eq(listener_config(ejabberd_c2s, [{tls_module, just_tls}, + {ssl_options, [{dhfile, "dh.pem"}]}]), + parse_listener(<<"c2s">>, #{<<"tls">> => #{<<"module">> => <<"just_tls">>, + <<"dhfile">> => <<"dh.pem">>}})), + ?eq(listener_config(ejabberd_cowboy, [{ssl, [{dhfile, "dh.pem"}]}]), + parse_listener(<<"http">>, #{<<"tls">> => #{<<"dhfile">> => <<"dh.pem">>}})), + ?err(parse_listener(<<"c2s">>, #{<<"tls">> => #{<<"dhfile">> => <<>>}})). + + +listen_tls_ciphers(_Config) -> + %% fast_tls ciphers may contain versions as well + ?eq(listener_config(ejabberd_c2s, [{ciphers, "TLSv1.2:TLSv1.3"}]), + parse_listener(<<"c2s">>, + #{<<"tls">> => #{<<"ciphers">> => <<"TLSv1.2:TLSv1.3">>}})), + ?eq(listener_config(ejabberd_s2s_in, [{ciphers, "TLSv1.2:TLSv1.3"}]), + parse_listener(<<"s2s">>, + #{<<"tls">> => #{<<"ciphers">> => <<"TLSv1.2:TLSv1.3">>}})), + ?eq(listener_config(ejabberd_c2s, [{tls_module, just_tls}, + {ssl_options, [{ciphers, ["TLS_AES_256_GCM_SHA384"]}]}]), + parse_listener(<<"c2s">>, + #{<<"tls">> => #{<<"module">> => <<"just_tls">>, + <<"ciphers">> => [<<"TLS_AES_256_GCM_SHA384">>]}})), + ?eq(listener_config(ejabberd_c2s, [{tls_module, just_tls}, + {ssl_options, [{ciphers, [#{cipher => aes_256_gcm, + key_exchange => any, + mac => aead, + prf => sha384}]}]}]), + parse_listener(<<"c2s">>, + #{<<"tls">> => #{<<"module">> => <<"just_tls">>, + <<"ciphers">> => [#{<<"cipher">> => <<"aes_256_gcm">>, + <<"key_exchange">> => <<"any">>, + <<"mac">> => <<"aead">>, + <<"prf">> => <<"sha384">>}]}})), + ?err(parse_listener(<<"c2s">>, + #{<<"tls">> => + #{<<"module">> => <<"just_tls">>, + <<"ciphers">> => [#{<<"cipher">> => <<"aes_256_gcm">>}]}})). 
+ +listen_tls_versions(_Config) -> + ?eq(listener_config(ejabberd_c2s, [{tls_module, just_tls}, + {ssl_options, [{versions, ['tlsv1.2', 'tlsv1.3']}]}]), + parse_listener(<<"c2s">>, + #{<<"tls">> => #{<<"module">> => <<"just_tls">>, + <<"versions">> => [<<"tlsv1.2">>, <<"tlsv1.3">>]}})), + ?err(parse_listener(<<"c2s">>, + #{<<"tls">> => #{<<"module">> => <<"just_tls">>, + <<"versions">> => <<"tlsv1.2">>}})). + +listen_tls_protocol_options(_Config) -> + ?eq(listener_config(ejabberd_c2s, [{protocol_options, ["nosslv2"]}]), + parse_listener(<<"c2s">>, #{<<"tls">> => #{<<"protocol_options">> => [<<"nosslv2">>]}})), + ?eq(listener_config(ejabberd_s2s_in, [{protocol_options, ["nosslv2"]}]), + parse_listener(<<"s2s">>, #{<<"tls">> => #{<<"protocol_options">> => [<<"nosslv2">>]}})), + ?err(parse_listener(<<"c2s">>, #{<<"tls">> => #{<<"protocol_options">> => [<<>>]}})), + ?err(parse_listener(<<"s2s">>, #{<<"tls">> => #{<<"protocol_options">> => [<<>>]}})), + ?err(parse_listener(<<"c2s">>, #{<<"tls">> => #{<<"module">> => <<"just_tls">>, + <<"protocol_options">> => [<<"nosslv2">>]}})). + +listen_check_from(_Config) -> + ?eq(listener_config(ejabberd_service, [{service_check_from, false}]), + parse_listener(<<"service">>, #{<<"check_from">> => false})), + ?err(parse_listener(<<"service">>, #{<<"check_from">> => 1})). + +listen_hidden_components(_Config) -> + ?eq(listener_config(ejabberd_service, [{hidden_components, true}]), + parse_listener(<<"service">>, #{<<"hidden_components">> => true})), + ?err(parse_listener(<<"service">>, #{<<"hidden_components">> => <<"yes">>})). + +listen_conflict_behaviour(_Config) -> + ?eq(listener_config(ejabberd_service, [{conflict_behaviour, kick_old}]), + parse_listener(<<"service">>, #{<<"conflict_behaviour">> => <<"kick_old">>})), + ?err(parse_listener(<<"service">>, #{<<"conflict_behaviour">> => <<"kill_server">>})). 
+ +listen_password(_Config) -> + ?eq(listener_config(ejabberd_service, [{password, "secret"}]), + parse_listener(<<"service">>, #{<<"password">> => <<"secret">>})), + ?err(parse_listener(<<"service">>, #{<<"password">> => <<>>})). + +listen_http_num_acceptors(_Config) -> + ?eq(listener_config(ejabberd_cowboy, [{transport_options, [{num_acceptors, 10}]}]), + parse_listener(<<"http">>, #{<<"transport">> => #{<<"num_acceptors">> => 10}})), + ?err(parse_listener(<<"http">>, #{<<"transport">> => #{<<"num_acceptors">> => 0}})). + +listen_http_max_connections(_Config) -> + ?eq(listener_config(ejabberd_cowboy, [{transport_options, [{max_connections, 100}]}]), + parse_listener(<<"http">>, #{<<"transport">> => #{<<"max_connections">> => 100}})), + ?eq(listener_config(ejabberd_cowboy, [{transport_options, [{max_connections, infinity}]}]), + parse_listener(<<"http">>, #{<<"transport">> => + #{<<"max_connections">> => <<"infinity">>}})), + ?err(parse_listener(<<"http">>, #{<<"transport">> => #{<<"max_connections">> => -1}})). + +listen_http_compress(_Config) -> + ?eq(listener_config(ejabberd_cowboy, [{protocol_options, [{compress, true}]}]), + parse_listener(<<"http">>, #{<<"protocol">> => #{<<"compress">> => true}})), + ?err(parse_listener(<<"http">>, #{<<"protocol">> => #{<<"compress">> => 0}})). 
+ +listen_http_handlers(_Config) -> + ?eq(listener_config(ejabberd_cowboy, [{modules, [{"_", "/http-bind", mod_bosh, []}]}]), + parse_listener(<<"http">>, #{<<"handlers">> => + #{<<"mod_bosh">> => + [#{<<"host">> => <<"_">>, + <<"path">> => <<"/http-bind">>}]}})), + ?err(parse_listener(<<"http">>, #{<<"handlers">> => + #{<<"mod_bosch">> => + [#{<<"host">> => <<"dishwasher">>, + <<"path">> => <<"/cutlery">>}]}})), + ?err(parse_listener(<<"http">>, #{<<"handlers">> => + #{<<"mod_bosh">> => + [#{<<"host">> => <<"pathless">>}]}})), + ?err(parse_listener(<<"http">>, #{<<"handlers">> => + #{<<"mod_bosh">> => + [#{<<"host">> => <<>>, + <<"path">> => <<"/">>}]}})), + ?err(parse_listener(<<"http">>, #{<<"handlers">> => + #{<<"mod_bosh">> => + [#{<<"path">> => <<"hostless">>}]}})). + +listen_http_handlers_websockets(_Config) -> + ?eq(listener_config(ejabberd_cowboy, [{modules, [{"localhost", "/api", mod_websockets, []}]}]), + parse_http_handler(<<"mod_websockets">>, #{})), + ?eq(listener_config(ejabberd_cowboy, [{modules, [{"localhost", "/api", mod_websockets, + [{ejabberd_service, [{access, all}]}] + }]}]), + parse_http_handler(<<"mod_websockets">>, #{<<"service">> => #{<<"access">> => <<"all">>}})), + ?err(parse_http_handler(<<"mod_websockets">>, #{<<"service">> => <<"unbelievable">>})). + +listen_http_handlers_lasse(_Config) -> + ?eq(listener_config(ejabberd_cowboy, [{modules, [{"localhost", "/api", lasse_handler, + [mongoose_client_api_sse] + }]}]), + parse_http_handler(<<"lasse_handler">>, #{<<"module">> => <<"mongoose_client_api_sse">>})), + ?err(parse_http_handler(<<"lasse_handler">>, #{<<"module">> => <<"mooongooose_api_ssie">>})), + ?err(parse_http_handler(<<"lasse_handler">>, #{})). 
+ +listen_http_handlers_static(_Config) -> + ?eq(listener_config(ejabberd_cowboy, [{modules, [{"localhost", "/api", cowboy_static, + {priv_dir, cowboy_swagger, "swagger", + [{mimetypes, cow_mimetypes, all}]} + }]}]), + parse_http_handler(<<"cowboy_static">>, #{<<"type">> => <<"priv_dir">>, + <<"app">> => <<"cowboy_swagger">>, + <<"content_path">> => <<"swagger">>})), + ?err(parse_http_handler(<<"cowboy_static">>, #{<<"type">> => <<"priv_dir">>, + <<"app">> => <<"cowboy_swagger">>})). + +listen_http_handlers_api(_Config) -> + ?eq(listener_config(ejabberd_cowboy, [{modules, [{"localhost", "/api", mongoose_api, + [{handlers, [mongoose_api_metrics, + mongoose_api_users]}]} + ]}]), + parse_http_handler(<<"mongoose_api">>, #{<<"handlers">> => [<<"mongoose_api_metrics">>, + <<"mongoose_api_users">>]})), + ?err(parse_http_handler(<<"mongoose_api">>, #{<<"handlers">> => [<<"not_an_api_module">>]})), + ?err(parse_http_handler(<<"mongoose_api">>, #{})). + +%% tests: auth + +auth_methods(_Config) -> + eq_host_config( + [#local_config{key = {auth_opts, ?HOST}, value = []}, + #local_config{key = {auth_method, ?HOST}, value = [internal, rdbms]}], + #{<<"auth">> => #{<<"methods">> => [<<"internal">>, <<"rdbms">>]}}), + err_host_config(#{<<"auth">> => #{<<"methods">> => [<<"supernatural">>]}}). 
+ +auth_password_format(_Config) -> + eq_host_config( + [#local_config{key = {auth_opts, ?HOST}, + value = [{password_format, {scram, [sha, sha256]}}]}], + #{<<"auth">> => #{<<"password">> => #{<<"format">> => <<"scram">>, + <<"hash">> => [<<"sha">>, <<"sha256">>]}}}), + eq_host_config( + [#local_config{key = {auth_opts, ?HOST}, + value = [{password_format, scram}]}], + #{<<"auth">> => #{<<"password">> => #{<<"format">> => <<"scram">>}}}), + eq_host_config( + [#local_config{key = {auth_opts, ?HOST}, + value = [{password_format, plain}]}], + #{<<"auth">> => #{<<"password">> => #{<<"format">> => <<"plain">>}}}), + + err_host_config(#{<<"auth">> => #{<<"password">> => #{<<"format">> => <<"no password">>}}}), + err_host_config(#{<<"auth">> => #{<<"password">> => #{<<"format">> => <<"scram">>, + <<"hash">> => []}}}), + err_host_config(#{<<"auth">> => #{<<"password">> => #{<<"format">> => <<"scram">>, + <<"hash">> => [<<"sha1234">>]}}}). + +auth_scram_iterations(_Config) -> + eq_host_config([#local_config{key = {auth_opts, ?HOST}, + value = [{scram_iterations, 1000}]}], + #{<<"auth">> => #{<<"scram_iterations">> => 1000}}), + err_host_config(#{<<"auth">> => #{<<"scram_iterations">> => false}}). + +auth_sasl_external(_Config) -> + eq_host_config( + [#local_config{key = {auth_opts, ?HOST}, + value = [{cyrsasl_external, [standard, + common_name, + {mod, cyrsasl_external_verification}] + }]}], + #{<<"auth">> => #{<<"sasl_external">> => + [<<"standard">>, + <<"common_name">>, + <<"cyrsasl_external_verification">>]}}), + err_host_config(#{<<"auth">> => #{<<"sasl_external">> => [<<"unknown">>]}}). + +auth_sasl_mechanisms(_Config) -> + eq_host_config([#local_config{key = {auth_opts, ?HOST}, value = []}, + #local_config{key = {sasl_mechanisms, ?HOST}, + value = [cyrsasl_external, cyrsasl_scram]}], + #{<<"auth">> => #{<<"sasl_mechanisms">> => [<<"external">>, <<"scram">>]}}), + err_host_config(#{<<"auth">> => #{<<"sasl_mechanisms">> => [<<"none">>]}}). 
+ +auth_allow_multiple_connections(_Config) -> + eq_host_config([#local_config{key = {auth_opts, ?HOST}, value = []}, + #local_config{key = {allow_multiple_connections, ?HOST}, value = true}], + auth_config(<<"anonymous">>, #{<<"allow_multiple_connections">> => true})), + err_host_config(auth_config(<<"anonymous">>, #{<<"allow_multiple_connections">> => <<"yes">>})). + +auth_anonymous_protocol(_Config) -> + eq_host_config([#local_config{key = {auth_opts, ?HOST}, value = []}, + #local_config{key = {anonymous_protocol, ?HOST}, value = login_anon}], + auth_config(<<"anonymous">>, #{<<"protocol">> => <<"login_anon">>})), + err_host_config(auth_config(<<"anonymous">>, #{<<"protocol">> => <<"none">>})). + +auth_ldap_pool(_Config) -> + eq_host_config([#local_config{key = {auth_opts, ?HOST}, + value = [{ldap_pool_tag, ldap_pool}]}], + auth_ldap(#{<<"pool_tag">> => <<"ldap_pool">>})), + err_host_config(auth_ldap(#{<<"pool_tag">> => <<>>})). + +auth_ldap_bind_pool(_Config) -> + eq_host_config([#local_config{key = {auth_opts, ?HOST}, + value = [{ldap_bind_pool_tag, ldap_bind_pool}]}], + auth_ldap(#{<<"bind_pool_tag">> => <<"ldap_bind_pool">>})), + err_host_config(auth_ldap(#{<<"bind_pool_tag">> => true})). + +auth_ldap_base(_Config) -> + eq_host_config([#local_config{key = {auth_opts, ?HOST}, + value = [{ldap_base, "ou=Users,dc=example,dc=com"}]}], + auth_ldap(#{<<"base">> => <<"ou=Users,dc=example,dc=com">>})), + err_host_config(auth_ldap(#{<<"base">> => 10})). + +auth_ldap_uids(_Config) -> + eq_host_config([#local_config{key = {auth_opts, ?HOST}, + value = [{ldap_uids, [{"uid1", "user=%u"}]}]}], + auth_ldap(#{<<"uids">> => [#{<<"attr">> => <<"uid1">>, + <<"format">> => <<"user=%u">>}]})), + eq_host_config([#local_config{key = {auth_opts, ?HOST}, + value = [{ldap_uids, ["uid1"]}]}], + auth_ldap(#{<<"uids">> => [#{<<"attr">> => <<"uid1">>}]})), + err_host_config(auth_ldap(#{<<"uids">> => [#{<<"format">> => <<"user=%u">>}]})). 
+ +auth_ldap_filter(_Config) -> + eq_host_config([#local_config{key = {auth_opts, ?HOST}, + value = [{ldap_filter, "(objectClass=inetOrgPerson)"}]}], + auth_ldap(#{<<"filter">> => <<"(objectClass=inetOrgPerson)">>})), + err_host_config(auth_ldap(#{<<"filter">> => 10})). + +auth_ldap_dn_filter(_Config) -> + Filter = #{<<"filter">> => <<"(&(name=%s)(owner=%D)(user=%u@%d))">>, + <<"attributes">> => [<<"sn">>]}, + eq_host_config( + [#local_config{key = {auth_opts, ?HOST}, + value = [{ldap_dn_filter, {"(&(name=%s)(owner=%D)(user=%u@%d))", ["sn"]}}]}], + auth_ldap(#{<<"dn_filter">> => Filter})), + [err_host_config(auth_ldap(#{<<"dn_filter">> => maps:without([K], Filter)})) || + K <- maps:keys(Filter)], + err_host_config(auth_ldap(#{<<"dn_filter">> => Filter#{<<"filter">> := 12}})), + err_host_config(auth_ldap(#{<<"dn_filter">> => Filter#{<<"attributes">> := <<"sn">>}})). + +auth_ldap_local_filter(_Config) -> + Filter = #{<<"operation">> => <<"equal">>, + <<"attribute">> => <<"accountStatus">>, + <<"values">> => [<<"enabled">>]}, + eq_host_config( + [#local_config{key = {auth_opts, ?HOST}, + value = [{ldap_local_filter, {equal, {"accountStatus", ["enabled"]}}}]}], + auth_ldap(#{<<"local_filter">> => Filter})), + [err_host_config(auth_ldap(#{<<"local_filter">> => maps:without([K], Filter)})) || + K <- maps:keys(Filter)], + err_host_config(auth_ldap(#{<<"local_filter">> => Filter#{<<"operation">> := <<"lt">>}})), + err_host_config(auth_ldap(#{<<"local_filter">> => Filter#{<<"attribute">> := <<>>}})), + err_host_config(auth_ldap(#{<<"local_filter">> => Filter#{<<"values">> := []}})). + +auth_ldap_deref(_Config) -> + eq_host_config([#local_config{key = {auth_opts, ?HOST}, + value = [{ldap_deref, always}]}], + auth_ldap(#{<<"deref">> => <<"always">>})), + err_host_config(auth_ldap(#{<<"deref">> => <<"sometimes">>})). 
+ +auth_external_instances(_Config) -> + eq_host_config([#local_config{key = {auth_opts, ?HOST}, value = []}, + #local_config{key = {extauth_instances, ?HOST}, value = 2}], + auth_config(<<"external">>, #{<<"instances">> => 2})), + err_host_config(auth_config(<<"external">>, #{<<"instances">> => 0})). + +auth_external_program(_Config) -> + eq_host_config([#local_config{key = {auth_opts, ?HOST}, + value = [{extauth_program, "/usr/bin/auth"}]}], + auth_config(<<"external">>, #{<<"program">> => <<"/usr/bin/auth">>})), + err_host_config(auth_config(<<"external">>, #{<<"program">> => <<>>})). + +auth_http_basic_auth(_Config) -> + eq_host_config([#local_config{key = {auth_opts, ?HOST}, + value = [{basic_auth, "admin:admin123"}]}], + auth_config(<<"http">>, #{<<"basic_auth">> => <<"admin:admin123">>})), + err_host_config(auth_config(<<"http">>, #{<<"basic_auth">> => true})). + +auth_jwt(_Config) -> + Opts = #{<<"secret">> => #{<<"value">> => <<"secret123">>}, + <<"algorithm">> => <<"HS512">>, + <<"username_key">> => <<"user">>}, % tested together as all options are required + eq_host_config([#local_config{key = {auth_opts, ?HOST}, + value = [{jwt_algorithm, "HS512"}, + {jwt_secret, "secret123"}, + {jwt_username_key, user}]}], + auth_config(<<"jwt">>, Opts)), + FileOpts = Opts#{<<"secret">> := #{<<"file">> => <<"/home/user/jwt_secret">>}}, + eq_host_config([#local_config{key = {auth_opts, ?HOST}, + value = [{jwt_algorithm, "HS512"}, + {jwt_secret_source, "/home/user/jwt_secret"}, + {jwt_username_key, user}]}], + auth_config(<<"jwt">>, FileOpts)), + eq_host_config([#local_config{key = {auth_opts, ?HOST}, + value = [{jwt_algorithm, "HS512"}, + {jwt_secret_source, {env, "SECRET"}}, + {jwt_username_key, user}]}], + auth_config(<<"jwt">>, Opts#{<<"secret">> := #{<<"env">> => <<"SECRET">>}})), + err_host_config(auth_config(<<"jwt">>, Opts#{<<"secret">> := #{<<"value">> => 123}})), + err_host_config(auth_config(<<"jwt">>, Opts#{<<"secret">> := #{<<"file">> => <<>>}})), + 
err_host_config(auth_config(<<"jwt">>, Opts#{<<"secret">> := #{<<"env">> => <<>>}})), + err_host_config(auth_config(<<"jwt">>, Opts#{<<"secret">> := #{<<"file">> => <<"/jwt_secret">>, + <<"env">> => <<"SECRET">>}})), + err_host_config(auth_config(<<"jwt">>, Opts#{<<"algorithm">> := <<"bruteforce">>})), + err_host_config(auth_config(<<"jwt">>, Opts#{<<"username_key">> := <<>>})), + [err_host_config(auth_config(<<"jwt">>, maps:without([K], Opts))) || K <- maps:keys(Opts)]. + +auth_riak_bucket_type(_Config) -> + eq_host_config([#local_config{key = {auth_opts, ?HOST}, + value = [{bucket_type, <<"buckethead">>}]}], + auth_config(<<"riak">>, #{<<"bucket_type">> => <<"buckethead">>})), + err_host_config(auth_config(<<"riak">>, #{<<"bucket_type">> => <<>>})). + +%% tests: outgoing_pools + +pool_type(_Config) -> + ?eq(pool_config({http, global, default, [], []}), + parse_pool(<<"http">>, <<"default">>, #{})), + ?err(parse_pool(<<"swimming_pool">>, <<"default">>, #{})). + +pool_tag(_Config) -> + ?eq(pool_config({http, global, my_pool, [], []}), + parse_pool(<<"http">>, <<"my_pool">>, #{})), + ?err(parse_pool(<<"http">>, 1000, #{})). + +pool_scope(_Config) -> + ?eq(pool_config({http, global, default, [], []}), + parse_pool(<<"http">>, <<"default">>, #{})), + ?eq(pool_config({http, global, default, [], []}), + parse_pool(<<"http">>, <<"default">>, #{<<"scope">> => <<"global">>})), + ?eq(pool_config({http, host, default, [], []}), + parse_pool(<<"http">>, <<"default">>, #{<<"scope">> => <<"host">>})), + ?eq(pool_config({http, <<"localhost">>, default, [], []}), + parse_pool(<<"http">>, <<"default">>, #{<<"scope">> => <<"single_host">>, + <<"host">> => <<"localhost">>})), + ?err(parse_pool(<<"http">>, <<"default">>, #{<<"scope">> => <<"whatever">>})), + ?err(parse_pool(<<"http">>, <<"default">>, #{<<"scope">> => <<"single_host">>})). 
+ +pool_workers(_Config) -> + ?eq(pool_config({http, global, default, [{workers, 10}], []}), + parse_pool(<<"http">>, <<"default">>, #{<<"workers">> => 10})), + ?err(parse_pool(<<"http">>, <<"default">>, #{<<"workers">> => 0})). + +pool_strategy(_Config) -> + ?eq(pool_config({http, global, default, [{strategy, best_worker}], []}), + parse_pool(<<"http">>, <<"default">>, #{<<"strategy">> => <<"best_worker">>})), + ?err(parse_pool(<<"http">>, <<"default">>, #{<<"strategy">> => <<"worst_worker">>})). + +pool_call_timeout(_Config) -> + ?eq(pool_config({http, global, default, [{call_timeout, 5000}], []}), + parse_pool(<<"http">>, <<"default">>, #{<<"call_timeout">> => 5000})), + ?err(parse_pool(<<"http">>, <<"default">>, #{<<"call_timeout">> => 0})). + +pool_rdbms_settings(_Config) -> + ?eq(pool_config({rdbms, global, default, [], [{server, "DSN=mydb"}]}), + parse_pool_conn(<<"rdbms">>, #{<<"driver">> => <<"odbc">>, + <<"settings">> => <<"DSN=mydb">>})), + ?err(parse_pool_conn(<<"rdbms">>, #{<<"driver">> => <<"mysql">>, + <<"settings">> => <<"DSN=mydb">>})), + ?err(parse_pool_conn(<<"rdbms">>, #{<<"driver">> => <<"odbc">>, + <<"settings">> => true})), + ?err(parse_pool_conn(<<"rdbms">>, #{<<"driver">> => <<"odbc">>})). + +pool_rdbms_keepalive_interval(_Config) -> + ?eq(pool_config({rdbms, global, default, [], [{server, "DSN=mydb"}, + {keepalive_interval, 1000}]}), + parse_pool_conn(<<"rdbms">>, #{<<"driver">> => <<"odbc">>, + <<"settings">> => <<"DSN=mydb">>, + <<"keepalive_interval">> => 1000})), + ?err(parse_pool_conn(<<"rdbms">>, #{<<"driver">> => <<"odbc">>, + <<"settings">> => <<"DSN=mydb">>, + <<"keepalive_interval">> => false})). 
+ +pool_rdbms_server(_Config) -> + ServerOpts = rdbms_opts(), + ?eq(pool_config({rdbms, global, default, [], + [{server, {pgsql, "localhost", "db", "dbuser", "secret"}}]}), + parse_pool_conn(<<"rdbms">>, ServerOpts)), + ?err(parse_pool_conn(<<"rdbms">>, ServerOpts#{<<"driver">> := <<"odbc">>})), + [?err(parse_pool_conn(<<"rdbms">>, maps:without([K], ServerOpts))) || + K <- maps:keys(ServerOpts)], + [?err(parse_pool_conn(<<"rdbms">>, ServerOpts#{K := 123})) || + K <- maps:keys(ServerOpts)]. + +pool_rdbms_port(_Config) -> + ServerOpts = rdbms_opts(), + ?eq(pool_config({rdbms, global, default, [], + [{server, {pgsql, "localhost", 1234, "db", "dbuser", "secret"}}]}), + parse_pool_conn(<<"rdbms">>, ServerOpts#{<<"port">> => 1234})), + ?err(parse_pool_conn(<<"rdbms">>, ServerOpts#{<<"port">> => <<"airport">>})). + +pool_rdbms_tls(_Config) -> + ServerOpts = rdbms_opts(), + ?eq(pool_config({rdbms, global, default, [], + [{server, {pgsql, "localhost", "db", "dbuser", "secret", + [{ssl, required}]}}]}), + parse_pool_conn(<<"rdbms">>, ServerOpts#{<<"tls">> => #{<<"required">> => true}})), + ?eq(pool_config({rdbms, global, default, [], + [{server, {pgsql, "localhost", "db", "dbuser", "secret", + [{ssl, true}]}}]}), + parse_pool_conn(<<"rdbms">>, ServerOpts#{<<"tls">> => #{}})), + ?eq(pool_config({rdbms, global, default, [], + [{server, {mysql, "localhost", "db", "dbuser", "secret", []}}]}), + parse_pool_conn(<<"rdbms">>, ServerOpts#{<<"driver">> => <<"mysql">>, + <<"tls">> => #{}})), + ?eq(pool_config({rdbms, global, default, [], + [{server, {pgsql, "localhost", 1234, "db", "dbuser", "secret", + [{ssl, true}]}}]}), + parse_pool_conn(<<"rdbms">>, ServerOpts#{<<"tls">> => #{}, + <<"port">> => 1234})), + + %% one option tested here as they are all checked by 'listen_tls_*' tests + ?eq(pool_config({rdbms, global, default, [], + [{server, {pgsql, "localhost", "db", "dbuser", "secret", + [{ssl, true}, {ssl_opts, [{certfile, "cert.pem"}]}]}}]}), + parse_pool_conn(<<"rdbms">>, 
ServerOpts#{<<"tls">> => + #{<<"certfile">> => <<"cert.pem">>}})), + ?err(parse_pool_conn(<<"rdbms">>, ServerOpts#{<<"tls">> => + #{<<"certfile">> => true}})), + ?err(parse_pool_conn(<<"rdbms">>, ServerOpts#{<<"tls">> => <<"secure">>})). + +pool_http_host(_Config) -> + ?eq(pool_config({http, global, default, [], [{server, "https://localhost:8443"}]}), + parse_pool_conn(<<"http">>, #{<<"host">> => <<"https://localhost:8443">>})), + ?err(parse_pool_conn(<<"http">>, #{<<"host">> => 8443})), + ?err(parse_pool_conn(<<"http">>, #{<<"host">> => ""})). + +pool_http_path_prefix(_Config) -> + ?eq(pool_config({http, global, default, [], [{path_prefix, "/"}]}), + parse_pool_conn(<<"http">>, #{<<"path_prefix">> => <<"/">>})), + ?err(parse_pool_conn(<<"http">>, #{<<"path_prefix">> => 8443})), + ?err(parse_pool_conn(<<"http">>, #{<<"path_prefix">> => ""})). + +pool_http_request_timeout(_Config) -> + ?eq(pool_config({http, global, default, [], [{request_timeout, 2000}]}), + parse_pool_conn(<<"http">>, #{<<"request_timeout">> => 2000})), + ?err(parse_pool_conn(<<"http">>, #{<<"request_timeout">> => -1000})), + ?err(parse_pool_conn(<<"http">>, #{<<"request_timeout">> => <<"infinity">>})). + +pool_http_tls(_Config) -> + ?eq(pool_config({http, global, default, [], [{http_opts, [{certfile, "cert.pem"} ]}]}), + parse_pool_conn(<<"http">>, #{<<"tls">> => #{<<"certfile">> => <<"cert.pem">>}})), + ?err(parse_pool_conn(<<"http">>, #{<<"tls">> => #{<<"certfile">> => true}})), + ?err(parse_pool_conn(<<"http">>, #{<<"tls">> => <<"secure">>})). + +pool_redis_host(_Config) -> + ?eq(pool_config({redis, global, default, [], [{host, "localhost"}]}), + parse_pool_conn(<<"redis">>, #{<<"host">> => <<"localhost">>})), + ?err(parse_pool_conn(<<"redis">>, #{<<"host">> => 8443})), + ?err(parse_pool_conn(<<"redis">>, #{<<"host">> => ""})). 
+ +pool_redis_port(_Config) -> + ?eq(pool_config({redis, global, default, [], [{port, 6379}]}), + parse_pool_conn(<<"redis">>, #{<<"port">> => 6379})), + ?err(parse_pool_conn(<<"redis">>, #{<<"port">> => 666666})), + ?err(parse_pool_conn(<<"redis">>, #{<<"port">> => <<"airport">>})). + +pool_redis_database(_Config) -> + ?eq(pool_config({redis, global, default, [], [{database, 0}]}), + parse_pool_conn(<<"redis">>, #{<<"database">> => 0})), + ?err(parse_pool_conn(<<"redis">>, #{<<"database">> => -1})), + ?err(parse_pool_conn(<<"redis">>, #{<<"database">> => <<"my_database">>})). + +pool_redis_password(_Config) -> + ?eq(pool_config({redis, global, default, [], [{password, ""}]}), + parse_pool_conn(<<"redis">>, #{<<"password">> => <<"">>})), + ?eq(pool_config({redis, global, default, [], [{password, "password1"}]}), + parse_pool_conn(<<"redis">>, #{<<"password">> => <<"password1">>})), + ?err(parse_pool_conn(<<"redis">>, #{<<"password">> => 0})). + +pool_riak_address(_Config) -> + ?eq(pool_config({riak, global, default, [], [{address, "127.0.0.1"}]}), + parse_pool_conn(<<"riak">>, #{<<"address">> => <<"127.0.0.1">>})), + ?err(parse_pool_conn(<<"riak">>, #{<<"address">> => 66})), + ?err(parse_pool_conn(<<"riak">>, #{<<"address">> => <<"">>})). + +pool_riak_port(_Config) -> + ?eq(pool_config({riak, global, default, [], [{port, 8087}]}), + parse_pool_conn(<<"riak">>, #{<<"port">> => 8087})), + ?err(parse_pool_conn(<<"riak">>, #{<<"port">> => 666666})), + ?err(parse_pool_conn(<<"riak">>, #{<<"port">> => <<"airport">>})). + +pool_riak_credentials(_Config) -> + ?eq(pool_config({riak, global, default, [], [{credentials, "user", "pass"}]}), + parse_pool_conn(<<"riak">>, #{<<"credentials">> => + #{<<"user">> => <<"user">>, <<"password">> => <<"pass">>}})), + ?err(parse_pool_conn(<<"riak">>, #{<<"credentials">> => #{<<"user">> => <<"user">>}})), + ?err(parse_pool_conn(<<"riak">>, #{<<"credentials">> => #{<<"user">> => <<"">>, <<"password">> => 011001}})). 
+ +pool_riak_cacertfile(_Config) -> + ?eq(pool_config({riak, global, default, [], [{cacertfile, "path/to/cacert.pem"}]}), + parse_pool_conn(<<"riak">>, #{<<"tls">> => #{<<"cacertfile">> => <<"path/to/cacert.pem">>}})), + ?err(parse_pool_conn(<<"riak">>, #{<<"cacertfile">> => <<"">>})). + +pool_riak_certfile(_Config) -> + ?eq(pool_config({riak, global, default, [], [{certfile, "path/to/cert.pem"}]}), + parse_pool_conn(<<"riak">>, #{<<"tls">> => #{<<"certfile">> => <<"path/to/cert.pem">>}})), + ?err(parse_pool_conn(<<"riak">>, #{<<"certfile">> => <<"">>})). + +pool_riak_keyfile(_Config) -> + ?eq(pool_config({riak, global, default, [], [{keyfile, "path/to/key.pem"}]}), + parse_pool_conn(<<"riak">>, #{<<"tls">> => #{<<"keyfile">> => <<"path/to/key.pem">>}})), + ?err(parse_pool_conn(<<"riak">>, #{<<"keyfile">> => <<"">>})). + +pool_riak_tls(_Config) -> + %% one option tested here as they are all checked by 'listen_tls_*' tests + ?eq(pool_config({riak, global, default, [], [{ssl_opts, [{dhfile, "cert.pem"} + ]}]}), + parse_pool_conn(<<"riak">>, #{<<"tls">> => #{<<"dhfile">> => <<"cert.pem">>}})), + ?err(parse_pool_conn(<<"riak">>, #{<<"tls">> => #{<<"dhfile">> => true}})), + ?err(parse_pool_conn(<<"riak">>, #{<<"tls">> => <<"secure">>})). + +pool_cassandra_servers(_Config) -> + ?eq(pool_config({cassandra, global, default, [], + [{servers, [{"cassandra_server1.example.com", 9042}, {"cassandra_server2.example.com", 9042}]}]}), + parse_pool_conn(<<"cassandra">>, #{<<"servers">> => [ + #{<<"ip_address">> => <<"cassandra_server1.example.com">>, <<"port">> => 9042}, + #{<<"ip_address">> => <<"cassandra_server2.example.com">>, <<"port">> => 9042} + ]})), + ?err(parse_pool_conn(<<"cassandra">>, #{<<"servers">> => + #{<<"ip_address">> => <<"cassandra_server1.example.com">>, <<"port">> => 9042}})). 
+ +pool_cassandra_keyspace(_Config) -> + ?eq(pool_config({cassandra, global, default, [], [{keyspace, "big_mongooseim"}]}), + parse_pool_conn(<<"cassandra">>, #{<<"keyspace">> => <<"big_mongooseim">>})), + ?err(parse_pool_conn(<<"cassandra">>, #{<<"keyspace">> => <<"">>})). + +pool_cassandra_auth(_Config) -> + ?eq(pool_config({cassandra, global, default, [], [{auth, {cqerl_auth_plain_handler, [{<<"auser">>, <<"secretpass">>}]}}]}), + parse_pool_conn(<<"cassandra">>, + #{<<"auth">> => #{<<"plain">> => #{<<"username">> => <<"auser">>, + <<"password">> => <<"secretpass">>}}})), + ?err(parse_pool_conn(<<"cassandra">>, #{<<"tls">> => #{<<"verify">> => <<"verify_none">>}})). + +pool_cassandra_tls(_Config) -> + %% one option tested here as they are all checked by 'listen_tls_*' tests + ?eq(pool_config({cassandra, global, default, [], [{ssl, [{verify, verify_none} + ]}]}), + parse_pool_conn(<<"cassandra">>, #{<<"tls">> => #{<<"verify_peer">> => false}})), + ?err(parse_pool_conn(<<"cassandra">>, #{<<"tls">> => #{<<"verify">> => <<"verify_none">>}})). + +pool_elastic_host(_Config) -> + ?eq(pool_config({elastic, global, default, [], [{host, "localhost"}]}), + parse_pool_conn(<<"elastic">>, #{<<"host">> => <<"localhost">>})), + ?err(parse_pool_conn(<<"elastic">>, #{<<"host">> => <<"">>})). + +pool_elastic_port(_Config) -> + ?eq(pool_config({elastic, global, default, [], [{port, 9200}]}), + parse_pool_conn(<<"elastic">>, #{<<"port">> => 9200})), + ?err(parse_pool_conn(<<"elastic">>, #{<<"port">> => 122333})), + ?err(parse_pool_conn(<<"elastic">>, #{<<"port">> => <<"airport">>})). + +pool_rabbit_amqp_host(_Config) -> + ?eq(pool_config({rabbit, global, default, [], [{amqp_host, "localhost"}]}), + parse_pool_conn(<<"rabbit">>, #{<<"amqp_host">> => <<"localhost">>})), + ?err(parse_pool_conn(<<"rabbit">>, #{<<"amqp_host">> => <<"">>})). 
+ +pool_rabbit_amqp_port(_Config) -> + ?eq(pool_config({rabbit, global, default, [], [{amqp_port, 5672}]}), + parse_pool_conn(<<"rabbit">>, #{<<"amqp_port">> => 5672})), + ?err(parse_pool_conn(<<"rabbit">>, #{<<"amqp_port">> => <<"airport">>})). + +pool_rabbit_amqp_username(_Config) -> + ?eq(pool_config({rabbit, global, default, [], [{amqp_username, "guest"}]}), + parse_pool_conn(<<"rabbit">>, #{<<"amqp_username">> => <<"guest">>})), + ?err(parse_pool_conn(<<"rabbit">>, #{<<"amqp_username">> => <<"">>})). + +pool_rabbit_amqp_password(_Config) -> + ?eq(pool_config({rabbit, global, default, [], [{amqp_password, "guest"}]}), + parse_pool_conn(<<"rabbit">>, #{<<"amqp_password">> => <<"guest">>})), + ?err(parse_pool_conn(<<"rabbit">>, #{<<"amqp_password">> => <<"">>})). + +pool_rabbit_amqp_confirms_enabled(_Config) -> + ?eq(pool_config({rabbit, global, default, [], [{confirms_enabled, true}]}), + parse_pool_conn(<<"rabbit">>, #{<<"confirms_enabled">> => true})), + ?err(parse_pool_conn(<<"rabbit">>, #{<<"confirms_enabled">> => <<"yes">>})). + +pool_rabbit_amqp_max_worker_queue_len(_Config) -> + ?eq(pool_config({rabbit, global, default, [], [{max_worker_queue_len, 100}]}), + parse_pool_conn(<<"rabbit">>, #{<<"max_worker_queue_len">> => 100})), + ?err(parse_pool_conn(<<"rabbit">>, #{<<"max_worker_queue_len">> => 0})). + +pool_ldap_host(_Config) -> + ?eq(pool_config({ldap, global, default, [], [{host, "localhost"}]}), + parse_pool_conn(<<"ldap">>, #{<<"host">> => <<"localhost">>})), + ?err(parse_pool_conn(<<"ldap">>, #{<<"host">> => <<"">>})). + +pool_ldap_port(_Config) -> + ?eq(pool_config({ldap, global, default, [], [{port, 389}]}), + parse_pool_conn(<<"ldap">>, #{<<"port">> => 389})), + ?err(parse_pool_conn(<<"ldap">>, #{<<"port">> => <<"airport">>})). 
+ +pool_ldap_servers(_Config) -> + ?eq(pool_config({ldap, global, default, [], + [{servers, ["primary-ldap-server.example.com", "secondary-ldap-server.example.com"]}]}), + parse_pool_conn(<<"ldap">>, #{<<"servers">> => + [<<"primary-ldap-server.example.com">>, <<"secondary-ldap-server.example.com">>]})), + ?err(parse_pool_conn(<<"ldap">>, #{<<"servers">> => #{<<"server">> => <<"example.com">>}})). + +pool_ldap_encrypt(_Config) -> + ?eq(pool_config({ldap, global, default, [], [{encrypt, none}]}), + parse_pool_conn(<<"ldap">>, #{<<"encrypt">> => <<"none">>})), + ?err(parse_pool_conn(<<"ldap">>, #{<<"encrypt">> => true})). + +pool_ldap_rootdn(_Config) -> + ?eq(pool_config({ldap, global, default, [], [{rootdn, ""}]}), + parse_pool_conn(<<"ldap">>, #{<<"rootdn">> => <<"">>})), + ?err(parse_pool_conn(<<"ldap">>, #{<<"rootdn">> => false})). + +pool_ldap_password(_Config) -> + ?eq(pool_config({ldap, global, default, [], [{password, "pass"}]}), + parse_pool_conn(<<"ldap">>, #{<<"password">> => <<"pass">>})), + ?err(parse_pool_conn(<<"ldap">>, #{<<"password">> => true})). + +pool_ldap_connect_interval(_Config) -> + ?eq(pool_config({ldap, global, default, [], [{connect_interval, 10000}]}), + parse_pool_conn(<<"ldap">>, #{<<"connect_interval">> => 10000})), + ?err(parse_pool_conn(<<"ldap">>, #{<<"connect_interval">> => <<"infinity">>})). + +pool_ldap_tls(_Config) -> + %% one option tested here as they are all checked by 'listen_tls_*' tests + ?eq(pool_config({ldap, global, default, [], [{tls_options, [{verify, verify_peer} + ]}]}), + parse_pool_conn(<<"ldap">>, #{<<"tls">> => #{<<"verify_peer">> => true}})), + ?err(parse_pool_conn(<<"ldap">>, #{<<"tls">> => #{<<"verify">> => <<"verify_none">>}})). 
+ +%% tests: shaper, acl, access +shaper(_Config) -> + eq_host_or_global( + fun(Host) -> [#config{key = {shaper, normal, Host}, value = {maxrate, 1000}}] end, + #{<<"shaper">> => #{<<"normal">> => #{<<"max_rate">> => 1000}}}), + err_host_config(#{<<"shaper">> => #{<<"unlimited">> => #{<<"max_rate">> => <<"infinity">>}}}), + err_host_config(#{<<"shaper">> => #{<<"fast">> => #{}}}). + +acl(_Config) -> + eq_host_or_global( + fun(Host) -> [{acl, {local, Host}, all}] end, + #{<<"acl">> => #{<<"local">> => [#{<<"match">> => <<"all">>}]}}), + eq_host_or_global( + fun(Host) -> [{acl, {local, Host}, {user_regexp, <<>>}}] end, + #{<<"acl">> => #{<<"local">> => [#{<<"user_regexp">> => <<>>}]}}), + eq_host_or_global( + fun(Host) -> [{acl, {alice, Host}, {node_regexp, <<"ali.*">>, <<".*host">>}}] end, + #{<<"acl">> => #{<<"alice">> => [#{<<"user_regexp">> => <<"ali.*">>, + <<"server_regexp">> => <<".*host">>}]}}), + eq_host_or_global( + fun(Host) -> [{acl, {alice, Host}, {user, <<"alice">>, <<"localhost">>}}] end, + #{<<"acl">> => #{<<"alice">> => [#{<<"user">> => <<"alice">>, + <<"server">> => <<"localhost">>}]}}), + err_host_config(#{<<"acl">> => #{<<"local">> => <<"everybody">>}}), + err_host_config(#{<<"acl">> => #{<<"alice">> => [#{<<"user_glob">> => <<"a*">>, + <<"server_blog">> => <<"bloghost">>}]}}). 
+ +access(_Config) -> + eq_host_or_global( + fun(Host) -> [#config{key = {access, c2s, Host}, value = [{deny, blocked}, + {allow, all}]}] + end, + #{<<"access">> => #{<<"c2s">> => [#{<<"acl">> => <<"blocked">>, + <<"value">> => <<"deny">>}, + #{<<"acl">> => <<"all">>, + <<"value">> => <<"allow">>}]}}), + eq_host_or_global( + fun(Host) -> [#config{key = {access, max_user_sessions, Host}, value = [{10, all}]}] end, + #{<<"access">> => #{<<"max_user_sessions">> => [#{<<"acl">> => <<"all">>, + <<"value">> => 10}]}}), + err_host_config(#{<<"access">> => #{<<"max_user_sessions">> => [#{<<"acl">> => <<"all">>}]}}), + err_host_config(#{<<"access">> => #{<<"max_user_sessions">> => [#{<<"value">> => 10}]}}), + err_host_config(#{<<"access">> => #{<<"max_user_sessions">> => [#{<<"acl">> => 10, + <<"value">> => 10}]}}). + +%% tests: s2s + +s2s_dns_timeout(_Config) -> + ?eq([#local_config{key = s2s_dns_options, value = [{timeout, 5}]}], + parse(#{<<"s2s">> => #{<<"dns">> => #{<<"timeout">> => 5}}})), + ?err(parse(#{<<"s2s">> => #{<<"dns">> => #{<<"timeout">> => 0}}})). + +s2s_dns_retries(_Config) -> + ?eq([#local_config{key = s2s_dns_options, value = [{retries, 1}]}], + parse(#{<<"s2s">> => #{<<"dns">> => #{<<"retries">> => 1}}})), + ?err(parse(#{<<"s2s">> => #{<<"dns">> => #{<<"retries">> => 0}}})). + +s2s_outgoing_port(_Config) -> + ?eq([#local_config{key = outgoing_s2s_port, value = 5270}], + parse(#{<<"s2s">> => #{<<"outgoing">> => #{<<"port">> => 5270}}})), + ?err(parse(#{<<"s2s">> => #{<<"outgoing">> => #{<<"port">> => <<"http">>}}})). + +s2s_outgoing_ip_versions(_Config) -> + ?eq([#local_config{key = outgoing_s2s_families, value = [ipv6, ipv4]}], + parse(#{<<"s2s">> => #{<<"outgoing">> => #{<<"ip_versions">> => [6, 4]}}})), + ?err(parse(#{<<"s2s">> => #{<<"outgoing">> => #{<<"ip_versions">> => []}}})), + ?err(parse(#{<<"s2s">> => #{<<"outgoing">> => #{<<"ip_versions">> => [<<"http">>]}}})). 
+ +s2s_outgoing_timeout(_Config) -> + ?eq([#local_config{key = outgoing_s2s_timeout, value = 5}], + parse(#{<<"s2s">> => #{<<"outgoing">> => #{<<"connection_timeout">> => 5}}})), + ?eq([#local_config{key = outgoing_s2s_timeout, value = infinity}], + parse(#{<<"s2s">> => #{<<"outgoing">> => #{<<"connection_timeout">> => <<"infinity">>}}})), + ?err(parse(#{<<"s2s">> => #{<<"outgoing">> => #{<<"connection_timeout">> => 0}}})). + +s2s_use_starttls(_Config) -> + ?eq([#local_config{key = s2s_use_starttls, value = required}], + parse(#{<<"s2s">> => #{<<"use_starttls">> => <<"required">>}})), + ?err(parse(#{<<"s2s">> => #{<<"use_starttls">> => <<"unnecessary">>}})). + +s2s_certfile(_Config) -> + ?eq([#local_config{key = s2s_certfile, value = "cert.pem"}], + parse(#{<<"s2s">> => #{<<"certfile">> => <<"cert.pem">>}})), + ?err(parse(#{<<"s2s">> => #{<<"certfile">> => []}})). + +s2s_default_policy(_Config) -> + eq_host_config([#local_config{key = {s2s_default_policy, ?HOST}, value = deny}], + #{<<"s2s">> => #{<<"default_policy">> => <<"deny">>}}), + err_host_config(#{<<"s2s">> => #{<<"default_policy">> => <<"ask">>}}). 
+ +s2s_host_policy(_Config) -> + Policy = #{<<"host">> => <<"host1">>, + <<"policy">> => <<"allow">>}, + eq_host_config([#local_config{key = {{s2s_host, <<"host1">>}, ?HOST}, value = allow}], + #{<<"s2s">> => #{<<"host_policy">> => [Policy]}}), + eq_host_config([#local_config{key = {{s2s_host, <<"host1">>}, ?HOST}, value = allow}, + #local_config{key = {{s2s_host, <<"host2">>}, ?HOST}, value = deny}], + #{<<"s2s">> => #{<<"host_policy">> => [Policy, #{<<"host">> => <<"host2">>, + <<"policy">> => <<"deny">>}]}}), + err_host_config(#{<<"s2s">> => #{<<"host_policy">> => [maps:without([<<"host">>], Policy)]}}), + err_host_config(#{<<"s2s">> => #{<<"host_policy">> => [maps:without([<<"policy">>], Policy)]}}), + err_host_config(#{<<"s2s">> => #{<<"host_policy">> => [Policy#{<<"host">> => <<>>}]}}), + err_host_config(#{<<"s2s">> => #{<<"host_policy">> => [Policy#{<<"policy">> => <<"huh">>}]}}). + +s2s_address(_Config) -> + Addr = #{<<"host">> => <<"host1">>, + <<"ip_address">> => <<"192.168.1.2">>, + <<"port">> => 5321}, + ?eq([#local_config{key = {s2s_addr, <<"host1">>}, value = {"192.168.1.2", 5321}}], + parse(#{<<"s2s">> => #{<<"address">> => [Addr]}})), + ?eq([#local_config{key = {s2s_addr, <<"host1">>}, value = "192.168.1.2"}], + parse(#{<<"s2s">> => #{<<"address">> => [maps:without([<<"port">>], Addr)]}})), + ?err(parse(#{<<"s2s">> => #{<<"address">> => [maps:without([<<"host">>], Addr)]}})), + ?err(parse(#{<<"s2s">> => #{<<"address">> => [maps:without([<<"ip_address">>], Addr)]}})), + ?err(parse(#{<<"s2s">> => #{<<"address">> => [Addr#{<<"host">> => <<>>}]}})), + ?err(parse(#{<<"s2s">> => #{<<"address">> => [Addr#{<<"ip_address">> => <<"host2">>}]}})), + ?err(parse(#{<<"s2s">> => #{<<"address">> => [Addr#{<<"port">> => <<"seaport">>}]}})). 
+ +s2s_ciphers(_Config) -> + ?eq([#local_config{key = s2s_ciphers, value = "TLSv1.2:TLSv1.3"}], + parse(#{<<"s2s">> => #{<<"ciphers">> => <<"TLSv1.2:TLSv1.3">>}})), + ?err(parse(#{<<"s2s">> => #{<<"ciphers">> => [<<"cipher1">>, <<"cipher2">>]}})). + +s2s_domain_certfile(_Config) -> + DomCert = #{<<"domain">> => <<"myxmpp.com">>, + <<"certfile">> => <<"mycert.pem">>}, + ?eq([#local_config{key = {domain_certfile, "myxmpp.com"}, value = "mycert.pem"}], + parse(#{<<"s2s">> => #{<<"domain_certfile">> => [DomCert]}})), + [?err(parse(#{<<"s2s">> => #{<<"domain_certfile">> => [maps:without([K], DomCert)]}})) + || K <- maps:keys(DomCert)], + [?err(parse(#{<<"s2s">> => #{<<"domain_certfile">> => [DomCert#{K := <<>>}]}})) + || K <- maps:keys(DomCert)]. + +s2s_shared(_Config) -> + eq_host_config([#local_config{key = {s2s_shared, ?HOST}, value = <<"secret">>}], + #{<<"s2s">> => #{<<"shared">> => <<"secret">>}}), + err_host_config(#{<<"s2s">> => #{<<"shared">> => 536837}}). + +s2s_max_retry_delay(_Config) -> + eq_host_config([#local_config{key = {s2s_max_retry_delay, ?HOST}, value = 120}], + #{<<"s2s">> => #{<<"max_retry_delay">> => 120}}), + err_host_config(#{<<"s2s">> => #{<<"max_retry_delay">> => 0}}). + +%% modules + +mod_adhoc(_Config) -> + check_iqdisc(mod_adhoc), + run_multi(mod_adhoc_cases()). + +mod_adhoc_cases() -> + M = fun(K, V) -> modopts(mod_adhoc, [{K, V}]) end, + T = fun(K, V) -> #{<<"modules">> => #{<<"mod_adhoc">> => #{K => V}}} end, + %% report_commands_node is boolean + [?_eqf(M(report_commands_node, true), T(<<"report_commands_node">>, true)), + ?_eqf(M(report_commands_node, false), T(<<"report_commands_node">>, false)), + %% not boolean + ?_errf(T(<<"report_commands_node">>, <<"hello">>))]. + +mod_auth_token(_Config) -> + check_iqdisc(mod_auth_token), + run_multi(mod_auth_token_cases()). 
+ +mod_auth_token_cases() -> + P = fun(X) -> + Opts = #{<<"validity_period">> => X}, + #{<<"modules">> => #{<<"mod_auth_token">> => Opts}} + end, + [?_eqf(modopts(mod_auth_token, [{{validity_period,access}, {13,minutes}}, + {{validity_period,refresh}, {31,days}}]), + P([#{<<"token">> => <<"access">>, <<"value">> => 13, <<"unit">> => <<"minutes">>}, + #{<<"token">> => <<"refresh">>, <<"value">> => 31, <<"unit">> => <<"days">>}])), + ?_errf(P([#{<<"token">> => <<"access">>, <<"value">> => <<"13">>, <<"unit">> => <<"minutes">>}])), + ?_errf(P([#{<<"token">> => <<"access">>, <<"value">> => 13, <<"unit">> => <<"minute">>}])), + ?_errf(P([#{<<"token">> => <<"Access">>, <<"value">> => 13, <<"unit">> => <<"minutes">>}])), + ?_errf(P([#{<<"value">> => 13, <<"unit">> => <<"minutes">>}])), + ?_errf(P([#{<<"token">> => <<"access">>, <<"unit">> => <<"minutes">>}])), + ?_errf(P([#{<<"token">> => <<"access">>, <<"value">> => 13}]))]. + + +mod_bosh(_Config) -> + run_multi(mod_bosh_cases()). + +mod_bosh_cases() -> + T = fun(K, V) -> #{<<"modules">> => #{<<"mod_bosh">> => #{K => V}}} end, + M = fun(K, V) -> modopts(mod_bosh, [{K, V}]) end, + [?_eqf(M(inactivity, 10), T(<<"inactivity">>, 10)), + ?_eqf(M(inactivity, infinity), T(<<"inactivity">>, <<"infinity">>)), + ?_eqf(M(inactivity, 10), T(<<"inactivity">>, 10)), + ?_eqf(M(max_wait, infinity), T(<<"max_wait">>, <<"infinity">>)), + ?_eqf(M(server_acks, true), T(<<"server_acks">>, true)), + ?_eqf(M(server_acks, false), T(<<"server_acks">>, false)), + ?_eqf(M(backend, mnesia), T(<<"backend">>, <<"mnesia">>)), + ?errf(T(<<"inactivity">>, -1)), + ?errf(T(<<"inactivity">>, <<"10">>)), + ?errf(T(<<"inactivity">>, <<"inactivity">>)), + ?errf(T(<<"max_wait">>, <<"10">>)), + ?errf(T(<<"max_wait">>, -1)), + ?errf(T(<<"server_acks">>, -1)), + ?errf(T(<<"backend">>, <<"devnull">>))]. + +mod_caps(_Config) -> + run_multi(mod_caps_cases()). 
+ +mod_caps_cases() -> + T = fun(K, V) -> #{<<"modules">> => #{<<"mod_caps">> => #{K => V}}} end, + M = fun(K, V) -> modopts(mod_caps, [{K, V}]) end, + [?_eqf(M(cache_size, 10), T(<<"cache_size">>, 10)), + ?_eqf(M(cache_life_time, 10), T(<<"cache_life_time">>, 10)), + ?_errf(T(<<"cache_size">>, -1)), + ?_errf(T(<<"cache_size">>, <<"infinity">>)), + ?_errf(T(<<"cache_life_time">>, -1)), + ?_errf(T(<<"cache_life_time">>, <<"cache_life_time">>))]. + +mod_carboncopy(_Config) -> + check_iqdisc(mod_carboncopy). + +mod_csi(_Config) -> + run_multi(mod_csi_cases()). + +mod_csi_cases() -> + T = fun(K, V) -> #{<<"modules">> => #{<<"mod_csi">> => #{K => V}}} end, + M = fun(K, V) -> modopts(mod_csi, [{K, V}]) end, + [?_eqf(M(buffer_max, 10), T(<<"buffer_max">>, 10)), + ?_eqf(M(buffer_max, infinity), T(<<"buffer_max">>, <<"infinity">>)), + ?_errf(T(<<"buffer_max">>, -1))]. + +mod_disco(_Config) -> + T = fun(K, V) -> #{<<"modules">> => #{<<"mod_disco">> => #{K => V}}} end, + ?eqf(modopts(mod_disco, [{users_can_see_hidden_services, true}]), + T(<<"users_can_see_hidden_services">>, true)), + ?eqf(modopts(mod_disco, [{users_can_see_hidden_services, false}]), + T(<<"users_can_see_hidden_services">>, false)), + %% extra_domains are binaries + ?eqf(modopts(mod_disco, [{extra_domains, [<<"localhost">>, <<"erlang-solutions.com">>]}]), + T(<<"extra_domains">>, [<<"localhost">>, <<"erlang-solutions.com">>])), + ?eqf(modopts(mod_disco, [{extra_domains, []}]), + T(<<"extra_domains">>, [])), + ?eqf(modopts(mod_disco, [{server_info, [{all, "abuse-address", ["admin@example.com"]}, + {[mod_muc, mod_disco], "friendly-spirits", + ["spirit1@localhost", "spirit2@localhost"]}]} ]), + T(<<"server_info">>, [#{<<"module">> => <<"all">>, <<"name">> => <<"abuse-address">>, + <<"urls">> => [<<"admin@example.com">>]}, + #{<<"module">> => [<<"mod_muc">>, <<"mod_disco">>], + <<"name">> => <<"friendly-spirits">>, + <<"urls">> => [<<"spirit1@localhost">>, <<"spirit2@localhost">>]} ])), + %% Correct version, 
used as a prototype to make invalid versions
+%%    ?errf(T(<<"server_info">>, [#{<<"module">> => <<"all">>, <<"name">> => <<"abuse-address">>,
+%%                                  <<"urls">> => [<<"admin@example.com">>]}])),
+    %% Invalid name
+    ?errf(T(<<"server_info">>, [#{<<"module">> => <<"all">>, <<"name">> => 1,
+                                  <<"urls">> => [<<"admin@example.com">>]}])),
+    %% Missing name
+    ?errf(T(<<"server_info">>, [#{<<"module">> => <<"all">>,
+                                  <<"urls">> => [<<"admin@example.com">>]}])),
+    %% Invalid module
+    ?errf(T(<<"server_info">>, [#{<<"module">> => <<"roll">>,
+                                  <<"name">> => <<"abuse-address">>,
+                                  <<"urls">> => [<<"admin@example.com">>]}])),
+    %% Invalid module
+    ?errf(T(<<"server_info">>, [#{<<"module">> => [<<"meow_meow_meow">>],
+                                  <<"name">> => <<"abuse-address">>,
+                                  <<"urls">> => [<<"admin@example.com">>]}])),
+    %% Missing urls
+    ?errf(T(<<"server_info">>, [#{<<"module">> => <<"all">>,
+                                  <<"name">> => <<"abuse-address">>}])),
+    %% Missing module
+    ?errf(T(<<"server_info">>, [#{<<"name">> => <<"abuse-address">>,
+                                  <<"urls">> => [<<"admin@example.com">>]}])),
+    %% Invalid url
+    ?errf(T(<<"server_info">>, [#{<<"module">> => <<"all">>,
+                                  <<"name">> => <<"abuse-address">>,
+                                  <<"urls">> => [1]}])),
+    ?errf(T(<<"users_can_see_hidden_services">>, 1)),
+    ?errf(T(<<"users_can_see_hidden_services">>, <<"true">>)),
+    ?errf(T(<<"extra_domains">>, [<<"user@localhost">>])),
+    ?errf(T(<<"extra_domains">>, [1])),
+    ?errf(T(<<"extra_domains">>, <<"domains domains domains">>)). 
+ +mod_extdisco(_Config) -> + T = fun(Opts) -> #{<<"modules">> => #{<<"mod_extdisco">> => Opts}} end, + Service = #{ + <<"type">> => <<"stun">>, + <<"host">> => <<"stun1">>, + <<"port">> => 3478, + <<"transport">> => <<"udp">>, + <<"username">> => <<"username">>, + <<"password">> => <<"password">>}, + Base = #{<<"service">> => [Service]}, + MBase = [{host, "stun1"}, + {password, "password"}, + {port, 3478}, + {transport, "udp"}, + {type, stun}, + {username, "username"}], + ?eqf(modopts(mod_extdisco, [MBase]), T(Base)), + %% Invalid service type + ?errf(T(Base#{<<"service">> => [Base#{<<"type">> => -1}]})), + ?errf(T(Base#{<<"service">> => [Base#{<<"type">> => ["stun"]}]})), + %% Invalid host + ?errf(T(Base#{<<"service">> => [Base#{<<"host">> => [1]}]})), + ?errf(T(Base#{<<"service">> => [Base#{<<"host">> => true}]})), + %% Invalid port + ?errf(T(Base#{<<"service">> => [Base#{<<"port">> => -1}]})), + ?errf(T(Base#{<<"service">> => [Base#{<<"port">> => 9999999}]})), + ?errf(T(Base#{<<"service">> => [Base#{<<"port">> => "port"}]})), + %% Invalid transport + ?errf(T(Base#{<<"service">> => [Base#{<<"transport">> => -1}]})), + ?errf(T(Base#{<<"service">> => [Base#{<<"transport">> => ""}]})), + %% Invalid username + ?errf(T(Base#{<<"service">> => [Base#{<<"username">> => -2}]})), + %% Invalid password + ?errf(T(Base#{<<"service">> => [Base#{<<"password">> => 1}]})), + ?errf(T(Base#{<<"service">> => [Base#{<<"password">> => [<<"test">>]}]})). 
+ +mod_inbox(_Config) -> + T = fun(K, V) -> #{<<"modules">> => #{<<"mod_inbox">> => #{K => V}}} end, + ?eqf(modopts(mod_inbox, [{reset_markers, [displayed, received, acknowledged]}]), + T(<<"reset_markers">>, [<<"displayed">>, <<"received">>, <<"acknowledged">>])), + ?eqf(modopts(mod_inbox, [{reset_markers, []}]), + T(<<"reset_markers">>, [])), + ?eqf(modopts(mod_inbox, [{groupchat, [muc, muclight]}]), + T(<<"groupchat">>, [<<"muc">>, <<"muclight">>])), + ?eqf(modopts(mod_inbox, [{groupchat, []}]), + T(<<"groupchat">>, [])), + ?eqf(modopts(mod_inbox, [{aff_changes, true}]), + T(<<"aff_changes">>, true)), + ?eqf(modopts(mod_inbox, [{aff_changes, false}]), + T(<<"aff_changes">>, false)), + ?eqf(modopts(mod_inbox, [{remove_on_kicked, true}]), + T(<<"remove_on_kicked">>, true)), + ?eqf(modopts(mod_inbox, [{remove_on_kicked, false}]), + T(<<"remove_on_kicked">>, false)), + ?eqf(modopts(mod_inbox, [{backend, rdbms}]), + T(<<"backend">>, <<"rdbms">>)), + ?errf(T(<<"reset_markers">>, 1)), + ?errf(T(<<"reset_markers">>, <<"test">>)), + ?errf(T(<<"reset_markers">>, [<<"test">>])), + ?errf(T(<<"groupchat">>, [<<"test">>])), + ?errf(T(<<"groupchat">>, <<"test">>)), + ?errf(T(<<"groupchat">>, true)), + ?errf(T(<<"aff_changes">>, 1)), + ?errf(T(<<"aff_changes">>, <<"true">>)), + ?errf(T(<<"remove_on_kicked">>, 1)), + ?errf(T(<<"remove_on_kicked">>, <<"true">>)), + ?errf(T(<<"backend">>, <<"devnull">>)), + check_iqdisc(mod_inbox). 
+ +mod_global_distrib(_Config) -> + ConnOpts = [ + {advertised_endpoints, [{"172.16.0.1", 5555}, {"localhost", 80}, {"example.com", 5555}]}, + {connections_per_endpoint, 22}, + {disabled_gc_interval, 60}, + {endpoint_refresh_interval, 120}, + {endpoint_refresh_interval_when_empty, 5}, + {endpoints, [{"172.16.0.2", 5555}, {"localhost", 80}, {"example.com", 5555}]}, + {tls_opts, [ + {cafile, "/dev/null"}, + {certfile, "/dev/null"}, + {ciphers, "TLS_AES_256_GCM_SHA384"}, + {dhfile, "/dev/null"} + ]} + ], + CacheOpts = [ {cache_missed, false}, {domain_lifetime_seconds, 60}, + {jid_lifetime_seconds, 30}, {max_jids, 9999} ], + BounceOpts = [ {max_retries, 3}, {resend_after_ms, 300} ], + RedisOpts = [ {expire_after, 120}, {pool, global_distrib}, {refresh_after, 60} ], + TTOpts = #{ + <<"certfile">> => <<"/dev/null">>, + <<"cacertfile">> => <<"/dev/null">>, + <<"dhfile">> => <<"/dev/null">>, + <<"ciphers">> => <<"TLS_AES_256_GCM_SHA384">> + }, + TConnOpts = #{ + <<"endpoints">> => [#{<<"host">> => <<"172.16.0.2">>, <<"port">> => 5555}, + #{<<"host">> => <<"localhost">>, <<"port">> => 80}, + #{<<"host">> => <<"example.com">>, <<"port">> => 5555}], + <<"advertised_endpoints">> => + [#{<<"host">> => <<"172.16.0.1">>, <<"port">> => 5555}, + #{<<"host">> => <<"localhost">>, <<"port">> => 80}, + #{<<"host">> => <<"example.com">>, <<"port">> => 5555}], + <<"connections_per_endpoint">> => 22, + <<"disabled_gc_interval">> => 60, + <<"endpoint_refresh_interval">> => 120, + <<"endpoint_refresh_interval_when_empty">> => 5, + <<"tls">> => TTOpts + }, + TCacheOpts = #{ <<"cache_missed">> => false, + <<"domain_lifetime_seconds">> => 60, + <<"jid_lifetime_seconds">> => 30, + <<"max_jids">> => 9999 }, + TBounceOpts = #{ <<"resend_after_ms">> => 300, <<"max_retries">> => 3 }, + TRedisOpts = #{ <<"pool">> => <<"global_distrib">>, + <<"expire_after">> => 120, + <<"refresh_after">> => 60 }, + T = fun(Opts) -> #{<<"modules">> => #{<<"mod_global_distrib">> => Opts}} end, + Base = #{ + 
<<"global_host">> => <<"example.com">>, + <<"local_host">> => <<"datacenter1.example.com">>, + <<"message_ttl">> => 42, + <<"hosts_refresh_interval">> => 100, + <<"connections">> => TConnOpts, + <<"cache">> => TCacheOpts, + <<"bounce">> => TBounceOpts, + <<"redis">> => TRedisOpts + }, + MBase = [ + {bounce, BounceOpts}, + {cache, CacheOpts}, + {connections, ConnOpts}, + {global_host, "example.com"}, + {hosts_refresh_interval, 100}, + {local_host, "datacenter1.example.com"}, + {message_ttl, 42}, + {redis, RedisOpts} + ], + ?eqf(modopts(mod_global_distrib, [ + {bounce, BounceOpts}, + {cache, CacheOpts}, + {connections, ConnOpts}, + {global_host, "example.com"}, + {hosts_refresh_interval, 100}, + {local_host, "datacenter1.example.com"}, + {message_ttl, 42}, + {redis, RedisOpts} + ]), T(Base)), + ?eqf(modopts(mod_global_distrib, + set_pl(connections, + set_pl(advertised_endpoints, false, ConnOpts), + MBase)), + T(Base#{<<"connections">> => TConnOpts#{ + <<"advertised_endpoints">> => false}})), + ?eqf(modopts(mod_global_distrib, + set_pl(connections, + set_pl(tls_opts, false, ConnOpts), + MBase)), + T(Base#{<<"connections">> => TConnOpts#{<<"tls">> => false}})), + ?eqf(modopts(mod_global_distrib, + set_pl(connections, + set_pl(tls_opts, false, ConnOpts), + MBase)), + T(Base#{<<"connections">> => TConnOpts#{<<"tls">> => false}})), + %% Connection opts + ?errf(T(Base#{<<"connections">> => TConnOpts#{ + <<"tls">> =>TTOpts#{<<"certfile">> => <<"/this/does/not/exist">>}}})), + ?errf(T(Base#{<<"connections">> => TConnOpts#{ + <<"tls">> =>TTOpts#{<<"dhfile">> => <<"/this/does/not/exist">>}}})), + ?errf(T(Base#{<<"connections">> => TConnOpts#{ + <<"tls">> =>TTOpts#{<<"cacertfile">> => <<"/this/does/not/exist">>}}})), + ?errf(T(Base#{<<"connections">> => TConnOpts#{ + <<"tls">> =>TTOpts#{<<"ciphers">> => 42}}})), + ?errf(T(Base#{<<"connections">> => TConnOpts#{ + <<"endpoints">> =>[#{<<"host">> => 234, <<"port">> => 5555}]}})), + ?errf(T(Base#{<<"connections">> => TConnOpts#{ + 
<<"advertised_endpoints">> =>[#{<<"host">> => 234, <<"port">> => 5555}]}})), + ?errf(T(Base#{<<"connections">> => TConnOpts#{ + <<"connections_per_endpoint">> => -1}})), + ?errf(T(Base#{<<"connections">> => TConnOpts#{ + <<"connections_per_endpoint">> => <<"kek">>}})), + ?errf(T(Base#{<<"connections">> => TConnOpts#{<<"connections_per_endpoint">> => -1}})), + ?errf(T(Base#{<<"connections">> => TConnOpts#{<<"disabled_gc_interval">> => -1}})), + ?errf(T(Base#{<<"connections">> => TConnOpts#{<<"endpoint_refresh_interval">> => -1}})), + ?errf(T(Base#{<<"connections">> => TConnOpts#{<<"endpoint_refresh_interval_when_empty">> => -1}})), + %% Redis Opts + ?errf(T(Base#{<<"redis">> => TRedisOpts#{<<"pool">> => -1}})), + ?errf(T(Base#{<<"redis">> => TRedisOpts#{<<"expire_after">> => -1}})), + ?errf(T(Base#{<<"redis">> => TRedisOpts#{<<"refresh_after">> => -1}})), + %% Cache Opts + ?errf(T(Base#{<<"cache">> => TCacheOpts#{<<"cache_missed">> => 1}})), + ?errf(T(Base#{<<"cache">> => TCacheOpts#{<<"domain_lifetime_seconds">> => -1}})), + ?errf(T(Base#{<<"cache">> => TCacheOpts#{<<"jid_lifetime_seconds">> => -1}})), + ?errf(T(Base#{<<"cache">> => TCacheOpts#{<<"max_jids">> => -1}})), + %% Bouncing Opts + ?errf(T(Base#{<<"bounce">> => TCacheOpts#{<<"resend_after_ms">> => -1}})), + ?errf(T(Base#{<<"bounce">> => TCacheOpts#{<<"max_retries">> => -1}})), + %% Global Opts + ?errf(T(Base#{<<"global_host">> => <<"example omm omm omm">>})), + ?errf(T(Base#{<<"global_host">> => 1})), + ?errf(T(Base#{<<"local_host">> => <<"example omm omm omm">>})), + ?errf(T(Base#{<<"local_host">> => 1})), + ?errf(T(Base#{<<"message_ttl">> => <<"kek">>})), + ?errf(T(Base#{<<"message_ttl">> => -1})), + ?errf(T(Base#{<<"hosts_refresh_interval">> => <<"kek">>})), + ?errf(T(Base#{<<"hosts_refresh_interval">> => -1})). 
+ +mod_event_pusher(_Config) -> + T = fun(Backend, Opt, Value) -> + Opts = #{<<"backend">> => #{ + atom_to_binary(Backend, utf8) => + #{atom_to_binary(Opt, utf8) => Value}}}, + #{<<"modules">> => #{<<"mod_event_pusher">> => Opts}} + end, + M = fun(Backend, Opt, Value) -> + Backends = [{Backend, [{Opt, Value}]}], + modopts(mod_event_pusher, [{backends, Backends}]) + end, + [?eqf(M(Backend, Opt, Mim), T(Backend, Opt, Toml)) + || {Backend, Opt, Toml, Mim} <- mod_event_pusher_valid_opts()], + [begin + FullToml = T(Backend, Opt, Toml), + try + ?errf(FullToml) + catch Class:Error:Stacktrace -> + erlang:raise(Class, #{what => passed_but_shouldnt, + backend => Backend, opt => Opt, + full_toml => FullToml, + toml => Toml, reason => Error}, + Stacktrace) + end + end + || {Backend, Opt, Toml} <- mod_event_pusher_ivalid_opts()], + ok. + +mod_event_pusher_valid_opts() -> + %% {BackendName, BackendOptionName, TomlValue, MongooseValue} + [%% sns + {sns, access_key_id, <<"AKIAIOSFODNN7EXAMPLE">>,"AKIAIOSFODNN7EXAMPLE"}, + {sns, secret_access_key, <<"KEY">>, "KEY"}, + {sns, region, <<"eu-west-1">>, "eu-west-1"}, + {sns, account_id, <<"123456789012">>, "123456789012"}, + {sns, sns_host, <<"sns.eu-west-1.amazonaws.com">>, "sns.eu-west-1.amazonaws.com"}, + {sns, muc_host, <<"conference.HOST">>, "conference.HOST"}, + {sns, plugin_module, <<"mod_event_pusher_sns_defaults">>, mod_event_pusher_sns_defaults}, + {sns, presence_updates_topic, <<"user_presence_updated">>, "user_presence_updated"}, + {sns, pm_messages_topic, <<"user_message_sent">>, "user_message_sent"}, + {sns, muc_messages_topic, <<"user_messagegroup_sent">>, "user_messagegroup_sent"}, + {sns, pool_size, 100, 100}, + {sns, publish_retry_count, 2, 2}, + {sns, publish_retry_time_ms, 50, 50}, + %% push + {push, plugin_module, <<"mod_event_pusher_push_plugin_defaults">>, mod_event_pusher_push_plugin_defaults}, + {push, virtual_pubsub_hosts, [<<"host1">>, <<"host2">>], ["host1", "host2"]}, + {push, backend, <<"mnesia">>, mnesia}, 
+ {push, wpool, #{<<"workers">> => 200}, [{workers, 200}]}, + %% http + {http, pool_name, <<"http_pool">>, http_pool}, + {http, path, <<"/notifications">>, "/notifications"}, + {http, callback_module, <<"mod_event_pusher_http_defaults">>, mod_event_pusher_http_defaults}, + %% rabbit + {rabbit, presence_exchange, + #{<<"name">> => <<"presence">>, <<"type">> => <<"topic">>}, + [{name, <<"presence">>}, {type, <<"topic">>}]}, + {rabbit, chat_msg_exchange, + #{<<"name">> => <<"chat_msg">>, + <<"recv_topic">> => <<"chat_msg_recv">>, + <<"sent_topic">> => <<"chat_msg_sent">>}, + [{name, <<"chat_msg">>}, + {recv_topic, <<"chat_msg_recv">>}, + {sent_topic, <<"chat_msg_sent">>}]}, + {rabbit, groupchat_msg_exchange, + #{<<"name">> => <<"groupchat_msg">>, + <<"sent_topic">> => <<"groupchat_msg_sent">>, + <<"recv_topic">> => <<"groupchat_msg_recv">>}, + [{name, <<"groupchat_msg">>}, + {recv_topic, <<"groupchat_msg_recv">>}, + {sent_topic, <<"groupchat_msg_sent">>}]}]. + +mod_event_pusher_ivalid_opts() -> + %% {BackendName, BackendOptionName, TomlValue} + [%% sns + {sns, access_key_id, 1}, + {sns, secret_access_key, 1}, + {sns, region, 1}, + {sns, account_id, 1}, + {sns, sns_host, 1}, + {sns, muc_host, 1}, + {sns, muc_host, <<"kek kek">>}, + {sns, plugin_module, <<"wow_cool_but_missing">>}, + {sns, plugin_module, 1}, + {sns, presence_updates_topic, 1}, + {sns, pm_messages_topic, 1}, + {sns, muc_messages_topic, 1}, + {sns, pool_size, <<"1">>}, + {sns, publish_retry_count, <<"1">>}, + {sns, publish_retry_time_ms, <<"1">>}, + %% push + {push, plugin_module, <<"wow_cool_but_missing">>}, + {push, plugin_module, 1}, + {push, virtual_pubsub_hosts, [<<"host with whitespace">>]}, + {push, backend, <<"mnesiAD">>}, + {push, wpool, #{<<"workers">> => <<"500">>}}, + %% http + {http, pool_name, 1}, + {http, path, 1}, + {http, callback_module, <<"wow_cool_but_missing">>}, + {http, callback_module, 1}, + %% rabbit + {rabbit, presence_exchange, + #{<<"namesss">> => <<"presence">>, <<"type">> => 
<<"topic">>}}, + {rabbit, presence_exchange, + #{<<"name">> => <<"presence">>, <<"typessss">> => <<"topic">>}}, + {rabbit, presence_exchange, + #{<<"name">> => 1, <<"type">> => <<"topic">>}}, + {rabbit, presence_exchange, + #{<<"name">> => <<"presence">>, <<"type">> => 1}} + ] ++ make_chat_exchange_invalid_opts(chat_msg_exchange) + ++ make_chat_exchange_invalid_opts(groupchat_msg_exchange). + +make_chat_exchange_invalid_opts(Exchange) -> + [{rabbit, Exchange, Val} || Val <- chat_exchange_invalid_opts()]. + +chat_exchange_invalid_opts() -> + [#{<<"names4">> => <<"chat_msg">>, + <<"recv_topic">> => <<"chat_msg_recv">>, + <<"sent_topic">> => <<"chat_msg_sent">>}, + #{<<"name">> => <<"chat_msg">>, + <<"recv_topicsss">> => <<"chat_msg_recv">>, + <<"sent_topic">> => <<"chat_msg_sent">>}, + #{<<"name">> => <<"chat_msg">>, + <<"recv_topics33">> => <<"chat_msg_recv">>, + <<"sent_topic">> => <<"chat_msg_sent">>}, + #{<<"name">> => <<"chat_msg">>, + <<"recv_topic">> => <<"chat_msg_recv">>, + <<"sent_topics444">> => <<"chat_msg_sent">>}, + #{<<"name">> => 1, + <<"recv_topic">> => <<"chat_msg_recv">>, + <<"sent_topic">> => <<"chat_msg_sent">>}, + #{<<"name">> => <<"chat_msg">>, + <<"recv_topic">> => 1, + <<"sent_topic">> => <<"chat_msg_sent">>}, + #{<<"name">> => <<"chat_msg">>, + <<"recv_topic">> => <<"chat_msg_recv">>, + <<"sent_topic">> => 1}]. 
+ +mod_http_upload(_Config) -> + T = fun(Opts) -> #{<<"modules">> => #{<<"mod_http_upload">> => Opts}} end, + S3 = #{ + <<"bucket_url">> => <<"https://s3-eu-west-1.amazonaws.com/mybucket">>, + <<"add_acl">> => true, + <<"region">> => <<"antarctica-1">>, + <<"access_key_id">> => <<"PLEASE">>, + <<"secret_access_key">> => <<"ILOVEU">> + }, + Base = #{ + <<"iqdisc">> => #{<<"type">> => <<"one_queue">>}, + <<"host">> => <<"upload.@HOST@">>, + <<"backend">> => <<"s3">>, + <<"expiration_time">> => 666, + <<"token_bytes">> => 32, + <<"max_file_size">> => 42, + <<"s3">> => S3 + }, + MS3 = [{access_key_id, "PLEASE"}, + {add_acl, true}, + {bucket_url, "https://s3-eu-west-1.amazonaws.com/mybucket"}, + {region, "antarctica-1"}, + {secret_access_key, "ILOVEU"}], + MBase = [{backend, s3}, + {expiration_time, 666}, + {host, "upload.@HOST@"}, + {iqdisc, one_queue}, + {max_file_size, 42}, + {s3, MS3}, + {token_bytes, 32}], + ?eqf(modopts(mod_http_upload, MBase), T(Base)), + ?errf(T(Base#{<<"host">> => -1})), + ?errf(T(Base#{<<"host">> => <<" f g ">>})), + ?errf(T(Base#{<<"backend">> => <<"dev_null_as_a_service">>})), + ?errf(T(Base#{<<"expiration_time">> => <<>>})), + ?errf(T(Base#{<<"expiration_time">> => -1})), + ?errf(T(Base#{<<"token_bytes">> => -1})), + ?errf(T(Base#{<<"max_file_size">> => -1})), + ?errf(T(Base#{<<"s3">> => S3#{<<"access_key_id">> => -1}})), + ?errf(T(Base#{<<"s3">> => S3#{<<"add_acl">> => -1}})), + ?errf(T(Base#{<<"s3">> => S3#{<<"bucket_url">> => -1}})), + ?errf(T(Base#{<<"s3">> => S3#{<<"region">> => -1}})), + ?errf(T(Base#{<<"s3">> => S3#{<<"secret_access_key">> => -1}})), + check_iqdisc(mod_http_upload). 
+ +mod_jingle_sip(_Config) -> + T = fun(Opts) -> #{<<"modules">> => #{<<"mod_jingle_sip">> => Opts}} end, + Base = #{ + <<"proxy_host">> => <<"proxxxy">>, + <<"proxy_port">> => 5600, + <<"listen_port">> => 5601, + <<"local_host">> => <<"localhost">>, + <<"sdp_origin">> => <<"127.0.0.1">> + }, + MBase = [ + {listen_port, 5601}, + {local_host, "localhost"}, + {proxy_host, "proxxxy"}, + {proxy_port, 5600}, + {sdp_origin, "127.0.0.1"} + ], + ?eqf(modopts(mod_jingle_sip, MBase), T(Base)), + ?errf(T(Base#{<<"proxy_host">> => -1})), + ?errf(T(Base#{<<"proxy_host">> => <<"test test">>})), + ?errf(T(Base#{<<"listen_port">> => -1})), + ?errf(T(Base#{<<"listen_port">> => 10000000})), + ?errf(T(Base#{<<"proxy_port">> => -1})), + ?errf(T(Base#{<<"proxy_port">> => 10000000})), + ?errf(T(Base#{<<"local_host">> => 1})), + ?errf(T(Base#{<<"local_host">> => <<"ok ok">>})), + ?errf(T(Base#{<<"sdp_origin">> => <<"aaaaaaaaa">>})). + +mod_keystore(_Config) -> + T = fun(Opts) -> #{<<"modules">> => #{<<"mod_keystore">> => Opts}} end, + Keys = [#{<<"name">> => <<"access_secret">>, + <<"type">> => <<"ram">>}, + #{<<"name">> => <<"access_psk">>, + <<"type">> => <<"file">>, + <<"path">> => <<"priv/access_psk">>}, + #{<<"name">> => <<"provision_psk">>, + <<"type">> => <<"file">>, + <<"path">> => <<"priv/provision_psk">>}], + NotExistingKey = #{<<"name">> => <<"provision_psk">>, + <<"type">> => <<"file">>, + <<"path">> => <<"does/not/esit">>}, + InvalidTypeKey = #{<<"name">> => <<"provision_psk">>, + <<"type">> => <<"some_cooool_type">>}, + MKeys = [{access_secret, ram}, + {access_psk, {file, "priv/access_psk"}}, + {provision_psk, {file, "priv/provision_psk"}}], + Base = #{<<"keys">> => Keys, <<"ram_key_size">> => 10000}, + MBase = [{keys, MKeys}, {ram_key_size, 10000}], + ?eqf(modopts(mod_keystore, MBase), T(Base)), + ?errf(T(Base#{<<"keys">> => [NotExistingKey]})), + ?errf(T(Base#{<<"keys">> => [InvalidTypeKey]})). 
+ +mod_last(_Config) -> + T = fun(Opts) -> #{<<"modules">> => #{<<"mod_last">> => Opts}} end, + Base = #{<<"iqdisc">> => #{<<"type">> => <<"one_queue">>}, + <<"backend">> => <<"riak">>, + <<"riak">> => #{<<"bucket_type">> => <<"test">>}}, + MBase = [{backend, riak}, + {bucket_type, <<"test">>}, + {iqdisc, one_queue}], + ?eqf(modopts(mod_last, MBase), T(Base)), + ?errf(T(Base#{<<"backend">> => <<"riak_is_the_best">>})), + ?errf(T(Base#{<<"riak">> => #{<<"bucket_type">> => 1}})), + check_iqdisc(mod_last). + +mod_mam_meta(_Config) -> + T = fun(Opts) -> #{<<"modules">> => #{<<"mod_mam_meta">> => Opts}} end, + %% You can define options in mod_mam and they would work. + %% + %% We _could_ validate that `host' option does not exist in the PM + %% section, but it would just make everything harder. + %% + %% Same with `no_stanzaid_element' for PM. + Common = #{<<"archive_chat_markers">> => true, + <<"archive_groupchats">> => true, + <<"async_writer">> => true, + <<"async_writer_rdbms_pool">> => <<"poop">>, + <<"backend">> => <<"riak">>, + <<"cache_users">> => true, + <<"db_jid_format">> => <<"mam_jid_rfc">>, % module + <<"db_message_format">> => <<"mam_message_xml">>, % module + <<"default_result_limit">> => 50, + <<"extra_lookup_params">> => <<"mod_mam_utils">>, + <<"host">> => <<"conf.localhost">>, + <<"flush_interval">> => 500, + <<"full_text_search">> => true, + <<"max_batch_size">> => 50, + <<"max_result_limit">> => 50, + <<"message_retraction">> => true, + <<"rdbms_message_format">> => <<"simple">>, + <<"simple">> => true, + <<"no_stanzaid_element">> => true, + <<"is_archivable_message">> => <<"mod_mam_utils">>, + <<"user_prefs_store">> => false}, %% or rdbms. 
but not true + MCommon = [{archive_chat_markers, true}, + {archive_groupchats, true}, + {async_writer, true}, + {async_writer_rdbms_pool, poop}, + {backend, riak}, + {cache_users, true}, + {db_jid_format, mam_jid_rfc}, + {db_message_format, mam_message_xml}, + {default_result_limit, 50}, + {extra_lookup_params, mod_mam_utils}, + {flush_interval, 500}, + {full_text_search, true}, + %% While applied just for MUC, it could be specified as root option + %% Still, it probably should've been called muc_host from the + %% beginning + {host, "conf.localhost"}, + {is_archivable_message, mod_mam_utils}, + {max_batch_size, 50}, + {max_result_limit, 50}, + {message_retraction, true}, + {no_stanzaid_element, true}, + {rdbms_message_format, simple}, + {simple, true}, + {user_prefs_store, false}], + ensure_sorted(MCommon), + Riak = #{<<"bucket_type">> => <<"mam_yz">>, <<"search_index">> => <<"mam">>}, + MRiak = [{bucket_type, <<"mam_yz">>}, {search_index, <<"mam">>}], + Base = Common#{ + <<"pm">> => Common, + <<"muc">> => Common, + %% Separate section for riak. We don't need it in pm or in muc, + %% because there is no separate riak module for muc. + <<"riak">> => Riak}, + MBase0 = [{muc, MCommon}, + {pm, MCommon}] + ++ MRiak, %% This one is flattened into mim opts + MBase = pl_merge(MCommon, MBase0), + %% It's not easy to test riak options with check_one_opts function, + %% so skip it. + %% We also skip single muc/pm options on this step. 
+ KeysForOneOpts = binaries_to_atoms(maps:keys(Common)), + TPM = fun(Map) -> T(#{<<"pm">> => Map}) end, + TMuc = fun(Map) -> T(#{<<"muc">> => Map}) end, + TB = fun(Map) -> T(maps:merge(Base, Map)) end, + %% by default parser adds pm and muc keys set to false + Hook = fun(Mim, Toml) -> {lists:sort([{pm, false}, {muc, false}|Mim]), Toml} end, + run_multi( + %% Test configurations with one option only + check_one_opts(mod_mam_meta, MBase, Base, T, KeysForOneOpts, Hook) ++ [ + ?_eqf(modopts(mod_mam_meta, [{muc, false}, {pm, false}]), T(#{})), + ?_eqf(modopts(mod_mam_meta, MBase), T(Base)), + %% Second format for user_prefs_store + ?_eqf(modopts(mod_mam_meta, pl_merge(MBase, [{user_prefs_store, rdbms}])), + T(Base#{<<"user_prefs_store">> => <<"rdbms">>})) + ] + ++ mam_failing_cases(T) + ++ mam_failing_cases(TPM) + ++ mam_failing_cases(TMuc) + ++ mam_failing_cases(TB) + ++ mam_failing_riak_cases(T) + ). + +mam_failing_cases(T) -> + [?_errf(T(#{<<"pm">> => false})), % should be a section + ?_errf(T(#{<<"muc">> => false})), % should be a section + ?_errf(T(#{<<"archive_chat_markers">> => 1})), + ?_errf(T(#{<<"archive_groupchats">> => 1})), + ?_errf(T(#{<<"async_writer">> => 1})), + ?_errf(T(#{<<"async_writer_rdbms_pool">> => 1})), + ?_errf(T(#{<<"backend">> => 1})), + ?_errf(T(#{<<"cache_users">> => 1})), + ?_errf(T(#{<<"db_jid_format">> => 1})), + ?_errf(T(#{<<"db_jid_format">> => <<"does_not_exist_mod">>})), + ?_errf(T(#{<<"db_message_format">> => 1})), + ?_errf(T(#{<<"db_message_format">> => <<"does_not_exist_mod">>})), + ?_errf(T(#{<<"default_result_limit">> => <<"meow">>})), + ?_errf(T(#{<<"default_result_limit">> => -20})), + ?_errf(T(#{<<"extra_lookup_params">> => -1})), + ?_errf(T(#{<<"extra_lookup_params">> => <<"aaaaaaa_not_exist">>})), + ?_errf(T(#{<<"host">> => <<"meow meow">>})), + ?_errf(T(#{<<"flush_interval">> => <<"meow">>})), + ?_errf(T(#{<<"flush_interval">> => -20})), + ?_errf(T(#{<<"full_text_search">> => -1})), + ?_errf(T(#{<<"max_batch_size">> => 
-1})), + ?_errf(T(#{<<"max_result_limit">> => -1})), + ?_errf(T(#{<<"message_retraction">> => -1})), + ?_errf(T(#{<<"rdbms_message_format">> => <<"verysimple">>})), + ?_errf(T(#{<<"simple">> => 1})), + ?_errf(T(#{<<"no_stanzaid_element">> => -1})), + ?_errf(T(#{<<"is_archivable_message">> => -1})), + ?_errf(T(#{<<"user_prefs_store">> => 1}))]. + +mam_failing_riak_cases(T) -> + [?_errf(T(#{<<"riak">> => #{<<"bucket_type">> => 1}})), + ?_errf(T(#{<<"riak">> => #{<<"search_index">> => 1}}))]. + +mod_muc(_Config) -> + T = fun(Opts) -> #{<<"modules">> => #{<<"mod_muc">> => Opts}} end, + Base = #{ + <<"access">> => <<"all">>, + <<"access_admin">> => <<"none">>, + <<"access_create">> => <<"all">>, + <<"access_persistent">> => <<"all">>, + <<"backend">> => <<"mnesia">>, + <<"hibernated_room_check_interval">> => <<"infinity">>, + <<"hibernated_room_timeout">> => <<"infinity">>, + <<"history_size">> => 20, + <<"host">> => <<"conference.@HOST@">>, + <<"http_auth_pool">> => <<"none">>, + <<"load_permanent_rooms_at_startup">> => false, + <<"max_room_desc">> => <<"infinity">>, + <<"max_room_id">> => <<"infinity">>, + <<"max_room_name">> => <<"infinity">>, + <<"max_user_conferences">> => 0, + <<"max_users">> => 200, + <<"max_users_admin_threshold">> => 5, + <<"min_message_interval">> => 0, + <<"min_presence_interval">> => 0, + <<"room_shaper">> => <<"none">>, + <<"user_message_shaper">> => <<"none">>, + <<"user_presence_shaper">> => <<"none">> + }, + MBase = [{access,all}, + {access_admin,none}, + {access_create,all}, + {access_persistent,all}, + {backend,mnesia}, + {hibernated_room_check_interval,infinity}, + {hibernated_room_timeout,infinity}, + {history_size,20}, + {host,"conference.@HOST@"}, + {http_auth_pool,none}, + {load_permanent_rooms_at_startup,false}, + {max_room_desc,infinity}, + {max_room_id,infinity}, + {max_room_name,infinity}, + {max_user_conferences,0}, + {max_users,200}, + {max_users_admin_threshold,5}, + {min_message_interval,0}, + {min_presence_interval,0}, + 
{room_shaper,none}, + {user_message_shaper,none}, + {user_presence_shaper,none}], + ensure_sorted(MBase), + run_multi( + %% Test configurations with one option only + check_one_opts(mod_muc, MBase, Base, T) ++ [ + ?_eqf(modopts(mod_muc, MBase), T(Base)), + ?_eqf(modopts(mod_muc, [{default_room_options,[]}]), + T(#{<<"default_room">> => #{}})) + ] ++ some_muc_opts_cases(T) + ++ some_room_opts_cases(T) + ++ bad_muc_opts_cases(T) + ++ bad_room_opts_cases(T) + ). + +some_muc_opts_cases(T) -> + [some_muc_opts_case(T, K, Toml, Mim) || {K, Toml, Mim} <- some_muc_opts()]. + +some_muc_opts_case(T, K, Toml, Mim) -> + ?_eqf(modopts(mod_muc, [{K, Mim}]), T(#{a2b(K) => Toml})). + +bad_muc_opts_cases(T) -> + [bad_muc_opts_case(T, K, Toml) || {K, Toml} <- bad_muc_opts()]. + +bad_muc_opts_case(T, K, Toml) -> + ?_errf(T(#{a2b(K) => Toml})). + +some_room_opts_cases(T) -> + [some_room_opts_case(T, K, Toml, Mim) || {K, Toml, Mim} <- some_room_opts()]. + +some_room_opts_case(T, K, Toml, Mim) -> + ?_eqf(modopts(mod_muc, [{default_room_options, [{K, Mim}]}]), + T(#{<<"default_room">> => #{a2b(K) => Toml}})). + +bad_room_opts_cases(T) -> + [bad_room_opts_case(T, K, Toml) || {K, Toml} <- bad_room_opts()]. + +bad_room_opts_case(T, K, Toml) -> + ?_errf(T(#{<<"default_room">> => #{a2b(K) => Toml}})). + +some_muc_opts() -> + %% name toml mim + [{hibernated_room_check_interval, 1, 1}, + {hibernated_room_timeout, 1, 1}, + {history_size, 0, 0}, + {host, <<"good">>, "good"}, + {http_auth_pool, <<"deadpool">>, deadpool}, + {load_permanent_rooms_at_startup, true, true}, + {max_room_desc, 10, 10}, + {max_room_id, 10, 10}, + {max_room_name, 10, 10}, + {max_user_conferences, 10, 10}, + {max_users, 10, 10}, + {max_users_admin_threshold, 10, 10}, + {min_message_interval, 10, 10}, + {min_presence_interval, 10, 10}, + {room_shaper, <<"good">>, good}, + {user_message_shaper, <<"good">>, good}, + {user_presence_shaper, <<"good">>, good}]. 
+ +bad_muc_opts() -> + %% name toml + [{access, 1}, + {access_admin, 1}, + {access_create, 1}, + {access_persistent, 1}, + {backend, 1}, + {backend, <<"meowmoew">>}, + {hibernated_room_check_interval, -1}, + {hibernated_room_timeout, -1}, + {history_size, -1}, + {host, 1}, + {host, <<"bad bad bad">>}, + {http_auth_pool, 1}, + {load_permanent_rooms_at_startup, 1}, + {max_room_desc, -1}, + {max_room_id, -1}, + {max_room_name, -1}, + {max_user_conferences, -1}, + {max_users, -1}, + {max_users_admin_threshold, -1}, + {min_message_interval, -1}, + {min_presence_interval, -1}, + {room_shaper, 1}, + {user_message_shaper, 1}, + {user_presence_shaper, 1}]. + +some_room_opts() -> + [{title, <<"Test">>, <<"Test">>}, + {description, <<"Test">>, <<"Test">>}, + {allow_change_subj, true, true}, + {allow_query_users, true, true}, + {allow_private_messages, true, true}, + {allow_visitor_status, true, true}, + {allow_visitor_nickchange, true, true}, + {public, true, true}, + {public_list, true, true}, + {moderated, true, true}, + {members_by_default, true, true}, + {members_only, true, true}, + {allow_user_invites, true, true}, + {allow_multiple_sessions, true, true}, + {password_protected, true, true}, + {password, <<"secret">>, <<"secret">>}, + {anonymous, true, true}, + {max_users, 10, 10}, + {logging, true, true}, + {maygetmemberlist, [<<"moderator">>, <<"user">>], [moderator, user]}, + {affiliations, [#{<<"user">> => <<"Alice">>, <<"server">> => <<"home">>, + <<"resource">> => <<>>, <<"affiliation">> => <<"member">>}], + [{{<<"Alice">>, <<"home">>, <<>>}, member}]}, + {subject, <<"Fight">>, <<"Fight">>}, + {subject_author, <<"meow">>, <<"meow">>} + ]. 
+ +bad_room_opts() -> + [{title, 1}, + {description, 1}, + {allow_change_subj, 1}, + {allow_query_users, 1}, + {allow_private_messages, 1}, + {allow_visitor_status, 1}, + {allow_visitor_nickchange, 1}, + {public, 1}, + {public_list, 1}, + {persistent, 1}, + {moderated, 1}, + {members_by_default, 1}, + {members_only, 1}, + {allow_user_invites, 1}, + {allow_multiple_sessions, 1}, + {password_protected, 1}, + {password, 1}, + {anonymous, 1}, + {max_users, -1}, + {logging, 1}, + {maygetmemberlist, 1}, + {maygetmemberlist, [1]}, + {maygetmemberlist, #{}}, + {subject, 1}, + {subject_author, 1}, + {affiliations, [1]}, + {affiliations, 1}, + {affiliations, [#{<<"user">> => <<"Alice">>, <<"server">> => <<"home home">>, + <<"resource">> => <<>>, <<"affiliation">> => <<"member">>}]}, + {affiliations, [#{<<"user">> => 1, <<"server">> => <<"home">>, + <<"resource">> => <<>>, <<"affiliation">> => <<"member">>}]}, + {affiliations, [#{<<"user">> => <<"Alice">>, <<"server">> => 1, + <<"resource">> => <<>>, <<"affiliation">> => <<"member">>}]}, + {affiliations, [#{<<"user">> => <<"Alice">>, <<"server">> => <<"home">>, + <<"resource">> => 1, <<"affiliation">> => <<"member">>}]}, + {affiliations, [#{<<"user">> => <<"Alice">>, <<"server">> => <<"home">>, + <<"resource">> => <<>>, <<"affiliation">> => 1}]}, + {affiliations, [#{<<"server">> => <<"home">>, + <<"resource">> => <<>>, <<"affiliation">> => <<"member">>}]}, + {affiliations, [#{<<"user">> => <<"Alice">>, + <<"resource">> => <<>>, <<"affiliation">> => <<"member">>}]}, + {affiliations, [#{<<"user">> => <<"Alice">>, <<"server">> => <<"home">>, + <<"affiliation">> => <<"member">>}]} %% Resource required + ]. + +mod_muc_log(_Config) -> + T = fun(Opts) -> #{<<"modules">> => #{<<"mod_muc_log">> => Opts}} end, + run_multi( + generic_opts_cases(mod_muc_log, T, mod_muc_log_opts()) ++ + generic_renamed_opts_cases(mod_muc_log, T, mod_muc_log_renamed_opts()) ++ + generic_bad_opts_cases(T, mod_muc_log_bad_opts()) + ). 
+ +mod_muc_log_renamed_opts() -> + %% toml-name mim-name toml mim + [{css_file, cssfile, <<"path/to/css_file">>, <<"path/to/css_file">>}, + {css_file, cssfile, false, false}]. + +mod_muc_log_opts() -> + %% name toml mim + [{outdir, <<"www/muc">>, "www/muc"}, + {access_log, <<"muc_admin">>, muc_admin}, + {dirtype, <<"subdirs">>, subdirs}, + {dirtype, <<"plain">>, plain}, + {file_format, <<"html">>, html}, + {file_format, <<"plaintext">>, plaintext}, + {timezone, <<"local">>, local}, + {timezone, <<"universal">>, universal}, + {spam_prevention, true, true}, + {top_link, #{<<"target">> => <<"https://mongooseim.readthedocs.io/en/latest/modules/mod_muc_log/">>, + <<"text">> => <<"docs">>}, + {"https://mongooseim.readthedocs.io/en/latest/modules/mod_muc_log/", "docs"}}]. + +mod_muc_log_bad_opts() -> + %% toml-name toml + [{outdir, 1}, + {outdir, <<"does/not/exist">>}, + {access_log, 1}, + {dirtype, <<"subways">>}, + {file_format, <<"haskelencodedlove">>}, + {timezone, <<"galactive">>}, + {spam_prevention, 69}, + {top_link, #{<<"target">> => 1, <<"text">> => <<"docs">>}}, + {top_link, #{<<"target">> => <<"https://mongooseim.readthedocs.io/">>, <<"text">> => <<>>}} + ]. + +mod_muc_light(_Config) -> + T = fun(Opts) -> #{<<"modules">> => #{<<"mod_muc_light">> => Opts}} end, + run_multi( + generic_opts_cases(mod_muc_light, T, mod_muc_light_opts()) ++ + generic_bad_opts_cases(T, mod_muc_light_bad_opts()) + ). 
+ +mod_muc_light_opts() -> + [{host, <<"muclight.@HOST@">>, "muclight.@HOST@"}, + {backend, <<"mnesia">>, mnesia}, + {equal_occupants, true, true}, + {legacy_mode, true, true}, + {rooms_per_user, 1, 1}, + {rooms_per_user, <<"infinity">>, infinity}, + {blocking, true, true}, + {all_can_configure, true, true}, + {all_can_invite, true, true}, + {max_occupants, 1, 1}, + {max_occupants, <<"infinity">>, infinity}, + {rooms_per_page, 1, 1}, + {rooms_per_page, <<"infinity">>, infinity}, + {rooms_in_rosters, true, true}, + {config_schema, [ + #{<<"field">> => <<"roomname">>, <<"value">> => <<"My Room">>}, + #{<<"field">> => <<"subject">>, <<"value">> => <<"Hi">>}, + #{<<"field">> => <<"priority">>, <<"value">> => 0, + <<"internal_key">> => <<"priority">>, <<"type">> => <<"integer">>} + ], + [{"roomname", "My Room"}, {"subject", "Hi"}, + {"priority", 0, priority, integer}]} + ]. + +mod_muc_light_bad_opts() -> + [{host, 1}, + {host, <<"test test">>}, + {equal_occupants, 1}, + {equal_occupants, #{}}, + {legacy_mode, 1}, + {rooms_per_user, true}, + {blocking, 1}, + {all_can_configure, 1}, + {all_can_invite, 1}, + {max_occupants, true}, + {rooms_per_page, false}, + {rooms_in_rosters, 1}, + {config_schema, [ #{<<"field">> => 1, <<"value">> => <<"ok">>} ]}, + {config_schema, [ #{<<"field">> => <<"subject">>} ]}, + {config_schema, [ #{<<"field">> => <<"priority">>, <<"value">> => 0, + <<"internal_key">> => <<"priority">>, <<"type">> => <<"bad_integer">>} ]} + ]. 
+ +mod_offline(_Config) -> + T = fun(Opts) -> #{<<"modules">> => #{<<"mod_offline">> => Opts}} end, + Base = #{<<"access_max_user_messages">> => <<"max_user_offline_messages">>, + <<"backend">> => <<"riak">>, + <<"riak">> => #{<<"bucket_type">> => <<"test">>}}, + MBase = [{access_max_user_messages, max_user_offline_messages}, + {backend, riak}, + {bucket_type, <<"test">>}], + ?eqf(modopts(mod_offline, MBase), T(Base)), + ?errf(T(Base#{<<"access_max_user_messages">> => 1})), + ?errf(T(Base#{<<"backend">> => <<"riak_is_the_best">>})), + ?errf(T(Base#{<<"riak">> => #{<<"bucket_type">> => 1}})). + +mod_ping(_Config) -> + T = fun(Opts) -> #{<<"modules">> => #{<<"mod_ping">> => Opts}} end, + Base = #{<<"iqdisc">> => #{<<"type">> => <<"no_queue">>}, + <<"ping_req_timeout">> => 32, + <<"send_pings">> => true, + <<"timeout_action">> => <<"none">>}, + MBase = [{iqdisc, no_queue}, + {ping_req_timeout, 32}, + {send_pings, true}, + {timeout_action, none}], + ensure_sorted(MBase), + ?eqf(modopts(mod_ping, MBase), T(Base)), + ?errf(T(Base#{<<"send_pings">> => 1})), + ?errf(T(Base#{<<"ping_interval">> => -1})), + ?errf(T(Base#{<<"timeout_action">> => 1})), + ?errf(T(Base#{<<"timeout_action">> => <<"kill_them_all">>})), + ?errf(T(Base#{<<"ping_req_timeout">> => -1})), + ?errf(T(Base#{<<"ping_req_timeout">> => <<"32">>})), + check_iqdisc(mod_ping). 
+ +mod_privacy(_Config) -> + T = fun(Opts) -> #{<<"modules">> => #{<<"mod_privacy">> => Opts}} end, + Riak = #{<<"defaults_bucket_type">> => <<"privacy_defaults">>, + <<"names_bucket_type">> => <<"privacy_lists_names">>, + <<"bucket_type">> => <<"privacy_defaults">>}, + Base = #{<<"backend">> => <<"mnesia">>, + <<"riak">> => Riak}, + MBase = [{backend, mnesia}, + %% Riak opts + {defaults_bucket_type, <<"privacy_defaults">>}, + {names_bucket_type, <<"privacy_lists_names">>}, + {bucket_type, <<"privacy_defaults">>}], + ?eqf(modopts(mod_privacy, lists:sort(MBase)), T(Base)), + ?errf(T(Base#{<<"backend">> => 1})), + ?errf(T(Base#{<<"backend">> => <<"mongoddt">>})), + ?errf(T(Base#{<<"riak">> => #{<<"defaults_bucket_type">> => 1}})), + ?errf(T(Base#{<<"riak">> => #{<<"names_bucket_type">> => 1}})), + ?errf(T(Base#{<<"riak">> => #{<<"bucket_type">> => 1}})). + +mod_private(_Config) -> + T = fun(Opts) -> #{<<"modules">> => #{<<"mod_private">> => Opts}} end, + Riak = #{<<"bucket_type">> => <<"private_stuff">>}, + Base = #{<<"backend">> => <<"riak">>, + <<"riak">> => Riak}, + MBase = [{backend, riak}, + %% Riak opts + {bucket_type, <<"private_stuff">>}], + ?eqf(modopts(mod_private, lists:sort(MBase)), T(Base)), + ?errf(T(Base#{<<"backend">> => 1})), + ?errf(T(Base#{<<"backend">> => <<"mongoddt">>})), + ?errf(T(Base#{<<"riak">> => #{<<"bucket_type">> => 1}})), + check_iqdisc(mod_private). 
+ +mod_pubsub(_Config) -> + %% TODO default_node_config + T = fun(Opts) -> #{<<"modules">> => #{<<"mod_pubsub">> => Opts}} end, + Base = #{<<"backend">> => <<"mnesia">>, + <<"host">> => <<"pubsub.@HOST@">>, + <<"access_createnode">> => <<"all">>, + <<"max_items_node">> => 10, + <<"max_subscriptions_node">> => 10, + <<"nodetree">> => <<"tree">>, + <<"ignore_pep_from_offline">> => true, + <<"last_item_cache">> => false, + <<"plugins">> => [<<"flat">>], + <<"pep_mapping">> => [#{<<"namespace">> => <<"urn:xmpp:microblog:0">>, + <<"node">> => <<"mb">>}], + <<"item_publisher">> => false, + <<"sync_broadcast">> => true}, + MBase = [{backend, mnesia}, + {access_createnode, all}, + {host, "pubsub.@HOST@"}, + {max_items_node, 10}, + {max_subscriptions_node, 10}, + {nodetree, <<"tree">>}, + {ignore_pep_from_offline, true}, + {last_item_cache, false}, + {plugins, [<<"flat">>]}, + {pep_mapping, [{"urn:xmpp:microblog:0", "mb"}]}, + {item_publisher, false}, + {sync_broadcast, true}], + ?eqf(modopts(mod_pubsub, lists:sort(MBase)), T(Base)), + ?eqf(modopts(mod_pubsub, [{last_item_cache, mnesia}]), + T(#{<<"last_item_cache">> => <<"mnesia">>})), + ?eqf(modopts(mod_pubsub, []), %% The option is undefined, i.e. parser just removes it + T(#{<<"max_subscriptions_node">> => <<"infinity">>})), + run_multi( + good_default_node_config_opts(T) ++ + bad_default_node_config_opts(T) ++ + generic_bad_opts_cases(T, mod_pubsub_bad_opts())), + check_iqdisc(mod_pubsub). + +good_default_node_config_opts(T) -> + [good_default_node_config_opt(T, K, Toml, Mim) + || {K, Toml, Mim} <- default_node_config_opts()]. + +good_default_node_config_opt(T, K, Toml, Mim) -> + MBase = [{default_node_config, [{K, Mim}]}], + Base = #{<<"default_node_config">> => #{a2b(K) => Toml}}, + ?_eqf(modopts(mod_pubsub, MBase), T(Base)). + +bad_default_node_config_opts(T) -> + [bad_default_node_config_opt(T, K, Toml) + || {K, Toml} <- default_node_config_bad_opts()]. 
+ +bad_default_node_config_opt(T, K, Toml) -> + Base = #{<<"default_node_config">> => #{a2b(K) => Toml}}, + ?_errf(T(Base)). + +default_node_config_opts() -> + [{access_model, <<"open">>, open}, + {deliver_notifications, true, true}, + {deliver_payloads, true, true}, + {max_items, 10, 10}, + {max_payload_size, 10000, 10000}, + {node_type, <<"leaf">>, leaf}, + {notification_type, <<"headline">>, headline}, + {notify_config, false, false}, + {notify_delete, false, false}, + {notify_retract, false, false}, + {persist_items, true, true}, + {presence_based_delivery, true, true}, + {publish_model, <<"open">>, open}, + {purge_offline, false, false}, + {roster_groups_allowed, [<<"friends">>], [<<"friends">>]}, + {send_last_published_item, <<"on_sub_and_presence">>, on_sub_and_presence}, + {subscribe, true, true}]. + +default_node_config_bad_opts() -> + [{access_model, 1}, + {deliver_notifications, 1}, + {deliver_payloads, 1}, + {max_items, -1}, + {max_payload_size, -1}, + {node_type, 1}, + {notification_type, 1}, + {notify_config, 1}, + {notify_delete, 1}, + {notify_retract, 1}, + {persist_items, 1}, + {presence_based_delivery, 1}, + {publish_model, 1}, + {purge_offline, 1}, + {roster_groups_allowed, [1]}, + {roster_groups_allowed, 1}, + {send_last_published_item, 1}, + {subscribe, 1}]. + +mod_pubsub_bad_opts() -> + [{backend, 1}, + {access_createnode, 1}, + {host, 1}, + {host, <<"aaa aaa">>}, + {max_items_node, -1}, + {max_subscriptions_node, -1}, + {nodetree, -1}, + {nodetree, <<"oops">>}, + {ignore_pep_from_offline, 1}, + {last_item_cache, 1}, + {plugins, [<<"fat">>]}, + {pep_mapping, [#{<<"namespace">> => 1, <<"node">> => <<"mb">>}]}, + {pep_mapping, [#{<<"namespace">> => <<"urn:xmpp:microblog:0">>, <<"node">> => 1}]}, + {item_publisher, 1}, + {sync_broadcast, 1}]. 
+ +mod_push_service_mongoosepush(_Config) -> + T = fun(Opts) -> #{<<"modules">> => #{<<"mod_push_service_mongoosepush">> => Opts}} end, + Base = #{<<"pool_name">> => <<"test_pool">>, + <<"api_version">> => <<"v3">>, + <<"max_http_connections">> => 100}, + MBase = [{pool_name, test_pool}, + {api_version, "v3"}, + {max_http_connections, 100}], + ?eqf(modopts(mod_push_service_mongoosepush, lists:sort(MBase)), T(Base)), + ?errf(T(Base#{<<"pool_name">> => 1})), + ?errf(T(Base#{<<"api_version">> => 1})), + ?errf(T(Base#{<<"max_http_connections">> => -1})), + ok. + +mod_register(_Config) -> + ?eqf(modopts(mod_register, + [{access,register}, + {ip_access, [{allow,"127.0.0.0/8"}, + {deny,"0.0.0.0"}]} + ]), + ip_access_register(<<"0.0.0.0">>)), + ?eqf(modopts(mod_register, + [{access,register}, + {ip_access, [{allow,"127.0.0.0/8"}, + {deny,"0.0.0.4"}]} + ]), + ip_access_register(<<"0.0.0.4">>)), + ?eqf(modopts(mod_register, + [{access,register}, + {ip_access, [{allow,"127.0.0.0/8"}, + {deny,"::1"}]} + ]), + ip_access_register(<<"::1">>)), + ?eqf(modopts(mod_register, + [{access,register}, + {ip_access, [{allow,"127.0.0.0/8"}, + {deny,"::1/128"}]} + ]), + ip_access_register(<<"::1/128">>)), + ?errf(invalid_ip_access_register()), + ?errf(invalid_ip_access_register_ipv6()), + ?errf(ip_access_register(<<"hello">>)), + ?errf(ip_access_register(<<"0.d">>)), + ?eqf(modopts(mod_register, + [{welcome_message, {"Subject", "Body"}}]), + welcome_message()), + %% List of jids + ?eqf(modopts(mod_register, + [{registration_watchers, + [<<"alice@bob">>, <<"ilovemongoose@help">>]}]), + registration_watchers([<<"alice@bob">>, <<"ilovemongoose@help">>])), + ?errf(registration_watchers([<<"alice@bob">>, <<"jids@have@no@feelings!">>])), + %% non-negative integer + ?eqf(modopts(mod_register, [{password_strength, 42}]), + password_strength_register(42)), + ?errf(password_strength_register(<<"42">>)), + ?errf(password_strength_register(<<"strong">>)), + ?errf(password_strength_register(-150)), + 
?errf(welcome_message(<<"Subject">>, 1)), + ?errf(welcome_message(1, <<"Body">>)), + check_iqdisc(mod_register). + +welcome_message() -> + welcome_message(<<"Subject">>, <<"Body">>). + +welcome_message(S, B) -> + Opts = #{<<"welcome_message">> => #{<<"subject">> => S, <<"body">> => B}}, + #{<<"modules">> => #{<<"mod_register">> => Opts}}. + +password_strength_register(Strength) -> + Opts = #{<<"password_strength">> => Strength}, + #{<<"modules">> => #{<<"mod_register">> => Opts}}. + +ip_access_register(Ip) -> + Opts = #{<<"access">> => <<"register">>, + <<"ip_access">> => + [#{<<"address">> => <<"127.0.0.0/8">>, <<"policy">> => <<"allow">>}, + #{<<"address">> => Ip, <<"policy">> => <<"deny">>}]}, + #{<<"modules">> => #{<<"mod_register">> => Opts}}. + +invalid_ip_access_register() -> + Opts = #{<<"access">> => <<"register">>, + <<"ip_access">> => + [#{<<"address">> => <<"127.0.0.0/8">>, <<"policy">> => <<"allawww">>}, + #{<<"address">> => <<"8.8.8.8">>, <<"policy">> => <<"denyh">>}]}, + #{<<"modules">> => #{<<"mod_register">> => Opts}}. + +invalid_ip_access_register_ipv6() -> + Opts = #{<<"access">> => <<"register">>, + <<"ip_access">> => + [#{<<"address">> => <<"::1/129">>, <<"policy">> => <<"allow">>}]}, + #{<<"modules">> => #{<<"mod_register">> => Opts}}. + +registration_watchers(JidBins) -> + Opts = #{<<"registration_watchers">> => JidBins}, + #{<<"modules">> => #{<<"mod_register">> => Opts}}. 
+ +mod_revproxy(_Config) -> + T = fun(Opts) -> #{<<"modules">> => #{<<"mod_revproxy">> => Opts}} end, + R = fun(Route) -> T(#{<<"routes">> => [Route]}) end, + Base = #{<<"routes">> => [R1 = #{ + <<"host">> => <<"www.erlang-solutions.com">>, + <<"path">> => <<"/admin">>, + <<"method">> => <<"_">>, + <<"upstream">> => <<"https://www.erlang-solutions.com/">> + }, #{ + <<"host">> => <<"example.com">>, + <<"path">> => <<"/test">>, + <<"upstream">> => <<"https://example.com/">> + }]}, + MBase = [{routes, [{"www.erlang-solutions.com", "/admin", "_", + "https://www.erlang-solutions.com/"}, + {"example.com", "/test", "https://example.com/"}]}], + run_multi([ + ?_eqf(modopts(mod_revproxy, MBase), T(Base)), + ?_errf(R(R1#{<<"host">> => 1})), + ?_errf(R(R1#{<<"path">> => 1})), + ?_errf(R(R1#{<<"method">> => 1})), + ?_errf(R(R1#{<<"upstream">> => 1})), + ?_errf(R(R1#{<<"upstream">> => <<>>})), + ?_errf(R(R1#{<<"host">> => <<>>})) + ]). + +mod_roster(_Config) -> + Riak = #{<<"bucket_type">> => <<"rosters">>, + <<"version_bucket_type">> => <<"roster_versions">>}, + Base = #{<<"iqdisc">> => #{<<"type">> => <<"one_queue">>}, + <<"versioning">> => false, + <<"store_current_id">> => false, + <<"backend">> => <<"mnesia">>, + <<"riak">> => Riak}, + MBase = [{iqdisc, one_queue}, + {versioning, false}, + {store_current_id, false}, + {backend, mnesia}, + {bucket_type, <<"rosters">>}, + {version_bucket_type, <<"roster_versions">>}], + T = fun(Opts) -> #{<<"modules">> => #{<<"mod_roster">> => Opts}} end, + run_multi([ + ?_eqf(modopts(mod_roster, lists:sort(MBase)), T(Base)), + ?_errf(T(#{<<"versioning">> => 1})), + ?_errf(T(#{<<"store_current_id">> => 1})), + ?_errf(T(#{<<"backend">> => 1})), + ?_errf(T(#{<<"backend">> => <<"iloveyou">>})), + ?_errf(T(#{<<"riak">> => #{<<"version_bucket_type">> => 1}})), + ?_errf(T(#{<<"riak">> => #{<<"bucket_type">> => 1}})) + ]), + check_iqdisc(mod_roster). 
+ +mod_shared_roster_ldap(_Config) -> + T = fun(Opts) -> #{<<"modules">> => #{<<"mod_shared_roster_ldap">> => Opts}} end, + MBase = [{ldap_pool_tag, default}, + {ldap_base, "string"}, + {ldap_deref, never}, + %% Options: attributes + {ldap_groupattr, "cn"}, + {ldap_groupdesc, "default"}, + {ldap_userdesc, "cn"}, + {ldap_useruid, "cn"}, + {ldap_memberattr, "memberUid"}, + {ldap_memberattr_format, "%u"}, + {ldap_memberattr_format_re,""}, + %% Options: parameters + {ldap_auth_check, true}, + {ldap_user_cache_validity, 300}, + {ldap_group_cache_validity, 300}, + {ldap_user_cache_size, 300}, + {ldap_group_cache_size, 300}, + %% Options: LDAP filters + {ldap_rfilter, "test"}, + {ldap_gfilter, "test"}, + {ldap_ufilter, "test"}, + {ldap_filter, "test"} + ], + Base = #{ + <<"ldap_pool_tag">> => <<"default">>, + <<"ldap_base">> => <<"string">>, + <<"ldap_deref">> => <<"never">>, + %% Options: attributes + <<"ldap_groupattr">> => <<"cn">>, + <<"ldap_groupdesc">> => <<"default">>, + <<"ldap_userdesc">> => <<"cn">>, + <<"ldap_useruid">> => <<"cn">>, + <<"ldap_memberattr">> => <<"memberUid">>, + <<"ldap_memberattr_format">> => <<"%u">>, + <<"ldap_memberattr_format_re">> => <<"">>, + %% Options: parameters + <<"ldap_auth_check">> => true, + <<"ldap_user_cache_validity">> => 300, + <<"ldap_group_cache_validity">> => 300, + <<"ldap_user_cache_size">> => 300, + <<"ldap_group_cache_size">> => 300, + %% Options: LDAP filters + <<"ldap_rfilter">> => <<"test">>, + <<"ldap_gfilter">> => <<"test">>, + <<"ldap_ufilter">> => <<"test">>, + <<"ldap_filter">> => <<"test">> + }, + run_multi( + check_one_opts(mod_shared_roster_ldap, MBase, Base, T) ++ [ + ?_eqf(modopts(mod_shared_roster_ldap, lists:sort(MBase)), T(Base)), + ?_errf(T(#{<<"ldap_pool_tag">> => 1})), + ?_errf(T(#{<<"ldap_base">> => 1})), + ?_errf(T(#{<<"ldap_deref">> => 1})), + %% Options: attributes + ?_errf(T(#{<<"ldap_groupattr">> => 1})), + ?_errf(T(#{<<"ldap_groupdesc">> => 1})), + ?_errf(T(#{<<"ldap_userdesc">> => 1})), + 
?_errf(T(#{<<"ldap_useruid">> => 1})), + ?_errf(T(#{<<"ldap_memberattr">> => 1})), + ?_errf(T(#{<<"ldap_memberattr_format">> => 1})), + ?_errf(T(#{<<"ldap_memberattr_format_re">> => 1})), + %% Options: parameters + ?_errf(T(#{<<"ldap_auth_check">> => 1})), + ?_errf(T(#{<<"ldap_user_cache_validity">> => -1})), + ?_errf(T(#{<<"ldap_group_cache_validity">> => -1})), + ?_errf(T(#{<<"ldap_user_cache_size">> => -1})), + ?_errf(T(#{<<"ldap_group_cache_size">> => -1})), + %% Options: LDAP filters + ?_errf(T(#{<<"ldap_rfilter">> => 1})), + ?_errf(T(#{<<"ldap_gfilter">> => 1})), + ?_errf(T(#{<<"ldap_ufilter">> => 1})), + ?_errf(T(#{<<"ldap_filter">> => 1})) + ]). + +mod_sic(_Config) -> + check_iqdisc(mod_sic). + +mod_stream_management(_Config) -> + T = fun(Opts) -> #{<<"modules">> => #{<<"mod_stream_management">> => Opts}} end, + Base = #{ + <<"buffer_max">> => 100, + <<"ack_freq">> => 1, + <<"resume_timeout">> => 600, + <<"stale_h">> => #{<<"enabled">> => true, + <<"repeat_after">> => 1800, + <<"geriatric">> => 3600} + }, + MBase = [ + {buffer_max, 100}, + {ack_freq, 1}, + {resume_timeout, 600}, + {stale_h, [{enabled, true}, + {stale_h_geriatric, 3600}, + {stale_h_repeat_after, 1800}]} + ], + ?eqf(modopts(mod_stream_management, lists:sort(MBase)), T(Base)), + ?eqf(modopts(mod_stream_management, [{buffer_max, no_buffer}]), + T(#{<<"buffer_max">> => <<"no_buffer">>})), + ?errf(T(#{<<"buffer_max">> => -1})), + ?errf(T(#{<<"ack_freq">> => -1})), + ?errf(T(#{<<"resume_timeout">> => -1})), + ?errf(T(#{<<"stale_h">> => #{<<"enabled">> => <<"true">>}})), + ?errf(T(#{<<"stale_h">> => #{<<"enabled">> => 1}})), + ?errf(T(#{<<"stale_h">> => #{<<"repeat_after">> => -1}})), + ?errf(T(#{<<"stale_h">> => #{<<"geriatric">> => -1}})), + ok. + +mod_time(_Config) -> + check_iqdisc(mod_time). 
+ +mod_vcard(_Config) -> + T = fun(Opts) -> #{<<"modules">> => #{<<"mod_vcard">> => Opts}} end, + MBase = [{iqdisc, one_queue}, + {host, "vjud.@HOST@"}, + {search, true}, + {backend, mnesia}, + {matches, infinity}, + %% ldap + {ldap_pool_tag, default}, + {ldap_base, "ou=Users,dc=ejd,dc=com"}, + {ldap_deref, never}, + {ldap_uids, [{"mail", "%u@mail.example.org"}, "name"]}, + {ldap_filter, "(&(objectClass=shadowAccount)(memberOf=Jabber Users))"}, + %% MIM accepts {"FAMILY", "%s", ["sn", "cn"]} form too + {ldap_vcard_map, [{<<"FAMILY">>, <<"%s">>, [<<"sn">>]}]}, %% iolists + {ldap_search_fields, [{<<"Full Name">>, <<"cn">>}]}, %% pair of iolists + {ldap_search_reported, [{<<"Full Name">>, <<"FN">>}]}, %% iolists + {ldap_search_operator, 'or'}, + {ldap_binary_search_fields, [<<"PHOTO">>]}, + %% riak + {bucket_type, <<"vcard">>}, + {search_index, <<"vcard">>} + ], + Riak = #{<<"bucket_type">> => <<"vcard">>, + <<"search_index">> => <<"vcard">>}, + Base = #{ + <<"iqdisc">> => #{<<"type">> => <<"one_queue">>}, + <<"host">> => <<"vjud.@HOST@">>, + <<"search">> => true, + <<"backend">> => <<"mnesia">>, + <<"matches">> => <<"infinity">>, + %% ldap + <<"ldap_pool_tag">> => <<"default">>, + <<"ldap_base">> => <<"ou=Users,dc=ejd,dc=com">>, + <<"ldap_deref">> => <<"never">>, + <<"ldap_uids">> => [#{<<"attr">> => <<"mail">>, + <<"format">> => <<"%u@mail.example.org">>}, + #{<<"attr">> => <<"name">>}], + <<"ldap_filter">> => <<"(&(objectClass=shadowAccount)(memberOf=Jabber Users))">>, + <<"ldap_vcard_map">> => [#{<<"vcard_field">> => <<"FAMILY">>, + <<"ldap_pattern">> => <<"%s">>, + <<"ldap_field">> => <<"sn">>}], + <<"ldap_search_fields">> => [#{<<"search_field">> => <<"Full Name">>, <<"ldap_field">> => <<"cn">>}], + <<"ldap_search_reported">> => [#{<<"search_field">> => <<"Full Name">>, <<"vcard_field">> => <<"FN">>}], + <<"ldap_search_operator">> => <<"or">>, % atom + <<"ldap_binary_search_fields">> => [<<"PHOTO">>], + <<"riak">> => Riak + }, + 
run_multi(check_one_opts_with_same_field_name(mod_vcard, MBase, Base, T) + ++ [ ?_eqf(modopts(mod_vcard, lists:sort(MBase)), T(Base)), + ?_eqf(modopts(mod_vcard, [{matches, 1}]), T(#{<<"matches">> => 1})) ] + ++ generic_bad_opts_cases(T, mod_vcard_bad_opts())), + check_iqdisc(mod_vcard). + +mod_vcard_bad_opts() -> + M = #{<<"vcard_field">> => <<"FAMILY">>, + <<"ldap_pattern">> => <<"%s">>, + <<"ldap_field">> => <<"sn">>}, + [{host, 1}, + {host, <<"test test">>}, + {search, 1}, + {backend, 1}, + {backend, <<"mememesia">>}, + {matches, -1}, + {ldap_pool_tag, -1}, + {ldap_base, -1}, + {ldap_deref, <<"nevernever">>}, + {ldap_deref, -1}, + {ldap_uids, -1}, + {ldap_uids, [#{}]}, + {ldap_uids, [#{<<"attr">> => 1, <<"format">> => <<"ok">>}]}, + {ldap_uids, [#{<<"attr">> => <<"ok">>, <<"format">> => 1}]}, + {ldap_uids, [#{<<"format">> => <<"ok">>}]}, + {ldap_filter, 1}, + {ldap_vcard_map, [M#{<<"vcard_field">> => 1}]}, + {ldap_vcard_map, [M#{<<"ldap_pattern">> => 1}]}, + {ldap_vcard_map, [M#{<<"ldap_pattern">> => 1}]}, + {ldap_search_fields, [#{<<"search_field">> => 1, <<"ldap_field">> => <<"cn">>}]}, + {ldap_search_fields, [#{<<"search_field">> => <<"Full Name">>, <<"ldap_field">> => 1}]}, + {ldap_search_reported, [#{<<"search_field">> => 1, <<"vcard_field">> => <<"FN">>}]}, + {ldap_search_reported, [#{<<"search_field">> => <<"Full Name">>, <<"vcard_field">> => 1}]}, + {ldap_search_operator, <<"more">>}, + {ldap_binary_search_fields, [1]}, + {ldap_binary_search_fields, 1}, + {riak, #{<<"bucket_type">> => 1}}, + {riak, #{<<"search_index">> => 1}}]. + +mod_version(_Config) -> + T = fun(Opts) -> #{<<"modules">> => #{<<"mod_version">> => Opts}} end, + ?eqf(modopts(mod_version, [{os_info, false}]), T(#{<<"os_info">> => false})), + ?errf(T(#{<<"os_info">> => 1})), + check_iqdisc(mod_version). 
+ +%% Services + +service_admin_extra(_Config) -> + T = fun(Opts) -> #{<<"services">> => #{<<"service_admin_extra">> => Opts}} end, + ?eq(servopts(service_admin_extra, [{submods, [node]}]), + parse(T(#{<<"submods">> => [<<"node">>]}))), + ?err(parse(T(#{<<"submods">> => 1}))), + ?err(parse(T(#{<<"submods">> => [1]}))), + ?err(parse(T(#{<<"submods">> => [<<"nodejshaha">>]}))), + ok. + +service_mongoose_system_metrics(_Config) -> + M = service_mongoose_system_metrics, + T = fun(Opts) -> #{<<"services">> => #{<<"service_mongoose_system_metrics">> => Opts}} end, + ?eq(servopts(M, [{initial_report, 5000}]), + parse(T(#{<<"initial_report">> => 5000}))), + ?eq(servopts(M, [{periodic_report, 5000}]), + parse(T(#{<<"periodic_report">> => 5000}))), + ?eq(servopts(M, [{tracking_id, "UA-123456789"}]), + parse(T(#{<<"tracking_id">> => <<"UA-123456789">>}))), + %% error cases + ?err(parse(T(#{<<"initial_report">> => <<"forever">>}))), + ?err(parse(T(#{<<"periodic_report">> => <<"forever">>}))), + ?err(parse(T(#{<<"initial_report">> => -1}))), + ?err(parse(T(#{<<"periodic_report">> => -1}))), + ?err(parse(T(#{<<"tracking_id">> => 666}))), + ok. + +%% Helpers for module tests + +iqdisc({queues, Workers}) -> #{<<"type">> => <<"queues">>, <<"workers">> => Workers}; +iqdisc(Atom) -> #{<<"type">> => atom_to_binary(Atom, utf8)}. + +iq_disc_generic(Module, Value) -> + Opts = #{<<"iqdisc">> => Value}, + #{<<"modules">> => #{atom_to_binary(Module, utf8) => Opts}}. + +check_iqdisc(Module) -> + ?eqf(modopts(Module, [{iqdisc, {queues, 10}}]), + iq_disc_generic(Module, iqdisc({queues, 10}))), + ?eqf(modopts(Module, [{iqdisc, parallel}]), + iq_disc_generic(Module, iqdisc(parallel))), + ?errf(iq_disc_generic(Module, iqdisc(bad_haha))). + +modopts(Mod, Opts) -> + [#local_config{key = {modules, ?HOST}, value = [{Mod, Opts}]}]. + +servopts(Mod, Opts) -> + [#local_config{key = services, value = [{Mod, Opts}]}]. 
+ +%% helpers for 'listen' tests + +listener_config(Mod, Opts) -> + [#local_config{key = listen, + value = [{{5222, {0, 0, 0, 0}, tcp}, Mod, Opts}]}]. + +parse_http_handler(Type, Opts) -> + parse_listener(<<"http">>, #{<<"handlers">> => + #{Type => + [Opts#{<<"host">> => <<"localhost">>, + <<"path">> => <<"/api">>}] + }}). + +parse_listener(Type, Opts) -> + parse(#{<<"listen">> => #{Type => [Opts#{<<"port">> => 5222}]}}). + +%% helpers for 'auth' tests + +auth_ldap(Opts) -> + auth_config(<<"ldap">>, Opts). + +auth_config(Method, Opts) -> + #{<<"auth">> => #{Method => Opts}}. + +%% helpers for 'pool' tests + +pool_config(Pool) -> + [#local_config{key = outgoing_pools, value = [Pool]}]. + +parse_pool(Type, Tag, Opts) -> + parse(#{<<"outgoing_pools">> => #{Type => #{Tag => Opts}}}). + +parse_pool_conn(Type, Opts) -> + parse(#{<<"outgoing_pools">> => #{Type => #{<<"default">> => #{<<"connection">> => Opts}}}}). + +rdbms_opts() -> + #{<<"driver">> => <<"pgsql">>, + <<"host">> => <<"localhost">>, + <<"database">> => <<"db">>, + <<"username">> => <<"dbuser">>, + <<"password">> => <<"secret">>}. + +%% helpers for 'host_config' tests + +eq_host_config(Result, Config) -> + ConfigFunctions = parse(Config), % check for all hosts + compare_config(Result, lists:flatmap(fun(F) -> F(?HOST) end, ConfigFunctions)), + compare_config(Result, parse_host_config(Config)). % Check for a single host + +eq_host_or_global(ResultF, Config) -> + compare_config(ResultF(global), parse(Config)), % check for the 'global' host + compare_config(ResultF(?HOST), parse_host_config(Config)). % check for a single host + +err_host_config(Config) -> + ?err(parse(Config)), %% XXX Apply me + ?err(parse_host_config(Config)). + +parse_host_config(Config) -> + parse(#{<<"host_config">> => [Config#{<<"host">> => ?HOST}]}). + +%% helpers for 'equivalence' tests + +compare_config(C1, C2) -> + compare_unordered_lists(C1, C2, fun handle_config_option/2). 
+ +filter_config(#config{key = required_files}) -> + false; % not supported yet in TOML +filter_config(_) -> true. + +handle_config_option(#config{key = K1, value = V1}, + #config{key = K2, value = V2}) -> + ?eq(K1, K2), + compare_values(K1, V1, V2); +handle_config_option(#local_config{key = K1, value = V1}, + #local_config{key = K2, value = V2}) -> + ?eq(K1, K2), + compare_values(K1, V1, V2); +handle_config_option(Opt1, Opt2) -> + ?eq(Opt1, Opt2). + +compare_values(listen, V1, V2) -> + compare_unordered_lists(V1, V2, fun handle_listener/2); +compare_values({auth_opts, _}, V1, V2) -> + compare_unordered_lists(V1, V2, fun handle_auth_opt/2); +compare_values(outgoing_pools, V1, V2) -> + compare_unordered_lists(V1, V2, fun handle_conn_pool/2); +compare_values({modules, _}, V1, V2) -> + compare_unordered_lists(V1, V2, fun handle_modules/2); +compare_values({services, _}, V1, V2) -> + compare_unordered_lists(V1, V2, fun handle_item_with_opts/2); +compare_values({auth_method, _}, V1, V2) when is_atom(V1) -> + ?eq([V1], V2); +compare_values({s2s_addr, _}, {_, _, _, _} = IP1, IP2) -> + ?eq(inet:ntoa(IP1), IP2); +compare_values(s2s_dns_options, V1, V2) -> + compare_unordered_lists(V1, V2); +compare_values(services, V1, V2) -> + MetricsOpts1 = proplists:get_value(service_mongoose_system_metrics, V1), + MetricsOpts2 = proplists:get_value(service_mongoose_system_metrics, V2), + compare_unordered_lists(MetricsOpts1, MetricsOpts2); +compare_values(K, V1, V2) -> + ?eq({K, V1}, {K, V2}). + +handle_listener({P1, M1, O1}, {P2, M2, O2}) -> + ?eq(P1, P2), + ?eq(M1, M2), + compare_unordered_lists(O1, O2, fun handle_listener_option/2). + +handle_listener_option({modules, M1}, {modules, M2}) -> + compare_unordered_lists(M1, M2, fun handle_listener_module/2); +handle_listener_option({transport_options, O1}, {transport_options, O2}) -> + compare_unordered_lists(O1, O2); +handle_listener_option(V1, V2) -> ?eq(V1, V2). 
+ +handle_listener_module({H1, P1, M1}, M2) -> + handle_listener_module({H1, P1, M1, []}, M2); +handle_listener_module({H1, P1, M1, O1}, {H2, P2, M2, O2}) -> + ?eq(H1, H2), + ?eq(P1, P2), + ?eq(M1, M2), + compare_listener_module_options(M1, O1, O2). + +compare_listener_module_options(mod_websockets, L1, L2) -> + E1 = proplists:get_value(ejabberd_service, L1, []), + E2 = proplists:get_value(ejabberd_service, L2, []), + T1 = proplists:delete(ejabberd_service, L1), + T2 = proplists:delete(ejabberd_service, L2), + compare_unordered_lists(E1, E2), + compare_unordered_lists(T1, T2); +compare_listener_module_options(_, O1, O2) -> + ?eq(O1, O2). + +handle_auth_opt({cyrsasl_external, M}, {cyrsasl_external, [M]}) -> ok; +handle_auth_opt(V1, V2) -> ?eq(V1, V2). + +handle_item_with_opts({M1, O1}, {M2, O2}) -> + ?eq(M1, M2), + compare_unordered_lists(O1, O2). + +handle_conn_pool({Type1, Scope1, Tag1, POpts1, COpts1}, + {Type2, Scope2, Tag2, POpts2, COpts2}) -> + ?eq(Type1, Type2), + ?eq(Scope1, Scope2), + ?eq(Tag1, Tag2), + compare_unordered_lists(POpts1, POpts2), + compare_unordered_lists(COpts1, COpts2, fun handle_conn_opt/2). + +handle_conn_opt({server, {D1, H1, DB1, U1, P1, O1}}, + {server, {D2, H2, DB2, U2, P2, O2}}) -> + ?eq(D1, D2), + ?eq(H1, H2), + ?eq(DB1, DB2), + ?eq(U1, U2), + ?eq(P1, P2), + compare_unordered_lists(O1, O2, fun handle_db_server_opt/2); +handle_conn_opt(V1, V2) -> ?eq(V1, V2). + +handle_db_server_opt({ssl_opts, O1}, {ssl_opts, O2}) -> + compare_unordered_lists(O1, O2); +handle_db_server_opt(V1, V2) -> ?eq(V1, V2). + +handle_modules({Name, Opts}, {Name2, Opts2}) -> + ?eq(Name, Name2), + compare_unordered_lists(Opts, Opts2, fun handle_module_options/2). 
+ +handle_module_options({configs, [Configs1]}, {configs, [Configs2]}) -> + compare_unordered_lists(Configs1, Configs2, fun handle_module_options/2); +handle_module_options({Name, Opts}, {Name2, Opts2}) -> + ?eq(Name, Name2), + compare_unordered_lists(Opts, Opts2, fun handle_module_options/2); +handle_module_options(V1, V2) -> + ?eq(V1, V2). + +%% Generic assertions, use the 'F' handler for any custom cases +compare_unordered_lists(L1, L2) -> + compare_unordered_lists(L1, L2, fun(V1, V2) -> ?eq(V1, V2) end). + +compare_unordered_lists(L1, L2, F) -> + SL1 = lists:sort(L1), + SL2 = lists:sort(L2), + compare_ordered_lists(SL1, SL2, F). + +compare_ordered_lists([H1|T1], [H1|T2], F) -> + compare_ordered_lists(T1, T2, F); +compare_ordered_lists([H1|T1], [H2|T2], F) -> + try F(H1, H2) + catch C:R:S -> + ct:fail({C, R, S}) + end, + compare_ordered_lists(T1, T2, F); +compare_ordered_lists([], [], _) -> + ok. + +test_equivalence_between_files(Config, File1, File2) -> + CfgPath = ejabberd_helper:data(Config, File1), + State1 = mongoose_config_parser_cfg:parse_file(CfgPath), + Hosts1 = mongoose_config_parser:state_to_host_opts(State1), + Opts1 = mongoose_config_parser:state_to_opts(State1), + + TOMLPath = ejabberd_helper:data(Config, File2), + State2 = mongoose_config_parser_toml:parse_file(TOMLPath), + Hosts2 = mongoose_config_parser:state_to_host_opts(State2), + Opts2 = mongoose_config_parser:state_to_opts(State2), + ?eq(Hosts1, Hosts2), + compare_unordered_lists(lists:filter(fun filter_config/1, Opts1), Opts2, + fun handle_config_option/2). + +parse_with_host(Config) -> + [F] = parse(Config), + apply(F, [?HOST]). + +set_pl(K, V, List) -> + lists:keyreplace(K, 1, List, {K, V}). 
+ +create_files(Config) -> + %% The files must exist for validation to pass + Root = small_path_helper:repo_dir(Config), + file:make_dir("priv"), + PrivkeyPath = filename:join(Root, "tools/ssl/mongooseim/privkey.pem"), + CertPath = filename:join(Root, "tools/ssl/mongooseim/cert.pem"), + CaPath = filename:join(Root, "tools/ssl/ca/cacert.pem"), + ok = file:write_file("priv/access_psk", ""), + ok = file:write_file("priv/provision_psk", ""), + ok = filelib:ensure_dir("www/muc/dummy"), + ensure_copied(CaPath, "priv/ca.pem"), + ensure_copied(CertPath, "priv/cert.pem"), + ensure_copied(PrivkeyPath, "priv/dc1.pem"). + +ensure_copied(From, To) -> + case file:copy(From, To) of + {ok,_} -> + ok; + Other -> + error(#{what => ensure_copied_failed, from => From, to => To, + reason => Other}) + end. + +pl_merge(L1, L2) -> + M1 = maps:from_list(L1), + M2 = maps:from_list(L2), + maps:to_list(maps:merge(M1, M2)). + +%% Runs check_one_opts, but only for fields, that present in both +%% MongooseIM and TOML config formats with the same name. +%% Helps to filter out riak fields automatically. +check_one_opts_with_same_field_name(M, MBase, Base, T) -> + KeysM = maps:keys(maps:from_list(MBase)), + KeysT = lists:map(fun b2a/1, maps:keys(Base)), + Keys = ordsets:intersection(ordsets:from_list(KeysT), + ordsets:from_list(KeysM)), + Hook = fun(A,B) -> {A,B} end, + check_one_opts(M, MBase, Base, T, Keys, Hook). + +check_one_opts(M, MBase, Base, T) -> + Keys = maps:keys(maps:from_list(MBase)), + Hook = fun(A,B) -> {A,B} end, + check_one_opts(M, MBase, Base, T, Keys, Hook). + +check_one_opts(M, MBase, Base, T, Keys, Hook) -> + [check_one_opts_key(M, K, MBase, Base, T, Hook) || K <- Keys]. 
+ +check_one_opts_key(M, K, MBase, Base, T, Hook) when is_atom(M), is_atom(K) -> + BK = atom_to_binary(K, utf8), + MimValue = maps:get(K, maps:from_list(MBase)), + TomValue = maps:get(BK, Base), + Mim0 = [{K, MimValue}], + Toml0 = #{BK => TomValue}, + {Mim, Toml} = Hook(Mim0, Toml0), + ?_eqf(modopts(M, Mim), T(Toml)). + +binaries_to_atoms(Bins) -> + [binary_to_atom(B, utf8) || B <- Bins]. + +run_multi(Cases) -> + Results = [run_case(F) || {F,_} <- Cases], + case lists:all(fun(X) -> X =:= ok end, Results) of + true -> + ok; + false -> + Failed = [Zip || {Res,_}=Zip <- lists:zip(Results, Cases), Res =/= ok], + [ct:pal("Info: ~p~nResult: ~p~n", [Info, Res]) || {Res, Info} <- Failed], + ct:fail(#{what => run_multi_failed, failed_cases => length(Failed)}) + end. + +run_case(F) -> + try + F(), ok + catch Class:Reason:Stacktrace -> + {Class, Reason, Stacktrace} + end. + +ensure_sorted(List) -> + [ct:fail("Not sorted list ~p~nSorted order ~p~n", [List, lists:sort(List)]) + || lists:sort(List) =/= List]. + +a2b(X) -> atom_to_binary(X, utf8). +b2a(X) -> binary_to_atom(X, utf8). + + +generic_opts_cases(M, T, Opts) -> + [generic_opts_case(M, T, K, Toml, Mim) || {K, Toml, Mim} <- Opts]. + +generic_opts_case(M, T, K, Toml, Mim) -> + Info = #{key => K, toml => Toml, mim => Mim}, + info(Info, ?_eqf(modopts(M, [{K, Mim}]), T(#{a2b(K) => Toml}))). + +generic_renamed_opts_cases(M, T, Opts) -> + [generic_renamed_opts_case(M, T, TomlKey, MimKey, Toml, Mim) + || {TomlKey, MimKey, Toml, Mim} <- Opts]. + +generic_renamed_opts_case(M, T, TomlKey, MimKey, Toml, Mim) -> + ?_eqf(modopts(M, [{MimKey, Mim}]), T(#{a2b(TomlKey) => Toml})). + + +generic_bad_opts_cases(T, Opts) -> + [generic_bad_opts_case(T, K, Toml) || {K, Toml} <- Opts]. + +generic_bad_opts_case(T, K, Toml) -> + ?_errf(T(#{a2b(K) => Toml})). + +info(Info, {F, Extra}) -> + {F, maps:merge(Extra, Info)}. 
diff --git a/test/config_parser_SUITE_data/miscellaneous.cfg b/test/config_parser_SUITE_data/miscellaneous.cfg new file mode 100644 index 00000000000..cbed6ffbd67 --- /dev/null +++ b/test/config_parser_SUITE_data/miscellaneous.cfg @@ -0,0 +1,51 @@ +{hosts, ["localhost", + "anonymous.localhost" + ] }. + +{cowboy_server_name, "Apache"}. +{rdbms_server_type, mssql}. +override_global. +override_local. +override_acls. +{pgsql_users_number_estimate, true}. +{route_subdomains, s2s}. +{mongooseimctl_access_commands, [{local, ["join_cluster"], [{node, "mongooseim@prime"}]}]}. +{routing_modules, [mongoose_router_global, mongoose_router_localdomain]}. +{replaced_wait_timeout, 2000}. +{hide_service_name, true}. +{extauth_instances, 1}. +{anonymous_protocol, sasl_anon}. +{allow_multiple_connections, true}. +{auth_opts, [ + {ldap_pool_tag, default}, + {ldap_bind_pool_tag, bind}, + {ldap_base, "ou=Users,dc=esl,dc=com"}, + {ldap_uids, ["uid", {"uid2", "%u"}]}, + {ldap_filter, "(&(objectClass=shadowAccount)(memberOf=Jabber Users))"}, + {ldap_dn_filter, {"(&(name=%s)(owner=%D)(user=%u@%d))", ["sn"]}}, + {ldap_local_filter, {equal, {"accountStatus",["enabled"]}}}, + {ldap_deref, never}, + {extauth_program, "/usr/bin/authenticator"}, + {basic_auth, "admin:admin"}, + {jwt_secret, "secret123"}, + {jwt_algorithm, "RS256"}, + {jwt_username_key, user}, + {bucket_type, <<"user_bucket">>} +]}. + +{listen, + [ + { 5280, ejabberd_cowboy, [ + {transport_options, [{num_acceptors, 10}, {max_connections, 1024}]}, + {modules, [ + {"_", "/ws-xmpp", mod_websockets, [{ejabberd_service, [ + {access, all}, + {shaper_rule, fast}, + {password, "secret"}, + {max_fsm_queue, 1000}]} + ]} + ]} + ]}]}. +{services, [ + {service_mongoose_system_metrics, [report, {initial_report, 300000}, {periodic_report, 10800000}, {tracking_id, "UA-123456789"}]} +]}. 
diff --git a/test/config_parser_SUITE_data/miscellaneous.toml b/test/config_parser_SUITE_data/miscellaneous.toml new file mode 100644 index 00000000000..65023a80e18 --- /dev/null +++ b/test/config_parser_SUITE_data/miscellaneous.toml @@ -0,0 +1,77 @@ +[general] + hosts = [ + "localhost", + "anonymous.localhost" + ] + http_server_name = "Apache" + rdbms_server_type = "mssql" + override = ["local", "global", "acls"] + pgsql_users_number_estimate = true + route_subdomains = "s2s" + routing_modules = [ + "mongoose_router_global", + "mongoose_router_localdomain" + ] + replaced_wait_timeout = 2000 + hide_service_name = true + + [general.mongooseimctl_access_commands.local] + commands = ["join_cluster"] + argument_restrictions.node = "mongooseim@prime" + +[auth] + http.basic_auth = "admin:admin" + riak.bucket_type = "user_bucket" + + [auth.anonymous] + protocol = "sasl_anon" + allow_multiple_connections = true + + [auth.external] + program = "/usr/bin/authenticator" + instances = 1 + + [auth.jwt] + secret.value = "secret123" + algorithm = "RS256" + username_key = "user" + + [auth.ldap] + pool_tag = "default" + bind_pool_tag = "bind" + base = "ou=Users,dc=esl,dc=com" + filter = "(&(objectClass=shadowAccount)(memberOf=Jabber Users))" + dn_filter.filter = "(&(name=%s)(owner=%D)(user=%u@%d))" + dn_filter.attributes = ["sn"] + local_filter.operation = "equal" + local_filter.attribute = "accountStatus" + local_filter.values = ["enabled"] + deref = "never" + + [[auth.ldap.uids]] + attr = "uid" + + [[auth.ldap.uids]] + attr = "uid2" + format = "%u" + +[[listen.http]] + port = 5280 + transport.num_acceptors = 10 + transport.max_connections = 1024 + + [[listen.http.handlers.mod_websockets]] + host = "_" + path = "/ws-xmpp" + + [listen.http.handlers.mod_websockets.service] + access = "all" + shaper_rule = "fast" + password = "secret" + max_fsm_queue = 1000 + +[services.service_mongoose_system_metrics] + report = true + initial_report = 300_000 + periodic_report = 10_800_000 + 
tracking_id = "UA-123456789" diff --git a/test/config_parser_SUITE_data/modules.cfg b/test/config_parser_SUITE_data/modules.cfg new file mode 100644 index 00000000000..76f2d570cc8 --- /dev/null +++ b/test/config_parser_SUITE_data/modules.cfg @@ -0,0 +1,260 @@ +{hosts, [ + "localhost", + "dummy_host" +]}. +{modules, + [ + {mod_adhoc, [{iqdisc, one_queue}, {report_commands_node, true}]}, + {mod_auth_token, [{{validity_period, access}, {13, minutes}}, + {{validity_period, refresh}, {13, days}}]}, + {mod_bosh, [ + {inactivity, 20}, + {max_wait, infinity}, + {server_acks, true}, + {backend, mnesia}, + {maxpause, 120} + ]}, + {mod_caps, [{cache_size, 1000}, {cache_life_time, 86}]}, + {mod_carboncopy, [{iqdisc, no_queue}]}, + {mod_csi, [{buffer_max, 40}]}, + {mod_disco, [ + {iqdisc, one_queue}, + {extra_domains, [<<"some_domain">>, <<"another_domain">>]}, + {server_info, [{all, "abuse-address", ["admin@example.com"]}, {[mod_muc, mod_disco], "friendly-spirits", ["spirit1@localhost", "spirit2@localhost"]}]}, + {users_can_see_hidden_services, true} + ]}, + {mod_event_pusher, [ + {backends, [ + {sns, [ + {access_key_id, "AKIAIOSFODNN7EXAMPLE"}, + {secret_access_key, "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY"}, + {region, "eu-west-1"}, + {account_id, "123456789012"}, + {sns_host, "sns.eu-west-1.amazonaws.com"}, + {muc_host, "conference.HOST"}, + {plugin_module, mod_event_pusher_sns_defaults}, + {presence_updates_topic, "user_presence_updated"}, + {pm_messages_topic, "user_message_sent"}, + {muc_messages_topic, "user_messagegroup_sent"}, + {pool_size, 100}, + {publish_retry_count, 2}, + {publish_retry_time_ms, 50} + ]}, + {push, [ + {backend, mnesia}, + {wpool, [{workers, 200}]}, + {plugin_module, mod_event_pusher_push_plugin_defaults}, + {virtual_pubsub_hosts, ["host1", "host2"]} + ]}, + {http, [ + {pool_name, http_pool}, + {path, "/notifications"}, + {callback_module, mod_event_pusher_http_defaults} + ]}, + {rabbit, [ + {presence_exchange, [{name, <<"presence">>}, + {type, 
<<"topic">>}]}, + {chat_msg_exchange, [{name, <<"chat_msg">>}, + {sent_topic, <<"chat_msg_sent">>}, + {recv_topic, <<"chat_msg_recv">>}]}, + {groupchat_msg_exchange, [{name, <<"groupchat_msg">>}, + {sent_topic, <<"groupchat_msg_sent">>}, + {recv_topic, <<"groupchat_msg_recv">>}]} + ]} + ]} + ]}, + {mod_extdisco, [ + [{host, "stun1"}, + {password, "password"}, + {port, 3478}, + {transport, "udp"}, + {type, stun}, + {username, "username"}], + [{host, "stun2"}, + {password, "password"}, + {port, 2222}, + {transport, "tcp"}, + {type, stun}, + {username, "username"}], + [{host, "192.168.0.1"}, + {type, turn}] + ]}, + {mod_http_upload, [ + {host, "upload.@HOST@"}, + {backend, s3}, + {expiration_time, 120}, + {s3, [ + {bucket_url, "https://s3-eu-west-1.amazonaws.com/mybucket"}, + {region, "eu-west-1"}, + {add_acl, true}, + {access_key_id, "AKIAIOSFODNN7EXAMPLE"}, + {secret_access_key, "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY"} + ]} + ]}, + {mod_inbox, [{backend, rdbms}, + {reset_markers, [displayed]}, + {aff_changes, true}, + {remove_on_kicked, true}, + {groupchat, [muclight]} + ]}, + {mod_global_distrib, [ + {global_host, "example.com"}, + {local_host, "datacenter1.example.com"}, + {connections, [ + {endpoints, [{"172.16.0.2", 5555}]}, + {advertised_endpoints, [{"172.16.0.2", 5555}]}, + {connections_per_endpoint, 30}, + {tls_opts, [ + {certfile, "priv/dc1.pem"}, + {cafile, "priv/ca.pem"} + ]} + ]}, + {cache, [ + {domain_lifetime_seconds, 60} + ]}, + {bounce, [ + {resend_after_ms, 300}, + {max_retries, 3} + ]}, + {redis, [ + {pool, global_distrib} + ]} + ]}, + {mod_jingle_sip, [ + {proxy_host, "localhost"}, + {proxy_port, 5600}, + {listen_port, 5600}, + {local_host, "localhost"}, + {sdp_origin, "127.0.0.1"} + ]}, + {mod_keystore, [{keys, [{access_secret, ram}, + {access_psk, {file, "priv/access_psk"}}, + {provision_psk, {file, "priv/provision_psk"}} + ]}, + {ram_key_size, 1000} + ]}, + {mod_last, [{backend, mnesia}, {iqdisc, {queues, 10}}]}, + {mod_mam_meta, [ + 
{backend, rdbms}, + {no_stanzaid_element, true}, + {is_archivable_message, mod_mam_utils}, + {archive_chat_markers, true}, + {full_text_search, true}, + + + {pm, [{user_prefs_store, rdbms}, {full_text_search, false}]}, + {muc, [ + {host, "muc.example.com"}, + {rdbms_message_format, simple}, + {async_writer, false}, + {user_prefs_store, mnesia} + ]} + ]}, + {mod_muc, [ + {host, "muc.example.com"}, + {access, muc}, + {access_create, muc_create}, + {http_auth_pool, my_auth_pool}, + {default_room_options, [ + {password_protected, true}, + {affiliations, [ + {{<<"alice">>, <<"localhost">>, <<"resource1">>}, member}, + {{<<"bob">>, <<"localhost">>, <<"resource2">>}, owner} + ]} + ]} + ]}, + {mod_muc_log, + [ + {outdir, "www/muc"}, + {access_log, muc}, + {top_link, {"/", "Home"}}, + {cssfile, <<"path/to/css/file">>} + ]}, + {mod_muc_light, [ + {host, "muclight.example.com"}, + {equal_occupants, true}, + {legacy_mode, true}, + {rooms_per_user, 10}, + {blocking, false}, + {all_can_configure, true}, + {all_can_invite, true}, + {max_occupants, 50}, + {rooms_per_page, 5}, + {rooms_in_rosters, true}, + {config_schema, [{"roomname", "The Room"}, {"display-lines", 30, display_lines, integer}]} + ]}, + {mod_offline, [ + {access_max_user_messages, max_user_offline_messages}, + {backend, riak}, + {bucket_type, <<"offline">>} + ]}, + {mod_ping, [ + {send_pings, true}, + {ping_interval, 60}, + {timeout_action, none}, + {ping_req_timeout, 32} + ]}, + {mod_pubsub, [ + {access_createnode, pubsub_createnode}, + {ignore_pep_from_offline, false}, + {backend, rdbms}, + {last_item_cache, mnesia}, + {max_items_node, 1000}, + {plugins, [<<"flat">>, <<"pep">>]}, + {pep_mapping, [{"urn:xmpp:microblog:0", "mb"}]} + ]}, + {mod_push_service_mongoosepush, [ + {pool_name, mongoose_push_http}, + {api_version, "v3"}, + {max_http_connections, 100} + ]}, + {mod_register, [ + {welcome_message, {"Subject", "Body"}}, + {access, all}, + {registration_watchers, [<<"JID1">>, <<"JID2">>]}, + {password_strength, 
32} + ]}, + {mod_revproxy, + [{routes, [{"www.erlang-solutions.com", "/admin", "_", + "https://www.erlang-solutions.com/"}, + {":var.com", "/:var", "http://localhost:8080/"}, + {":domain.com", "/", "_", "http://localhost:8080/:domain"}] + }]}, + {mod_roster, [ + {versioning, true}, + {store_current_id, true} + ]}, + {mod_shared_roster_ldap, [ + {ldap_base, "ou=Users,dc=ejd,dc=com"}, + {ldap_groupattr, "ou"}, + {ldap_memberattr, "cn"},{ldap_userdesc, "cn"}, + {ldap_filter, "(objectClass=inetOrgPerson)"}, + {ldap_rfilter, "(objectClass=inetOrgPerson)"}, + {ldap_group_cache_validity, 1}, + {ldap_user_cache_validity, 1}]}, + {mod_stream_management, [{buffer_max, 30}, + {ack_freq, 1}, + {resume_timeout, 600}, + {stale_h, [{enabled, true}, + {stale_h_repeat_after, 1800}, + {stale_h_geriatric, 3600}]} + ]}, + {mod_vcard, [ + {matches, 1}, + {search, true}, + {host, "directory.example.com"}, + {ldap_vcard_map, [ + {<<"FAMILY">>, <<"%s">>, [<<"sn">>]}, + {<<"FN">>, <<"%s">>, [<<"displayName">>]} + ]}, + {ldap_search_fields, [ + {<<"User">>, <<"%u">>}, + {<<"Full Name">>, <<"displayName">>} + ]}, + {ldap_search_reported, [ + {<<"Full Name">>, <<"FN">>}, + {<<"Given Name">>, <<"FIRST">>} + ]} + ]}, + {mod_version, [{os_info, true}]} +]}. 
diff --git a/test/config_parser_SUITE_data/modules.toml b/test/config_parser_SUITE_data/modules.toml new file mode 100644 index 00000000000..735c7440f2f --- /dev/null +++ b/test/config_parser_SUITE_data/modules.toml @@ -0,0 +1,319 @@ +[general] + hosts = [ + "localhost", + "dummy_host" + ] + +[modules.mod_adhoc] + iqdisc.type = "one_queue" + report_commands_node = true + +[modules.mod_auth_token] + validity_period = [ + {token = "access", value = 13, unit = "minutes"}, + {token = "refresh", value = 13, unit = "days"} + ] + +[modules.mod_bosh] + inactivity = 20 + max_wait = "infinity" + server_acks = true + backend = "mnesia" + maxpause = 120 + +[modules.mod_caps] + cache_size = 1000 + cache_life_time = 86 + +[modules.mod_carboncopy] + iqdisc.type = "no_queue" + +[modules.mod_csi] + buffer_max = 40 + +[modules.mod_disco] + iqdisc.type = "one_queue" + extra_domains = ["some_domain", "another_domain"] + server_info = [ + {module = "all", name = "abuse-address", urls = ["admin@example.com"]}, + {module = ["mod_muc", "mod_disco"], name = "friendly-spirits", urls = ["spirit1@localhost", "spirit2@localhost"]} + ] + users_can_see_hidden_services = true + +[modules.mod_event_pusher] + backend.sns.access_key_id = "AKIAIOSFODNN7EXAMPLE" + backend.sns.secret_access_key = "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY" + backend.sns.region = "eu-west-1" + backend.sns.account_id = "123456789012" + backend.sns.sns_host = "sns.eu-west-1.amazonaws.com" + backend.sns.muc_host = "conference.HOST" + backend.sns.plugin_module = "mod_event_pusher_sns_defaults" + backend.sns.presence_updates_topic = "user_presence_updated" + backend.sns.pm_messages_topic = "user_message_sent" + backend.sns.muc_messages_topic = "user_messagegroup_sent" + backend.sns.pool_size = 100 + backend.sns.publish_retry_count = 2 + backend.sns.publish_retry_time_ms = 50 + + backend.push.backend = "mnesia" + backend.push.wpool.workers = 200 + backend.push.plugin_module = "mod_event_pusher_push_plugin_defaults" + 
backend.push.virtual_pubsub_hosts = ["host1", "host2"] + + backend.http.pool_name = "http_pool" + backend.http.path = "/notifications" + backend.http.callback_module = "mod_event_pusher_http_defaults" + + backend.rabbit.presence_exchange.name ="presence" + backend.rabbit.presence_exchange.type = "topic" + backend.rabbit.chat_msg_exchange.name = "chat_msg" + backend.rabbit.chat_msg_exchange.sent_topic = "chat_msg_sent" + backend.rabbit.chat_msg_exchange.recv_topic = "chat_msg_recv" + backend.rabbit.groupchat_msg_exchange.name = "groupchat_msg" + backend.rabbit.groupchat_msg_exchange.sent_topic = "groupchat_msg_sent" + backend.rabbit.groupchat_msg_exchange.recv_topic = "groupchat_msg_recv" + +[modules.mod_extdisco] + [[modules.mod_extdisco.service]] + type = "stun" + host = "stun1" + port = 3478 + transport = "udp" + username = "username" + password = "password" + [[modules.mod_extdisco.service]] + type = "stun" + host = "stun2" + port = 2222 + transport = "tcp" + username = "username" + password = "password" + [[modules.mod_extdisco.service]] + type = "turn" + host = "192.168.0.1" + +[modules.mod_http_upload] + host = "upload.@HOST@" + backend = "s3" + expiration_time = 120 + s3.bucket_url = "https://s3-eu-west-1.amazonaws.com/mybucket" + s3.region = "eu-west-1" + s3.add_acl = true + s3.access_key_id = "AKIAIOSFODNN7EXAMPLE" + s3.secret_access_key = "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY" + +[modules.mod_inbox] + backend = "rdbms" + reset_markers = ["displayed"] + aff_changes = true + remove_on_kicked = true + groupchat = ["muclight"] + +[modules.mod_global_distrib] + global_host = "example.com" + local_host = "datacenter1.example.com" + connections.endpoints = [{host = "172.16.0.2", port = 5555}] + connections.advertised_endpoints = [{host = "172.16.0.2", port = 5555}] + connections.tls.certfile = "priv/dc1.pem" + connections.tls.cacertfile = "priv/ca.pem" + connections.connections_per_endpoint = 30 + cache.domain_lifetime_seconds = 60 + bounce.resend_after_ms = 
300 + bounce.max_retries = 3 + redis.pool = "global_distrib" + +[modules.mod_jingle_sip] + proxy_host = "localhost" + proxy_port = 5600 + listen_port = 5600 + local_host = "localhost" + sdp_origin = "127.0.0.1" + +[modules.mod_keystore] + ram_key_size = 1000 + + [[modules.mod_keystore.keys]] + name = "access_secret" + type = "ram" + + [[modules.mod_keystore.keys]] + name = "access_psk" + type = "file" + path = "priv/access_psk" + + [[modules.mod_keystore.keys]] + name = "provision_psk" + type = "file" + path = "priv/provision_psk" + +[modules.mod_last] + backend = "mnesia" + iqdisc = {"type" = "queues", "workers" = 10} + +[modules.mod_mam_meta] + backend = "rdbms" + no_stanzaid_element = true + is_archivable_message = "mod_mam_utils" + archive_chat_markers = true + full_text_search = true + pm.user_prefs_store = "rdbms" + pm.full_text_search = false + muc.host = "muc.example.com" + muc.rdbms_message_format = "simple" + muc.async_writer = false + muc.user_prefs_store = "mnesia" + +[modules.mod_muc] + host = "muc.example.com" + access = "muc" + access_create = "muc_create" + http_auth_pool = "my_auth_pool" + default_room.password_protected = true + + [[modules.mod_muc.default_room.affiliations]] + user = "alice" + server = "localhost" + resource = "resource1" + affiliation = "member" + + [[modules.mod_muc.default_room.affiliations]] + user = "bob" + server = "localhost" + resource = "resource2" + affiliation = "owner" + +[modules.mod_muc_log] + outdir = "www/muc" + access_log = "muc" + css_file = "path/to/css/file" + top_link.target = "/" + top_link.text = "Home" + +[modules.mod_muc_light] + host = "muclight.example.com" + equal_occupants = true + legacy_mode = true + rooms_per_user = 10 + blocking = false + all_can_configure = true + all_can_invite = true + max_occupants = 50 + rooms_per_page = 5 + rooms_in_rosters = true + + [[modules.mod_muc_light.config_schema]] + field = "roomname" + value = "The Room" + + [[modules.mod_muc_light.config_schema]] + field = 
"display-lines" + value = 30 + internal_key = "display_lines" + type = "integer" + +[modules.mod_offline] + access_max_user_messages = "max_user_offline_messages" + backend = "riak" + riak.bucket_type = "offline" + +[modules.mod_ping] + send_pings = true + ping_interval = 60 + timeout_action = "none" + ping_req_timeout = 32 + +[modules.mod_pubsub] + access_createnode = "pubsub_createnode" + ignore_pep_from_offline = false + backend = "rdbms" + last_item_cache = "mnesia" + max_items_node = 1000 + plugins = ["flat", "pep"] + + [[modules.mod_pubsub.pep_mapping]] + namespace = "urn:xmpp:microblog:0" + node = "mb" + +[modules.mod_push_service_mongoosepush] + pool_name = "mongoose_push_http" + api_version = "v3" + max_http_connections = 100 + +[modules.mod_register] + welcome_message = {subject = "Subject", body = "Body"} + access = "all" + registration_watchers = ["JID1", "JID2"] + password_strength = 32 + + [[modules.mod_revproxy.routes]] + host = "www.erlang-solutions.com" + path = "/admin" + method = "_" + upstream = "https://www.erlang-solutions.com/" + + [[modules.mod_revproxy.routes]] + host = ":var.com" + path = "/:var" + upstream = "http://localhost:8080/" + + [[modules.mod_revproxy.routes]] + host = ":domain.com" + path = "/" + method = "_" + upstream = "http://localhost:8080/:domain" + +[modules.mod_roster] + versioning = true + store_current_id = true + +[modules.mod_shared_roster_ldap] + ldap_base = "ou=Users,dc=ejd,dc=com" + ldap_groupattr = "ou" + ldap_memberattr = "cn" + ldap_userdesc = "cn" + ldap_filter = "(objectClass=inetOrgPerson)" + ldap_rfilter = "(objectClass=inetOrgPerson)" + ldap_group_cache_validity = 1 + ldap_user_cache_validity = 1 + +[modules.mod_stream_management] + buffer_max = 30 + ack_freq = 1 + resume_timeout = 600 + stale_h.enabled = true + stale_h.repeat_after = 1800 + stale_h.geriatric = 3600 + +[modules.mod_vcard] + matches = 1 + search = true + host = "directory.example.com" + + [[modules.mod_vcard.ldap_vcard_map]] + vcard_field = 
"FAMILY" + ldap_pattern = "%s" + ldap_field = "sn" + + [[modules.mod_vcard.ldap_vcard_map]] + vcard_field = "FN" + ldap_pattern = "%s" + ldap_field = "displayName" + + [[modules.mod_vcard.ldap_search_fields]] + search_field = "User" + ldap_field = "%u" + + [[modules.mod_vcard.ldap_search_fields]] + search_field = "Full Name" + ldap_field = "displayName" + + [[modules.mod_vcard.ldap_search_reported]] + search_field = "Full Name" + vcard_field = "FN" + + [[modules.mod_vcard.ldap_search_reported]] + search_field = "Given Name" + vcard_field = "FIRST" + +[modules.mod_version] + os_info = true diff --git a/test/config_parser_SUITE_data/mongooseim-pgsql.cfg b/test/config_parser_SUITE_data/mongooseim-pgsql.cfg new file mode 100644 index 00000000000..a7a7eb86f61 --- /dev/null +++ b/test/config_parser_SUITE_data/mongooseim-pgsql.cfg @@ -0,0 +1,193 @@ +{loglevel, warning}. +{hosts, ["localhost", + "anonymous.localhost", + "localhost.bis" + ] }. +{listen, + [ + { 5280, ejabberd_cowboy, [ + {transport_options, [{num_acceptors, 10}, {max_connections, 1024}]}, + {modules, [ + {"_", "/http-bind", mod_bosh}, + {"_", "/ws-xmpp", mod_websockets, [{ejabberd_service, [ + {access, all}, + {shaper_rule, fast}, + {password, "secret"}]} + ]} + ]} + ]}, + { 5285, ejabberd_cowboy, [ + {transport_options, [{num_acceptors, 10}, {max_connections, 1024}]}, + {ssl, [{certfile, "tools/ssl/mongooseim/cert.pem"}, {keyfile, "tools/ssl/mongooseim/key.pem"}, {password, ""}]}, + {modules, [ + {"_", "/http-bind", mod_bosh}, + {"_", "/ws-xmpp", mod_websockets, [ + {timeout, infinity}, + {ping_rate, none}, + {max_stanza_size, 100} + ]}, + {"localhost", "/api", mongoose_api_admin, [{auth, {<<"ala">>, <<"makotaipsa">>}}]}, + {"localhost", "/api/contacts/{:jid}", mongoose_api_client, []} + ]} + + ]}, + { { 8088, "127.0.0.1"} , ejabberd_cowboy, [ + {transport_options, [{num_acceptors, 10}, {max_connections, 1024}]}, + {modules, [ + {"localhost", "/api", mongoose_api_admin, []} + ]} + ]}, + { 8089 , 
ejabberd_cowboy, [ + {transport_options, [{num_acceptors, 10}, {max_connections, 1024}]}, + {protocol_options, [{compress, true}]}, + {ssl, [{certfile, "tools/ssl/mongooseim/cert.pem"}, {keyfile, "tools/ssl/mongooseim/key.pem"}, {password, ""}]}, + {modules, [ + {"_", "/api/sse", lasse_handler, [mongoose_client_api_sse]}, + {"_", "/api/messages/[:with]", mongoose_client_api_messages, []}, + {"_", "/api/contacts/[:jid]", mongoose_client_api_contacts, []}, + {"_", "/api/rooms/[:id]", mongoose_client_api_rooms, []}, + {"_", "/api/rooms/[:id]/config", mongoose_client_api_rooms_config, []}, + {"_", "/api/rooms/:id/users/[:user]", mongoose_client_api_rooms_users, []}, + {"_", "/api/rooms/[:id]/messages", mongoose_client_api_rooms_messages, []}, + {"_", "/api-docs", cowboy_swagger_redirect_handler, #{}}, + {"_", "/api-docs/swagger.json", cowboy_swagger_json_handler, #{}}, + {"_", "/api-docs/[...]", cowboy_static, {priv_dir, cowboy_swagger, "swagger", [{mimetypes, cow_mimetypes, all}]}} + ]} + ]}, + { { 5288, "127.0.0.1"} , ejabberd_cowboy, [ + {transport_options, [{num_acceptors, 10}, {max_connections, 1024}]}, + {modules, [ + {"localhost", "/api", mongoose_api, [{handlers, [mongoose_api_metrics, + mongoose_api_users]}]} + ]} + ]}, + { 5222, ejabberd_c2s, [ + {certfile, "tools/ssl/mongooseim/server.pem"}, starttls, + {zlib, 10000}, + {access, c2s}, + {shaper, c2s_shaper}, + {max_stanza_size, 65536} + ,{dhfile, "tools/ssl/mongooseim/dh_server.pem"} + ]}, + { 5223, ejabberd_c2s, [ + {zlib, 4096}, + {access, c2s}, + {shaper, c2s_shaper}, + {max_stanza_size, 65536} + ]}, + { 5269, ejabberd_s2s_in, [ + {shaper, s2s_shaper}, + {max_stanza_size, 131072} + ,{dhfile, "tools/ssl/mongooseim/dh_server.pem"} + ]} + ,{ 8888, ejabberd_service, [ + {access, all}, + {shaper_rule, fast}, + {ip, {127, 0, 0, 1}}, + {password, "secret"} + ]}, + { 8666, ejabberd_service, [ + {access, all}, + {conflict_behaviour, kick_old}, + {shaper_rule, fast}, + {ip, {127, 0, 0, 1}}, + {password, "secret"} + 
]}, + { 8189, ejabberd_service, [ + {access, all}, + {hidden_components, true}, + {shaper_rule, fast}, + {ip, {127, 0, 0, 1}}, + {password, "secret"} + ]} + ]}. +{s2s_use_starttls, optional}. +{s2s_certfile, "tools/ssl/mongooseim/server.pem"}. +{s2s_default_policy, allow }. +{outgoing_s2s_port, 5299 }. +{ {s2s_addr, "fed1"}, {127,0,0,1} }. +{sm_backend, {mnesia, []} }. +{auth_method, rdbms }. +{auth_opts, [ + {password_format, {scram, [sha256]}} + , {scram_iterations, 64} + , {cyrsasl_external, standard} + ]}. +{host_config, "anonymous.localhost", [{auth_method, anonymous}, + {allow_multiple_connections, true}, + {anonymous_protocol, both}, + {auth_opts, []}]}. +{outgoing_pools, [ + {redis, <<"localhost">>, global_distrib, [{workers, 10}], []}, + {rdbms, global, default, [{workers, 5}], + [{server, {pgsql, "localhost", "ejabberd", "ejabberd", "mongooseim_secret", + [{ssl, required}, {ssl_opts, [{verify, verify_peer}, + {cacertfile, "priv/ssl/cacert.pem"}, {server_name_indication, disable}]}]}}]} + ]}. +{shaper, normal, {maxrate, 1000}}. +{shaper, fast, {maxrate, 50000}}. +{max_fsm_queue, 1000}. +{acl, local, {user_regexp, ""}}. +{access, max_user_sessions, [{10, all}]}. +{access, max_user_offline_messages, [{5000, admin}, {100, all}]}. +{access, local, [{allow, local}]}. +{access, c2s, [{deny, blocked}, + {allow, all}]}. +{access, c2s_shaper, [{none, admin}, + {normal, all}]}. +{access, s2s_shaper, [{fast, all}]}. +{access, muc_admin, [{allow, admin}]}. +{access, muc_create, [{allow, local}]}. +{access, muc, [{allow, all}]}. +{access, register, [{allow, all}]}. +{registration_timeout, infinity}. +{access, mam_set_prefs, [{default, all}]}. +{access, mam_get_prefs, [{default, all}]}. +{access, mam_lookup_messages, [{default, all}]}. +{shaper, mam_shaper, {maxrate, 1}}. +{shaper, mam_global_shaper, {maxrate, 1000}}. +{access, mam_set_prefs_shaper, [{mam_shaper, all}]}. +{access, mam_get_prefs_shaper, [{mam_shaper, all}]}. 
+{access, mam_lookup_messages_shaper, [{mam_shaper, all}]}. +{access, mam_set_prefs_global_shaper, [{mam_global_shaper, all}]}. +{access, mam_get_prefs_global_shaper, [{mam_global_shaper, all}]}. +{access, mam_lookup_messages_global_shaper, [{mam_global_shaper, all}]}. +{language, "en"}. +{all_metrics_are_global, false }. +{services, + [ + {service_admin_extra, [{submods, [node, accounts, sessions, vcard, gdpr, upload, + roster, last, private, stanza, stats]}]}, + {service_mongoose_system_metrics, [{initial_report, 300000}, + {periodic_report, 10800000}]} + ] +}. +{modules, + [ + {mod_adhoc, []}, + {mod_amp, []}, + {mod_disco, [{users_can_see_hidden_services, false}]}, + {mod_commands, []}, + {mod_muc_commands, []}, + {mod_muc_light_commands, []}, + {mod_last, [{backend, rdbms}]}, + {mod_stream_management, [ + ]}, + {mod_offline, [{backend, rdbms}]}, + {mod_privacy, [{backend, rdbms}]}, + {mod_blocking, []}, + {mod_private, [{backend, rdbms}]}, + {mod_register, [ + {welcome_message, {"Hello", "I am MongooseIM"}}, + {ip_access, [{allow, "127.0.0.0/8"}, + {deny, "0.0.0.0/0"}]}, + {access, register} + ]}, + {mod_roster, [{backend, rdbms}]}, + {mod_sic, []}, + {mod_vcard, [ {backend, rdbms}, + {host, "vjud.@HOST@"} + ]}, + {mod_bosh, []}, + {mod_carboncopy, []} + ]}. 
diff --git a/test/config_parser_SUITE_data/mongooseim-pgsql.toml b/test/config_parser_SUITE_data/mongooseim-pgsql.toml new file mode 100644 index 00000000000..80ca1058d57 --- /dev/null +++ b/test/config_parser_SUITE_data/mongooseim-pgsql.toml @@ -0,0 +1,379 @@ +[general] + loglevel = "warning" + hosts = [ + "localhost", + "anonymous.localhost", + "localhost.bis" + ] + registration_timeout = "infinity" + language = "en" + all_metrics_are_global = false + sm_backend = "mnesia" + max_fsm_queue = 1000 + +[[listen.http]] + port = 5280 + transport.num_acceptors = 10 + transport.max_connections = 1024 + + [[listen.http.handlers.mod_bosh]] + host = "_" + path = "/http-bind" + + [[listen.http.handlers.mod_websockets]] + host = "_" + path = "/ws-xmpp" + + [listen.http.handlers.mod_websockets.service] + access = "all" + shaper_rule = "fast" + password = "secret" + +[[listen.http]] + port = 5285 + transport.num_acceptors = 10 + transport.max_connections = 1024 + tls.certfile = "tools/ssl/mongooseim/cert.pem" + tls.keyfile = "tools/ssl/mongooseim/key.pem" + tls.password = "" + + [[listen.http.handlers.mongoose_api_admin]] + host = "localhost" + path = "/api" + username = "ala" + password = "makotaipsa" + + [[listen.http.handlers.mongoose_api_client]] + host = "localhost" + path = "/api/contacts/{:jid}" + + [[listen.http.handlers.mod_bosh]] + host = "_" + path = "/http-bind" + + [[listen.http.handlers.mod_websockets]] + host = "_" + path = "/ws-xmpp" + timeout = "infinity" + ping_rate = "none" + max_stanza_size = 100 + +[[listen.http]] + port = 8088 + ip_address = "127.0.0.1" + transport.num_acceptors = 10 + transport.max_connections = 1024 + + [[listen.http.handlers.mongoose_api_admin]] + host = "localhost" + path = "/api" + +[[listen.http]] + port = 8089 + transport.num_acceptors = 10 + transport.max_connections = 1024 + protocol.compress = true + tls.certfile = "tools/ssl/mongooseim/cert.pem" + tls.keyfile = "tools/ssl/mongooseim/key.pem" + tls.password = "" + + 
[[listen.http.handlers.lasse_handler]] + host = "_" + path = "/api/sse" + module = "mongoose_client_api_sse" + + [[listen.http.handlers.mongoose_client_api_messages]] + host = "_" + path = "/api/messages/[:with]" + + [[listen.http.handlers.mongoose_client_api_contacts]] + host = "_" + path = "/api/contacts/[:jid]" + + [[listen.http.handlers.mongoose_client_api_rooms]] + host = "_" + path = "/api/rooms/[:id]" + + [[listen.http.handlers.mongoose_client_api_rooms_config]] + host = "_" + path = "/api/rooms/[:id]/config" + + [[listen.http.handlers.mongoose_client_api_rooms_users]] + host = "_" + path = "/api/rooms/:id/users/[:user]" + + [[listen.http.handlers.mongoose_client_api_rooms_messages]] + host = "_" + path = "/api/rooms/[:id]/messages" + + [[listen.http.handlers.cowboy_swagger_redirect_handler]] + host = "_" + path = "/api-docs" + + [[listen.http.handlers.cowboy_swagger_json_handler]] + host = "_" + path = "/api-docs/swagger.json" + + [[listen.http.handlers.cowboy_static]] + host = "_" + path = "/api-docs/[...]" + type = "priv_dir" + app = "cowboy_swagger" + content_path = "swagger" + +[[listen.http]] + port = 5288 + ip_address = "127.0.0.1" + transport.num_acceptors = 10 + transport.max_connections = 1024 + + [[listen.http.handlers.mongoose_api]] + host = "localhost" + path = "/api" + handlers = ["mongoose_api_metrics", "mongoose_api_users"] + +[[listen.c2s]] + port = 5222 + zlib = 10000 + access = "c2s" + shaper = "c2s_shaper" + max_stanza_size = 65536 + tls.mode = "starttls" + tls.certfile = "tools/ssl/mongooseim/server.pem" + tls.dhfile = "tools/ssl/mongooseim/dh_server.pem" + +[[listen.c2s]] + port = 5223 + zlib = 4096 + access = "c2s" + shaper = "c2s_shaper" + max_stanza_size = 65536 + +[[listen.s2s]] + port = 5269 + shaper = "s2s_shaper" + max_stanza_size = 131072 + tls.dhfile = "tools/ssl/mongooseim/dh_server.pem" + +[[listen.service]] + port = 8888 + access = "all" + shaper_rule = "fast" + ip_address = "127.0.0.1" + password = "secret" + 
+[[listen.service]] + port = 8666 + access = "all" + conflict_behaviour = "kick_old" + shaper_rule = "fast" + ip_address = "127.0.0.1" + password = "secret" + +[[listen.service]] + port = 8189 + access = "all" + hidden_components = true + shaper_rule = "fast" + ip_address = "127.0.0.1" + password = "secret" + +[auth] + methods = ["rdbms"] + password.format = "scram" + password.hash = ["sha256"] + scram_iterations = 64 + sasl_external = ["standard"] + +[outgoing_pools.redis.global_distrib] + scope = "single_host" + host = "localhost" + workers = 10 + +[outgoing_pools.rdbms.default] + scope = "global" + workers = 5 + + [outgoing_pools.rdbms.default.connection] + driver = "pgsql" + host = "localhost" + database = "ejabberd" + username = "ejabberd" + password = "mongooseim_secret" + tls.required = true + tls.verify_peer = true + tls.cacertfile = "priv/ssl/cacert.pem" + tls.server_name_indication = false + +[services.service_admin_extra] + submods = ["node", "accounts", "sessions", "vcard", "gdpr", "upload", + "roster", "last", "private", "stanza", "stats"] + +[services.service_mongoose_system_metrics] + initial_report = 300_000 + periodic_report = 10_800_000 + +[modules.mod_adhoc] + +[modules.mod_amp] + +[modules.mod_disco] + users_can_see_hidden_services = false + +[modules.mod_commands] + +[modules.mod_muc_commands] + +[modules.mod_muc_light_commands] + +[modules.mod_last] + backend = "rdbms" + +[modules.mod_stream_management] + +[modules.mod_offline] + backend = "rdbms" + +[modules.mod_privacy] + backend = "rdbms" + +[modules.mod_blocking] + +[modules.mod_private] + backend = "rdbms" + +[modules.mod_register] + welcome_message = {subject = "Hello", body = "I am MongooseIM"} + ip_access = [ + {address = "127.0.0.0/8", policy = "allow"}, + {address = "0.0.0.0/0", policy = "deny"} + ] + access = "register" + +[modules.mod_roster] + backend = "rdbms" + +[modules.mod_sic] + +[modules.mod_vcard] + backend = "rdbms" + host = "vjud.@HOST@" + +[modules.mod_bosh] + 
+[modules.mod_carboncopy] + +[shaper.normal] + max_rate = 1000 + +[shaper.fast] + max_rate = 50_000 + +[shaper.mam_shaper] + max_rate = 1 + +[shaper.mam_global_shaper] + max_rate = 1000 + +[acl] + local = [ + {user_regexp = ""} + ] + +[access] + max_user_sessions = [ + {acl = "all", value = 10} + ] + + max_user_offline_messages = [ + {acl = "admin", value = 5000}, + {acl = "all", value = 100} + ] + + local = [ + {acl = "local", value = "allow"} + ] + + c2s = [ + {acl = "blocked", value = "deny"}, + {acl = "all", value = "allow"} + ] + + c2s_shaper = [ + {acl = "admin", value = "none"}, + {acl = "all", value = "normal"} + ] + + s2s_shaper = [ + {acl = "all", value = "fast"} + ] + + muc_admin = [ + {acl = "admin", value = "allow"} + ] + + muc_create = [ + {acl = "local", value = "allow"} + ] + + muc = [ + {acl = "all", value = "allow"} + ] + + register = [ + {acl = "all", value = "allow"} + ] + + mam_set_prefs = [ + {acl = "all", value = "default"} + ] + + mam_get_prefs = [ + {acl = "all", value = "default"} + ] + + mam_lookup_messages = [ + {acl = "all", value = "default"} + ] + + mam_set_prefs_shaper = [ + {acl = "all", value = "mam_shaper"} + ] + + mam_get_prefs_shaper = [ + {acl = "all", value = "mam_shaper"} + ] + + mam_lookup_messages_shaper = [ + {acl = "all", value = "mam_shaper"} + ] + + mam_set_prefs_global_shaper = [ + {acl = "all", value = "mam_global_shaper"} + ] + + mam_get_prefs_global_shaper = [ + {acl = "all", value = "mam_global_shaper"} + ] + + mam_lookup_messages_global_shaper = [ + {acl = "all", value = "mam_global_shaper"} + ] + +[s2s] + use_starttls = "optional" + certfile = "tools/ssl/mongooseim/server.pem" + default_policy = "allow" + outgoing.port = 5299 + + [[s2s.address]] + host = "fed1" + ip_address = "127.0.0.1" + +[[host_config]] + host = "anonymous.localhost" + + [host_config.auth] + methods = ["anonymous"] + anonymous.allow_multiple_connections = true + anonymous.protocol = "both" diff --git 
a/test/config_parser_SUITE_data/outgoing_pools.cfg b/test/config_parser_SUITE_data/outgoing_pools.cfg new file mode 100644 index 00000000000..fc4ef4780a9 --- /dev/null +++ b/test/config_parser_SUITE_data/outgoing_pools.cfg @@ -0,0 +1,42 @@ +{hosts, ["localhost", + "anonymous.localhost", + "localhost.bis" + ] }. + +{outgoing_pools, [ + {redis, <<"localhost">>, global_distrib, [{workers, 10}], []}, + {rdbms, global, default, [{workers, 5}], + [{server, {pgsql, "localhost", "ejabberd", "ejabberd", "mongooseim_secret", + [{ssl, required}, {ssl_opts, [{verify, verify_peer}, + {cacertfile, "priv/ssl/cacert.pem"}, {server_name_indication, disable}]}]}}, + {keepalive_interval, 30}]}, + {http, global, mongoose_push_http, + [{workers, 50}], + [{server, "https://localhost:8443"}, + {path_prefix, "/"}, + {request_timeout, 2000} + ]}, + {riak, global, default, [{workers, 20}, {strategy, next_worker}], + [{address, "127.0.0.1"}, {port, 8087}, + {credentials, "username", "pass"}, + {cacertfile, "path/to/cacert.pem"}]}, + {cassandra, global, default, [], + [ + {servers, [{"cassandra_server1.example.com", 9042}, + {"cassandra_server2.example.com", 9042}] }, + {keyspace, "big_mongooseim"} + ]}, + {elastic, global, default, [], [{host, "localhost"}]}, + {rabbit, host, event_pusher, [{workers, 20}], + [{amqp_host, "localhost"}, + {amqp_port, 5672}, + {amqp_username, "guest"}, + {amqp_password, "guest"}, + {confirms_enabled, true}, + {max_worker_queue_len, 100}]}, + {ldap, host, default, [{workers, 5}], + [{servers, ["ldap-server.example.com"]}, + {rootdn, "cn=admin,dc=example,dc=com"}, + {password, "ldap-admin-password"}] + } + ]}. 
diff --git a/test/config_parser_SUITE_data/outgoing_pools.toml b/test/config_parser_SUITE_data/outgoing_pools.toml new file mode 100644 index 00000000000..36ba211a7cd --- /dev/null +++ b/test/config_parser_SUITE_data/outgoing_pools.toml @@ -0,0 +1,75 @@ +[general] + hosts = [ + "localhost", + "anonymous.localhost", + "localhost.bis" + ] + +[outgoing_pools.redis.global_distrib] + scope = "single_host" + host = "localhost" + workers = 10 + +[outgoing_pools.rdbms.default] + scope = "global" + workers = 5 + + [outgoing_pools.rdbms.default.connection] + driver = "pgsql" + host = "localhost" + database = "ejabberd" + username = "ejabberd" + password = "mongooseim_secret" + keepalive_interval = 30 + tls.required = true + tls.verify_peer = true + tls.cacertfile = "priv/ssl/cacert.pem" + tls.server_name_indication = false + +[outgoing_pools.http.mongoose_push_http] + scope = "global" + workers = 50 + + [outgoing_pools.http.mongoose_push_http.connection] + host = "https://localhost:8443" + path_prefix = "/" + request_timeout = 2000 + +[outgoing_pools.riak.default] + scope = "global" + workers = 20 + strategy = "next_worker" + connection.address = "127.0.0.1" + connection.port = 8087 + connection.credentials = {user = "username", password = "pass"} + connection.cacertfile = "path/to/cacert.pem" + +[outgoing_pools.cassandra.default] + scope = "global" + connection.servers = [ + {ip_address = "cassandra_server1.example.com", port = 9042}, + {ip_address = "cassandra_server2.example.com", port = 9042} + ] + connection.keyspace = "big_mongooseim" + + +[outgoing_pools.elastic.default] + scope = "global" + connection.host = "localhost" + +[outgoing_pools.rabbit.event_pusher] + scope = "host" + workers = 20 + connection.amqp_host = "localhost" + connection.amqp_port = 5672 + connection.amqp_username = "guest" + connection.amqp_password = "guest" + connection.confirms_enabled = true + connection.max_worker_queue_len = 100 + +[outgoing_pools.ldap.default] + scope = "host" + workers = 5 
+ connection.servers = ["ldap-server.example.com"] + connection.rootdn = "cn=admin,dc=example,dc=com" + connection.password = "ldap-admin-password" diff --git a/test/config_parser_SUITE_data/s2s_only.cfg b/test/config_parser_SUITE_data/s2s_only.cfg new file mode 100644 index 00000000000..fc287ead1c2 --- /dev/null +++ b/test/config_parser_SUITE_data/s2s_only.cfg @@ -0,0 +1,17 @@ +{hosts, [ + "localhost", + "dummy_host" +]}. + +{s2s_use_starttls, optional}. +{s2s_certfile, "tools/ssl/mongooseim/server.pem"}. +{s2s_default_policy, allow }. +{outgoing_s2s_port, 5299 }. +{ {s2s_addr, "fed1"}, {127,0,0,1} }. +{s2s_ciphers, "TLSv1.2:TLSv1.3"}. +{domain_certfile, "example.org", "/path/to/example_org.pem"}. +{domain_certfile, "example.com", "/path/to/example_com.pem"}. +{outgoing_s2s_options, [ipv4, ipv6], 10000}. +{s2s_shared, <<"shared secret">>}. +{s2s_dns_options, [{timeout, 30}, {retries, 1}]}. +{s2s_max_retry_delay, 30}. diff --git a/test/config_parser_SUITE_data/s2s_only.toml b/test/config_parser_SUITE_data/s2s_only.toml new file mode 100644 index 00000000000..0f3fa3f8f23 --- /dev/null +++ b/test/config_parser_SUITE_data/s2s_only.toml @@ -0,0 +1,29 @@ +[general] + hosts = [ + "localhost", + "dummy_host" + ] +[s2s] + use_starttls = "optional" + certfile = "tools/ssl/mongooseim/server.pem" + default_policy = "allow" + ciphers = "TLSv1.2:TLSv1.3" + outgoing.port = 5299 + outgoing.connection_timeout = 10000 + outgoing.ip_versions = [4, 6] + dns.timeout = 30 + dns.retries = 1 + shared = "shared secret" + max_retry_delay = 30 + + [[s2s.address]] + host = "fed1" + ip_address = "127.0.0.1" + + [[s2s.domain_certfile]] + domain = "example.com" + certfile = "/path/to/example_com.pem" + + [[s2s.domain_certfile]] + domain = "example.org" + certfile = "/path/to/example_org.pem" diff --git a/test/cowboy_SUITE_data/mongooseim.onlyhttp.cfg b/test/cowboy_SUITE_data/mongooseim.onlyhttp.cfg index ffd45b6ed61..22994608783 100644 --- a/test/cowboy_SUITE_data/mongooseim.onlyhttp.cfg +++ 
b/test/cowboy_SUITE_data/mongooseim.onlyhttp.cfg @@ -475,7 +475,7 @@ %% After successful registration, the user receives %% a message with this subject and body. %% - {welcome_message, {""}}, + {welcome_message, {"", ""}}, %% %% When a user registers, send a notification to diff --git a/test/cowboy_SUITE_data/mongooseim.onlyws.cfg b/test/cowboy_SUITE_data/mongooseim.onlyws.cfg index 6f693cccd2e..81fd385b0d1 100644 --- a/test/cowboy_SUITE_data/mongooseim.onlyws.cfg +++ b/test/cowboy_SUITE_data/mongooseim.onlyws.cfg @@ -475,7 +475,7 @@ %% After successful registration, the user receives %% a message with this subject and body. %% - {welcome_message, {""}}, + {welcome_message, {"", ""}}, %% %% When a user registers, send a notification to diff --git a/test/ejabberd_config_SUITE.erl b/test/ejabberd_config_SUITE.erl index d5386270c50..daf2729c116 100644 --- a/test/ejabberd_config_SUITE.erl +++ b/test/ejabberd_config_SUITE.erl @@ -75,7 +75,7 @@ smoke(Config) -> ok = stop_ejabberd(). coalesce_multiple_local_config_options(_Config) -> - F = fun mongoose_config_parser:group_host_changes/1, + F = fun mongoose_config_reload:group_host_changes/1, ?eq(coalesced_modules_section(), F(multiple_modules_sections())). add_a_module(C) -> diff --git a/test/ejabberd_config_SUITE_data/mongooseim.default.cfg b/test/ejabberd_config_SUITE_data/mongooseim.default.cfg index beea9b5cef7..e54250db16a 100644 --- a/test/ejabberd_config_SUITE_data/mongooseim.default.cfg +++ b/test/ejabberd_config_SUITE_data/mongooseim.default.cfg @@ -454,7 +454,7 @@ %% After successful registration, the user receives %% a message with this subject and body. 
%% - {welcome_message, {""}}, + {welcome_message, {"", ""}}, %% %% When a user registers, send a notification to diff --git a/test/ejabberd_config_SUITE_data/mongooseim.no_listeners.cfg b/test/ejabberd_config_SUITE_data/mongooseim.no_listeners.cfg index 7672f9c5063..03931152e3c 100644 --- a/test/ejabberd_config_SUITE_data/mongooseim.no_listeners.cfg +++ b/test/ejabberd_config_SUITE_data/mongooseim.no_listeners.cfg @@ -52,7 +52,7 @@ {mod_muc_log, [ {outdir, "/tmp/muclogs"}, {access_log, muc} ]}, {mod_privacy, []}, {mod_register, [ - {welcome_message, {""}}, + {welcome_message, {"", ""}}, {ip_access, [{allow, "127.0.0.0/8"}, {deny, "0.0.0.0/0"}]}, {access, register} diff --git a/test/ejabberd_config_SUITE_data/mongooseim.no_listeners.loglevel_err.cfg b/test/ejabberd_config_SUITE_data/mongooseim.no_listeners.loglevel_err.cfg index 70106e8b4c7..865d300ae5c 100644 --- a/test/ejabberd_config_SUITE_data/mongooseim.no_listeners.loglevel_err.cfg +++ b/test/ejabberd_config_SUITE_data/mongooseim.no_listeners.loglevel_err.cfg @@ -52,7 +52,7 @@ {mod_muc_log, [ {outdir, "/tmp/muclogs"}, {access_log, muc} ]}, {mod_privacy, []}, {mod_register, [ - {welcome_message, {""}}, + {welcome_message, {"", ""}}, {ip_access, [{allow, "127.0.0.0/8"}, {deny, "0.0.0.0/0"}]}, {access, register} diff --git a/test/ejabberd_config_SUITE_data/mongooseim.no_listeners.node_specific_module_node1_v1.cfg b/test/ejabberd_config_SUITE_data/mongooseim.no_listeners.node_specific_module_node1_v1.cfg index 83d3a7c240d..be91ac5f898 100644 --- a/test/ejabberd_config_SUITE_data/mongooseim.no_listeners.node_specific_module_node1_v1.cfg +++ b/test/ejabberd_config_SUITE_data/mongooseim.no_listeners.node_specific_module_node1_v1.cfg @@ -50,7 +50,7 @@ {mod_stream_management, [ ]}, {mod_privacy, []}, {mod_register, [ - {welcome_message, {""}}, + {welcome_message, {"", ""}}, {ip_access, [{allow, "127.0.0.0/8"}, {deny, "0.0.0.0/0"}]}, {access, register} diff --git 
a/test/ejabberd_config_SUITE_data/mongooseim.no_listeners.node_specific_node1_v1.cfg b/test/ejabberd_config_SUITE_data/mongooseim.no_listeners.node_specific_node1_v1.cfg index f7a05de866e..edb70b2bc91 100644 --- a/test/ejabberd_config_SUITE_data/mongooseim.no_listeners.node_specific_node1_v1.cfg +++ b/test/ejabberd_config_SUITE_data/mongooseim.no_listeners.node_specific_node1_v1.cfg @@ -57,7 +57,7 @@ {mod_muc_log, [ {outdir, "/tmp/muclogs_node1"}, {access_log, muc} ]}, {mod_privacy, []}, {mod_register, [ - {welcome_message, {""}}, + {welcome_message, {"", ""}}, {ip_access, [{allow, "127.0.0.0/8"}, {deny, "0.0.0.0/0"}]}, {access, register} diff --git a/test/ejabberd_config_SUITE_data/mongooseim.no_listeners.node_specific_node1_v2.cfg b/test/ejabberd_config_SUITE_data/mongooseim.no_listeners.node_specific_node1_v2.cfg index 36269df3779..301bb7d8716 100644 --- a/test/ejabberd_config_SUITE_data/mongooseim.no_listeners.node_specific_node1_v2.cfg +++ b/test/ejabberd_config_SUITE_data/mongooseim.no_listeners.node_specific_node1_v2.cfg @@ -58,7 +58,7 @@ {mod_muc_log, [ {outdir, "/tmp/muclogs_node1"}, {access_log, muc} ]}, {mod_privacy, []}, {mod_register, [ - {welcome_message, {""}}, + {welcome_message, {"", ""}}, {ip_access, [{allow, "127.0.0.0/8"}, {deny, "0.0.0.0/0"}]}, {access, register} diff --git a/test/ejabberd_config_SUITE_data/mongooseim.no_listeners.node_specific_node2_v1.cfg b/test/ejabberd_config_SUITE_data/mongooseim.no_listeners.node_specific_node2_v1.cfg index c6e319eb923..fd61feadab1 100644 --- a/test/ejabberd_config_SUITE_data/mongooseim.no_listeners.node_specific_node2_v1.cfg +++ b/test/ejabberd_config_SUITE_data/mongooseim.no_listeners.node_specific_node2_v1.cfg @@ -57,7 +57,7 @@ {mod_muc_log, [ {outdir, "/tmp/muclogs_node2"}, {access_log, muc} ]}, {mod_privacy, []}, {mod_register, [ - {welcome_message, {""}}, + {welcome_message, {"", ""}}, {ip_access, [{allow, "127.0.0.0/8"}, {deny, "0.0.0.0/0"}]}, {access, register} diff --git 
a/test/ejabberd_config_SUITE_data/mongooseim.no_listeners.node_specific_node2_v2.cfg b/test/ejabberd_config_SUITE_data/mongooseim.no_listeners.node_specific_node2_v2.cfg index f4ef5046f81..0eb4dcb8b1e 100644 --- a/test/ejabberd_config_SUITE_data/mongooseim.no_listeners.node_specific_node2_v2.cfg +++ b/test/ejabberd_config_SUITE_data/mongooseim.no_listeners.node_specific_node2_v2.cfg @@ -58,7 +58,7 @@ {mod_muc_log, [ {outdir, "/tmp/muclogs_node2"}, {access_log, muc} ]}, {mod_privacy, []}, {mod_register, [ - {welcome_message, {""}}, + {welcome_message, {"", ""}}, {ip_access, [{allow, "127.0.0.0/8"}, {deny, "0.0.0.0/0"}]}, {access, register} diff --git a/test/ejabberd_config_SUITE_data/mongooseim.split.cfg b/test/ejabberd_config_SUITE_data/mongooseim.split.cfg index d021277684d..7c6ecd7fbc5 100644 --- a/test/ejabberd_config_SUITE_data/mongooseim.split.cfg +++ b/test/ejabberd_config_SUITE_data/mongooseim.split.cfg @@ -472,7 +472,7 @@ %% After successful registration, the user receives %% a message with this subject and body. %% - {welcome_message, {""}}, + {welcome_message, {"", ""}}, %% %% When a user registers, send a notification to diff --git a/test/ejabberd_config_SUITE_data/mongooseim.with_mod_offline.cfg b/test/ejabberd_config_SUITE_data/mongooseim.with_mod_offline.cfg index b28e8d29ce2..5a57275c42c 100644 --- a/test/ejabberd_config_SUITE_data/mongooseim.with_mod_offline.cfg +++ b/test/ejabberd_config_SUITE_data/mongooseim.with_mod_offline.cfg @@ -455,7 +455,7 @@ %% After successful registration, the user receives %% a message with this subject and body. 
%% - {welcome_message, {""}}, + {welcome_message, {"", ""}}, %% %% When a user registers, send a notification to diff --git a/test/ejabberd_config_SUITE_data/mongooseim.with_mod_offline.different_opts.cfg b/test/ejabberd_config_SUITE_data/mongooseim.with_mod_offline.different_opts.cfg index 9ead9ea0e0f..08296daa01a 100644 --- a/test/ejabberd_config_SUITE_data/mongooseim.with_mod_offline.different_opts.cfg +++ b/test/ejabberd_config_SUITE_data/mongooseim.with_mod_offline.different_opts.cfg @@ -455,7 +455,7 @@ %% After successful registration, the user receives %% a message with this subject and body. %% - {welcome_message, {""}}, + {welcome_message, {"", ""}}, %% %% When a user registers, send a notification to diff --git a/test/ejabberd_listener_SUITE_data/mongooseim.alt.cfg b/test/ejabberd_listener_SUITE_data/mongooseim.alt.cfg index 35c912f9a43..9e9f2cb7500 100644 --- a/test/ejabberd_listener_SUITE_data/mongooseim.alt.cfg +++ b/test/ejabberd_listener_SUITE_data/mongooseim.alt.cfg @@ -102,7 +102,7 @@ ]}, {mod_privacy, []}, {mod_register, [ - {welcome_message, {""}}, + {welcome_message, {"", ""}}, {ip_access, [{allow, "127.0.0.0/8"}, {deny, "0.0.0.0/0"}]}, diff --git a/test/ejabberd_listener_SUITE_data/mongooseim.basic.cfg b/test/ejabberd_listener_SUITE_data/mongooseim.basic.cfg index 6f0dc57e857..12723c95cff 100644 --- a/test/ejabberd_listener_SUITE_data/mongooseim.basic.cfg +++ b/test/ejabberd_listener_SUITE_data/mongooseim.basic.cfg @@ -102,7 +102,7 @@ ]}, {mod_privacy, []}, {mod_register, [ - {welcome_message, {""}}, + {welcome_message, {"", ""}}, {ip_access, [{allow, "127.0.0.0/8"}, {deny, "0.0.0.0/0"}]}, diff --git a/test/mongoose_config_SUITE.erl b/test/mongoose_config_SUITE.erl index f8848ebc609..ea821cfbe84 100644 --- a/test/mongoose_config_SUITE.erl +++ b/test/mongoose_config_SUITE.erl @@ -84,7 +84,7 @@ match_cases() -> flat_state_case(_C) -> - State = mongoose_config_parser:parse_terms(cool_mod_mam_config()), + State = 
mongoose_config_parser_cfg:parse_terms(cool_mod_mam_config()), ?assertEqual(cool_mod_mam_config_flat(), mongoose_config_reload:state_to_flat_local_opts(State)). @@ -98,10 +98,10 @@ cool_mod_mam_config_flat() -> {[h,<<"localhost">>,module_opt,mod_mam,pool],cool_pool}]. flat_module_subopts_case(_C) -> - State = mongoose_config_parser:parse_terms(gd_config()), + State = mongoose_config_parser_cfg:parse_terms(gd_config()), FlatOpts = mongoose_config_reload:state_to_flat_local_opts(State), NumConnsKey = [h,<<"localhost">>,module_subopt,mod_global_distrib, - connections,num_of_connections], + connections,connections_per_endpoint], ConnsKey = [h,<<"localhost">>,module_opt,mod_global_distrib, connections], RedisServerKey = [h,<<"localhost">>,module_subopt,mod_global_distrib, @@ -112,7 +112,7 @@ flat_module_subopts_case(_C) -> ok. expand_opts_case(_C) -> - State = mongoose_config_parser:parse_terms(cool_mod_mam_config()), + State = mongoose_config_parser_cfg:parse_terms(cool_mod_mam_config()), FlatOpts = mongoose_config_reload:state_to_flat_local_opts(State), ExpandedOpts = mongoose_config_flat:expand_all_opts(FlatOpts), CatOpts = mongoose_config_reload:state_to_categorized_options(State), @@ -123,7 +123,7 @@ expand_opts_case(_C) -> ok. expand_module_subopts_case(_C) -> - State = mongoose_config_parser:parse_terms(gd_config()), + State = mongoose_config_parser_cfg:parse_terms(gd_config()), FlatOpts = mongoose_config_reload:state_to_flat_local_opts(State), ExpandedOpts = mongoose_config_flat:expand_all_opts(FlatOpts), CatOpts = mongoose_config_reload:state_to_categorized_options(State), @@ -142,7 +142,7 @@ gd_config() -> {local_host, "datacenter1.example.com"}, {connections, [ {endpoints, [{"172.16.0.2", 5555}]}, - {num_of_connections, 22}, + {connections_per_endpoint, 22}, {tls_opts, [ {certfile, "/home/user/dc1.pem"}, {cafile, "/home/user/ca.pem"} @@ -174,36 +174,36 @@ auth_config_states() -> [auth_config_node1_config_v1()]. 
auth_config_node1_config_v1() -> - Terms = auth_config(), + State = mongoose_config_parser_cfg:parse_terms(auth_config()), #{mongoose_node => mim1, config_file => "/etc/mongooseim.cfg", - loaded_categorized_options => terms_to_categorized_options(Terms), - ondisc_config_terms => Terms, + loaded_categorized_options => mongoose_config_reload:state_to_categorized_options(State), + ondisc_config_state => State, missing_files => [], required_files => []}. auth_host_local_config() -> - Terms = auth_config(), - CatOpts = terms_to_categorized_options(Terms), + State = auth_config_state(), + CatOpts = mongoose_config_reload:state_to_categorized_options(State), maps:get(host_config, CatOpts). auth_config_state() -> Terms = auth_config(), - mongoose_config_parser:parse_terms(Terms). + mongoose_config_parser_cfg:parse_terms(Terms). %% Check that underscore is not treated as a config macro by config parser parse_config_with_underscore_pattern_case(_C) -> - mongoose_config_parser:parse_terms(node_specific_cool_mod_mam_config()). + mongoose_config_parser_cfg:parse_terms(node_specific_cool_mod_mam_config()). %% Check that we can convert state into node_specific_options list node_specific_options_presents_case(_C) -> - State = mongoose_config_parser:parse_terms(node_specific_cool_mod_mam_config()), + State = mongoose_config_parser_cfg:parse_terms(node_specific_cool_mod_mam_config()), NodeOpts = mongoose_config_parser:state_to_global_opt(node_specific_options, State, missing), ?assertEqual([ [h,'_',module_opt,mod_mam,pool] ], NodeOpts). %% Check that we would not crash if node_specific_options is not defined node_specific_options_missing_case(_C) -> - State = mongoose_config_parser:parse_terms(cool_mod_mam_config()), + State = mongoose_config_parser_cfg:parse_terms(cool_mod_mam_config()), NodeOpts = mongoose_config_parser:state_to_global_opt(node_specific_options, State, missing), ?assertEqual(missing, NodeOpts). 
@@ -212,11 +212,6 @@ node_specific_cool_mod_mam_config() -> {node_specific_options, [ [h,'_',module_opt,mod_mam,pool] ]}, {modules, [{mod_mam, [{pool, cool_pool}]}]}]. - -terms_to_categorized_options(Terms) -> - State = mongoose_config_parser:parse_terms(Terms), - mongoose_config_reload:state_to_categorized_options(State). - states_to_reloading_context_case(_C) -> Context = mongoose_config_reload:states_to_reloading_context(example_config_states()), % ct:pal("Context ~p", [Context]), @@ -252,42 +247,42 @@ example_config_states() -> %% node1_config_v1 configuration both in memory and on disc config_node1_config_v1() -> - Terms = node1_config_v1(), + State = mongoose_config_parser_cfg:parse_terms(node1_config_v1()), #{mongoose_node => mim1, config_file => "/etc/mongooseim.cfg", - loaded_categorized_options => terms_to_categorized_options(Terms), - ondisc_config_terms => Terms, + loaded_categorized_options => mongoose_config_reload:state_to_categorized_options(State), + ondisc_config_state => State, missing_files => [], required_files => []}. %% node2_config_v1 configuration both in memory and on disc config_node2_config_v1() -> - Terms = node2_config_v1(), + State = mongoose_config_parser_cfg:parse_terms(node2_config_v1()), #{mongoose_node => mim2, config_file => "/etc/mongooseim.cfg", - loaded_categorized_options => terms_to_categorized_options(Terms), - ondisc_config_terms => Terms, + loaded_categorized_options => mongoose_config_reload:state_to_categorized_options(State), + ondisc_config_state => State, missing_files => [], required_files => []}. 
%% node1_config_v1 configuration in memory %% node1_config_v2 configuration on disc config_node1_config_v2() -> - Terms_v1 = node1_config_v1(), - Terms_v2 = node1_config_v2(), + State_v1 = mongoose_config_parser_cfg:parse_terms(node1_config_v1()), + State_v2 = mongoose_config_parser_cfg:parse_terms(node1_config_v2()), #{mongoose_node => mim1, config_file => "/etc/mongooseim.cfg", - loaded_categorized_options => terms_to_categorized_options(Terms_v1), - ondisc_config_terms => Terms_v2, + loaded_categorized_options => mongoose_config_reload:state_to_categorized_options(State_v1), + ondisc_config_state => State_v2, missing_files => [], required_files => []}. %% node2_config_v1 configuration in memory %% node2_config_v2 configuration on disc config_node2_config_v2() -> - Terms_v1 = node2_config_v1(), - Terms_v2 = node2_config_v2(), + State_v1 = mongoose_config_parser_cfg:parse_terms(node2_config_v1()), + State_v2 = mongoose_config_parser_cfg:parse_terms(node2_config_v2()), #{mongoose_node => mim2, config_file => "/etc/mongooseim.cfg", - loaded_categorized_options => terms_to_categorized_options(Terms_v1), - ondisc_config_terms => Terms_v2, + loaded_categorized_options => mongoose_config_reload:state_to_categorized_options(State_v1), + ondisc_config_state => State_v2, missing_files => [], required_files => []}. 
node1_config_v1() -> @@ -377,11 +372,10 @@ node2_config_v2() -> get_config_diff_case(_C) -> %% Calculate changes to node1 reload_local to transit from v1 to v2 - Terms_v1 = node1_config_v1(), - Terms_v2 = node1_config_v2(), - CatOptions = terms_to_categorized_options(Terms_v1), - State = mongoose_config_parser:parse_terms(Terms_v2), - Diff = mongoose_config_reload:get_config_diff(State, CatOptions), + State_v1 = mongoose_config_parser_cfg:parse_terms(node1_config_v1()), + State_v2 = mongoose_config_parser_cfg:parse_terms(node1_config_v2()), + CatOptions = mongoose_config_reload:state_to_categorized_options(State_v1), + Diff = mongoose_config_reload:get_config_diff(State_v2, CatOptions), #{local_hosts_changes := #{ to_reload := ToReload }} = Diff, [{ {modules,<<"localhost">>}, OldModules, NewModules }] = ToReload, ?assertEqual(<<"secret">>, get_module_opt(mod_mam, password, OldModules)), @@ -401,7 +395,7 @@ config_with_required_files() -> ]. parse_config_with_required_files_case(_C) -> - State = mongoose_config_parser:parse_terms(config_with_required_files()), + State = mongoose_config_parser_cfg:parse_terms(config_with_required_files()), ?assertEqual(["priv/ssl/localhost_server.pem", "priv/ssl/fake_server.pem"], mongoose_config_parser:state_to_required_files(State)), diff --git a/test/revproxy_SUITE_data/mongooseim.norules.cfg b/test/revproxy_SUITE_data/mongooseim.norules.cfg index 72a30b84ab3..131eee10891 100644 --- a/test/revproxy_SUITE_data/mongooseim.norules.cfg +++ b/test/revproxy_SUITE_data/mongooseim.norules.cfg @@ -476,7 +476,7 @@ %% After successful registration, the user receives %% a message with this subject and body. 
%% - {welcome_message, {""}}, + {welcome_message, {"", ""}}, %% %% When a user registers, send a notification to diff --git a/test/revproxy_SUITE_data/mongooseim.onerule.cfg b/test/revproxy_SUITE_data/mongooseim.onerule.cfg index 36a9ee8252a..6a547b7ea0a 100644 --- a/test/revproxy_SUITE_data/mongooseim.onerule.cfg +++ b/test/revproxy_SUITE_data/mongooseim.onerule.cfg @@ -479,7 +479,7 @@ %% After successful registration, the user receives %% a message with this subject and body. %% - {welcome_message, {""}}, + {welcome_message, {"", ""}}, %% %% When a user registers, send a notification to diff --git a/tools/install b/tools/install index 3f1940ed827..1eb041f03e1 100755 --- a/tools/install +++ b/tools/install @@ -36,7 +36,7 @@ INSTALL_OPTS="-o ${RUNNER_USER} -g ${RUNNER_GROUP}" [ x"$SYSTEM" == x"yes" ] && install $INSTALL_OPTS -m 555 _build/prod/rel/mongooseim/bin/mongooseimctl ${BIN_DIR} [ x"$SYSTEM" == x"yes" ] && install $INSTALL_OPTS -d ${ETC_DIR} [ x"$SYSTEM" == x"yes" ] && install $INSTALL_OPTS -m 664 _build/prod/rel/mongooseim/etc/app.config ${ETC_DIR} -[ x"$SYSTEM" == x"yes" ] && install $INSTALL_OPTS -m 664 _build/prod/rel/mongooseim/etc/mongooseim.cfg ${ETC_DIR} +[ x"$SYSTEM" == x"yes" ] && install $INSTALL_OPTS -m 664 _build/prod/rel/mongooseim/etc/mongooseim.toml ${ETC_DIR} [ x"$SYSTEM" == x"yes" ] && install $INSTALL_OPTS -m 664 _build/prod/rel/mongooseim/etc/vm.args ${ETC_DIR} [ x"$SYSTEM" == x"yes" ] && install $INSTALL_OPTS -m 664 _build/prod/rel/mongooseim/etc/vm.dist.args ${ETC_DIR} install $INSTALL_OPTS -d ${LIB_DIR} @@ -44,7 +44,7 @@ cp -R _build/prod/rel/mongooseim/* ${LIB_DIR} chown -R ${RUNNER_USER}:${RUNNER_GROUP} ${LIB_DIR} [ x"$SYSTEM" == x"yes" ] && { mv ${LIB_DIR}/etc/app.config ${LIB_DIR}/etc/app.config.example - mv ${LIB_DIR}/etc/mongooseim.cfg ${LIB_DIR}/etc/mongooseim.cfg.example + mv ${LIB_DIR}/etc/mongooseim.toml ${LIB_DIR}/etc/mongooseim.toml.example mv ${LIB_DIR}/etc/vm.args ${LIB_DIR}/etc/vm.args.example mv 
${LIB_DIR}/etc/vm.dist.args ${LIB_DIR}/etc/vm.dist.args.example } @@ -66,7 +66,7 @@ log () { [ x"$SYSTEM" == x"yes" ] && log ${BIN_DIR}/mongooseimctl [ x"$SYSTEM" == x"yes" ] && log ${ETC_DIR} [ x"$SYSTEM" == x"yes" ] && log ${ETC_DIR}/app.config -[ x"$SYSTEM" == x"yes" ] && log ${ETC_DIR}/mongooseim.cfg +[ x"$SYSTEM" == x"yes" ] && log ${ETC_DIR}/mongooseim.toml [ x"$SYSTEM" == x"yes" ] && log ${ETC_DIR}/vm.args [ x"$SYSTEM" == x"yes" ] && log ${ETC_DIR}/vm.dist.args find ${LIB_DIR} >> $LOG diff --git a/tools/test_runner/apply_templates.erl b/tools/test_runner/apply_templates.erl index 0f357aee8ad..20c6c1134b3 100644 --- a/tools/test_runner/apply_templates.erl +++ b/tools/test_runner/apply_templates.erl @@ -20,8 +20,8 @@ main([NodeAtom, BuildDirAtom]) -> overlay_vars(Node) -> - Vars = consult_map("rel/vars.config"), - NodeVars = consult_map("rel/" ++ atom_to_list(Node) ++ ".vars.config"), + Vars = consult_map("rel/vars-toml.config"), + NodeVars = consult_map("rel/" ++ atom_to_list(Node) ++ ".vars-toml.config"), %% NodeVars overrides Vars maps:merge(Vars, NodeVars). @@ -43,7 +43,8 @@ simple_templates() -> {"rel/files/app.config", "etc/app.config"}, {"rel/files/vm.args", "etc/vm.args"}, {"rel/files/vm.dist.args", "etc/vm.dist.args"}, - {"rel/files/mongooseim.cfg", "etc/mongooseim.cfg"} + {"rel/files/mongooseim.cfg", "etc/mongooseim.cfg"}, + {"rel/files/mongooseim.toml", "etc/mongooseim.toml"} ]. erts_templates(RelDir) ->