From e8b941c69f9414c1d2f533f25a9ced7710f420c6 Mon Sep 17 00:00:00 2001 From: Emmanuel Leblond Date: Tue, 14 Jan 2025 12:06:14 +0100 Subject: [PATCH 1/3] Improve `BackendConfig` documentation and factorize db CLI parameter parsing --- server/parsec/cli/options.py | 90 +++++++++++++++++++++++++++--------- server/parsec/cli/run.py | 6 --- server/parsec/config.py | 39 ++++++++++++---- 3 files changed, 97 insertions(+), 38 deletions(-) diff --git a/server/parsec/cli/options.py b/server/parsec/cli/options.py index 2130e74f0df..2ba95685b37 100644 --- a/server/parsec/cli/options.py +++ b/server/parsec/cli/options.py @@ -182,6 +182,57 @@ def debug_config_options(fn: Callable[P, R]) -> Callable[Concatenate[bool, P], R DEFAULT_DB_MAX_CONNECTIONS = 7 +def _parse_db_param(value: str) -> BaseDatabaseConfig: + if value.upper() == "MOCKED": + return MockedDatabaseConfig() + elif value.startswith("postgresql://") or value.startswith("postgres://"): + return PostgreSQLDatabaseConfig(url=value, min_connections=5, max_connections=7) + else: + raise click.BadParameter(f"Invalid db type `{value}`") + + +def _get_db_min_max_connections_from_raw(opts: Any) -> tuple[int, int] | None: + raw_db_min_connections = opts.get("db_min_connections", DEFAULT_DB_MIN_CONNECTIONS) + try: + db_min_connections = int(raw_db_min_connections) + except (TypeError, ValueError): + return None + + raw_db_max_connections = opts.get("db_max_connections", DEFAULT_DB_MAX_CONNECTIONS) + try: + db_max_connections = int(raw_db_max_connections) + except (TypeError, ValueError): + return None + + return db_min_connections, db_max_connections + + +class DbConfigOption(click.Option): + """ + DB connection configuration also take into account the min / max connections + constraints that are passed as separated options. + """ + + def handle_parse_result( + self, ctx: click.Context, opts: Any, args: list[str] + ) -> tuple[Any, list[str]]: + value, args = super().handle_parse_result(ctx, opts, args) + assert isinstance(value, BaseDatabaseConfig) + + # Given we hook into Click's option parsing, we cannot access the final0 + # parsed value for other options. So instead we have to parse them ourselves ! + match _get_db_min_max_connections_from_raw(opts): + case (db_min_connections, db_max_connections): + value.set_min_max_connections(db_min_connections, db_max_connections) + case None: + # Provided DB min/max connections options are invalid, we don't + # need to do anything then: Click is going to terminate the process + # as soon as it parse the faulty value. + pass + + return value, args + + class DBMaxConnectionsOption(click.Option): """ DB max connection must be superior to min connection, this is was is @@ -194,20 +245,20 @@ def handle_parse_result( value, args = super().handle_parse_result(ctx, opts, args) assert isinstance(value, int) - # We get the raw data for `db_min_connections` here, so we have to - # parse it ourselves - raw_db_min_connections = opts.get("db_min_connections", DEFAULT_DB_MIN_CONNECTIONS) - try: - db_min_connections = int(raw_db_min_connections) - except (TypeError, ValueError): - # Invalid user-provided data, our min vs max check is useless now since - # Click is going to return an error as soon as it parse the min value. - return value, args - - if db_min_connections > value: - raise click.BadParameter( - "'--db-max-connections' must be greater than '--db-min-connections'" - ) + # Given we hook into Click's option parsing, we cannot access the final0 + # parsed value for other options. 
So instead we have to parse them ourselves ! + match _get_db_min_max_connections_from_raw(opts): + case (db_min_connections, _): + if db_min_connections > value: + raise click.BadParameter( + "'--db-max-connections' must be greater than '--db-min-connections'" + ) + + case None: + # Provided DB min/max connections options are invalid, we don't + # need to do anything then: Click is going to terminate the process + # as soon as it parse the faulty value. + pass return value, args @@ -220,6 +271,7 @@ def db_server_options(fn: Callable[Q, T]) -> Callable[Q, T]: decorators = [ click.option( "--db", + cls=DbConfigOption, required=True, envvar="PARSEC_DB", show_envvar=True, @@ -251,6 +303,7 @@ def db_server_options(fn: Callable[Q, T]) -> Callable[Q, T]: show_default=True, envvar="PARSEC_DB_MAX_CONNECTIONS", show_envvar=True, + type=int, help="Maximum number of connections to the database if using PostgreSQL", ), ] @@ -259,15 +312,6 @@ def db_server_options(fn: Callable[Q, T]) -> Callable[Q, T]: return fn -def _parse_db_param(value: str) -> BaseDatabaseConfig: - if value.upper() == "MOCKED": - return MockedDatabaseConfig() - elif value.startswith("postgresql://") or value.startswith("postgres://"): - return PostgreSQLDatabaseConfig(url=value, min_connections=5, max_connections=7) - else: - raise click.BadParameter(f"Invalid db type `{value}`") - - # Blockstore option diff --git a/server/parsec/cli/run.py b/server/parsec/cli/run.py index 6edb9a4187b..ab17feffe72 100644 --- a/server/parsec/cli/run.py +++ b/server/parsec/cli/run.py @@ -32,7 +32,6 @@ EmailConfig, LogLevel, MockedEmailConfig, - PostgreSQLDatabaseConfig, SmtpEmailConfig, ) from parsec.logging import get_logger @@ -366,11 +365,6 @@ def run_cmd( debug: bool, dev: bool, ) -> None: - # Set min and max connections - if isinstance(db, PostgreSQLDatabaseConfig): - db.min_connections = db_min_connections - db.max_connections = db_max_connections - # Start a local server with cli_exception_handler(debug): diff --git a/server/parsec/config.py b/server/parsec/config.py index 495dc2bcefa..9ab1145e130 100644 --- a/server/parsec/config.py +++ b/server/parsec/config.py @@ -2,6 +2,7 @@ from __future__ import annotations import logging +import math from dataclasses import dataclass, field, fields from enum import Enum from typing import TYPE_CHECKING, Literal @@ -27,6 +28,9 @@ class BaseDatabaseConfig: # Overloaded by children type: Literal["POSTGRESQL", "MOCKED"] + def set_min_max_connections(self, min_connections: int, max_connections: int) -> None: + raise NotImplementedError + def is_mocked(self) -> bool: raise NotImplementedError @@ -39,6 +43,10 @@ class PostgreSQLDatabaseConfig(BaseDatabaseConfig): min_connections: int max_connections: int + def set_min_max_connections(self, min_connections: int, max_connections: int) -> None: + self.min_connections = min_connections + self.max_connections = max_connections + def is_mocked(self) -> bool: return False @@ -53,6 +61,9 @@ def __str__(self) -> str: class MockedDatabaseConfig(BaseDatabaseConfig): type = "MOCKED" + def set_min_max_connections(self, min_connections: int, max_connections: int) -> None: + pass + def is_mocked(self) -> bool: return True @@ -167,20 +178,30 @@ class LogLevel(Enum): CRITICAL = logging.CRITICAL -@dataclass(slots=True) +@dataclass(slots=True, kw_only=True) class BackendConfig: - administration_token: str - + debug: bool db_config: BaseDatabaseConfig - sse_keepalive: float # Set to `math.inf` if disabled - blockstore_config: BaseBlockStoreConfig - email_config: SmtpEmailConfig | 
MockedEmailConfig - proxy_trusted_addresses: str | None - server_addr: ParsecAddr | None - debug: bool + # URL of the server to use when generating redirect URLs. + # This is currently used for two things: + # - For invitation URL in emails + # - In the redirect API (e.g. `GET /redirect/FOO` -> `302 /FOO`) + server_addr: ParsecAddr + + # Bearer token used to authenticate the administration API + administration_token: str + + # Amount of time (in seconds) before a keep alive message is sent to an SSE + # connection. Set to `math.inf` to disable keep alive messages. + sse_keepalive: float = math.inf + + # Comma separated list of IP Addresses, IP Networks, or literals (e.g. UNIX Socket path) to trust with proxy headers + # Use "*" to trust all proxies. If not provided, the gunicorn/uvicorn `FORWARDED_ALLOW_IPS` + # environment variable is used, defaulting to trusting only localhost if absent. + proxy_trusted_addresses: str | None = None organization_bootstrap_webhook_url: str | None = None organization_spontaneous_bootstrap: bool = False From ecc399994e3e1e4db46c42204e6861d12b495d8b Mon Sep 17 00:00:00 2001 From: Emmanuel Leblond Date: Tue, 14 Jan 2025 11:54:57 +0100 Subject: [PATCH 2/3] Add `verbose` option to `backend_factory` (useful to avoid useless log output when used in short-lived CLI commands) --- server/parsec/backend.py | 7 ++++--- server/parsec/cli/run.py | 2 +- server/parsec/cli/testbed.py | 2 +- server/tests/common/backend.py | 2 +- server/tests/test_cross_server_event.py | 4 ++-- 5 files changed, 9 insertions(+), 8 deletions(-) diff --git a/server/parsec/backend.py b/server/parsec/backend.py index e2396e06705..c5fbfe24b49 100644 --- a/server/parsec/backend.py +++ b/server/parsec/backend.py @@ -61,10 +61,11 @@ @asynccontextmanager -async def backend_factory(config: BackendConfig) -> AsyncGenerator[Backend, None]: +async def backend_factory(config: BackendConfig, verbose: bool) -> AsyncGenerator[Backend, None]: # Log the server version and the backend configuration - logger.info("Parsec version", version=server_version) - logger.info("Backend configuration", **config.logging_kwargs()) + if verbose: + logger.info("Parsec version", version=server_version) + logger.info("Backend configuration", **config.logging_kwargs()) if config.db_config.is_mocked(): components_factory = mocked_components_factory diff --git a/server/parsec/cli/run.py b/server/parsec/cli/run.py index ab17feffe72..ec009e8d27a 100644 --- a/server/parsec/cli/run.py +++ b/server/parsec/cli/run.py @@ -480,7 +480,7 @@ async def _run_backend( retry_policy.new_attempt() # Run the backend app (and connect to the database) - async with backend_factory(config=app_config) as backend: + async with backend_factory(config=app_config, verbose=True) as backend: # Connection is successful, reset the retry policy retry_policy.success() diff --git a/server/parsec/cli/testbed.py b/server/parsec/cli/testbed.py index fd940f169ee..7070f0332df 100644 --- a/server/parsec/cli/testbed.py +++ b/server/parsec/cli/testbed.py @@ -247,7 +247,7 @@ async def testbed_backend_factory( administration_token="s3cr3t", organization_spontaneous_bootstrap=True, ) - async with backend_factory(config=config) as backend: + async with backend_factory(config=config, verbose=True) as backend: yield TestbedBackend(backend=backend) diff --git a/server/tests/common/backend.py b/server/tests/common/backend.py index fa65f0c07e6..3a55b5f78de 100644 --- a/server/tests/common/backend.py +++ b/server/tests/common/backend.py @@ -65,7 +65,7 @@ async def backend( async def 
_run_backend(): nonlocal backend - async with backend_factory(config=backend_config) as backend: + async with backend_factory(config=backend_config, verbose=False) as backend: started.set() try: await should_stop.wait() diff --git a/server/tests/test_cross_server_event.py b/server/tests/test_cross_server_event.py index bdbe6012895..372c660f993 100644 --- a/server/tests/test_cross_server_event.py +++ b/server/tests/test_cross_server_event.py @@ -13,8 +13,8 @@ @pytest.mark.postgresql async def test_cross_server_event(backend_config: BackendConfig) -> None: async with ( - backend_factory(config=backend_config) as b1, - backend_factory(config=backend_config) as b2, + backend_factory(config=backend_config, verbose=False) as b1, + backend_factory(config=backend_config, verbose=False) as b2, ): b2_received_events = Queue() From 74b4178ae774573ce3db3de9362c5ee7bccd9280 Mon Sep 17 00:00:00 2001 From: Emmanuel Leblond Date: Tue, 14 Jan 2025 12:11:01 +0100 Subject: [PATCH 3/3] Add sequester-related CLI commands on server --- server/parsec/cli/__init__.py | 25 ++ server/parsec/cli/export.py | 192 +++++++++++++ server/parsec/cli/inspect.py | 212 ++++++++++++++ server/parsec/cli/sequester_create.py | 391 ++++++++++++++++++++++++++ server/parsec/cli/sequester_list.py | 137 +++++++++ server/parsec/cli/sequester_revoke.py | 350 +++++++++++++++++++++++ server/parsec/cli/testbed.py | 10 +- server/parsec/cli/utils.py | 73 ++++- 8 files changed, 1388 insertions(+), 2 deletions(-) create mode 100644 server/parsec/cli/export.py create mode 100644 server/parsec/cli/inspect.py create mode 100644 server/parsec/cli/sequester_create.py create mode 100644 server/parsec/cli/sequester_list.py create mode 100644 server/parsec/cli/sequester_revoke.py diff --git a/server/parsec/cli/__init__.py b/server/parsec/cli/__init__.py index 9dfd95fa3f9..d4b4bdf2c3c 100644 --- a/server/parsec/cli/__init__.py +++ b/server/parsec/cli/__init__.py @@ -7,14 +7,36 @@ import click +from parsec.cli.export import export_realm +from parsec.cli.inspect import human_accesses from parsec.cli.migration import migrate from parsec.cli.options import version_option from parsec.cli.run import run_cmd +from parsec.cli.sequester_create import create_service, generate_service_certificate +from parsec.cli.sequester_list import list_services +from parsec.cli.sequester_revoke import generate_service_revocation_certificate, revoke_service from parsec.cli.testbed import TESTBED_AVAILABLE, testbed_cmd __all__ = ("cli",) +@click.group( + short_help="Handle sequestered organization", +) +@version_option +def server_sequester_cmd() -> None: + pass + + +server_sequester_cmd.add_command(list_services, "list_services") +server_sequester_cmd.add_command(generate_service_certificate, "generate_service_certificate") +server_sequester_cmd.add_command(create_service, "create_service") +server_sequester_cmd.add_command( + generate_service_revocation_certificate, "generate_service_revocation_certificate" +) +server_sequester_cmd.add_command(revoke_service, "revoke_service") + + @click.group() @version_option def cli() -> None: @@ -23,6 +45,9 @@ def cli() -> None: cli.add_command(run_cmd, "run") cli.add_command(migrate, "migrate") +cli.add_command(export_realm, "export_realm") +cli.add_command(human_accesses, "human_accesses") +cli.add_command(server_sequester_cmd, "sequester") if TESTBED_AVAILABLE: cli.add_command(testbed_cmd, "testbed") diff --git a/server/parsec/cli/export.py b/server/parsec/cli/export.py new file mode 100644 index 00000000000..389cf9e949a --- /dev/null 
+++ b/server/parsec/cli/export.py @@ -0,0 +1,192 @@ +# Parsec Cloud (https://parsec.cloud) Copyright (c) BUSL-1.1 2016-present Scille SAS +from __future__ import annotations + +import asyncio +from pathlib import Path +from typing import Any + +import click + +from parsec._parsec import ( + DateTime, + OrganizationID, + VlobID, +) +from parsec.cli.options import ( + blockstore_server_options, + db_server_options, + debug_config_options, + logging_config_options, +) +from parsec.cli.testbed import if_testbed_available +from parsec.cli.utils import cli_exception_handler, start_backend +from parsec.config import ( + BaseBlockStoreConfig, + BaseDatabaseConfig, + LogLevel, +) +from parsec.realm_export import ExportProgressStep, get_earliest_allowed_snapshot_timestamp +from parsec.realm_export import export_realm as do_export_realm + + +class DevOption(click.Option): + def handle_parse_result( + self, ctx: click.Context, opts: Any, args: list[str] + ) -> tuple[Any, list[str]]: + value, args = super().handle_parse_result(ctx, opts, args) + if value: + for key, value in ( + ("debug", True), + ("db", "MOCKED"), + ("blockstore", ("MOCKED",)), + ("with_testbed", "workspace_history"), + ("organization", "WorkspaceHistoryOrgTemplate"), + ("realm", "f0000000000000000000000000000008"), + ): + if key not in opts: + opts[key] = value + + return value, args + + +@click.command( + short_help="Export the content of a realm in order to consult it with a sequester service key" +) +@click.argument("output", type=Path, required=False) +@click.option("--organization", type=OrganizationID, required=True) +@click.option("--realm", type=VlobID.from_hex, required=True) +@click.option("--snapshot-timestamp", type=DateTime.from_rfc3339) +@db_server_options +@blockstore_server_options +# Add --log-level/--log-format/--log-file +@logging_config_options(default_log_level="INFO") +# Add --debug & --version +@debug_config_options +@if_testbed_available( + click.option("--with-testbed", help="Start by populating with a testbed template") +) +@if_testbed_available( + click.option( + "--dev", + cls=DevOption, + is_flag=True, + is_eager=True, + help=( + "Equivalent to `--debug --db=MOCKED --blockstore=MOCKED" + " --with-testbed=workspace_history --organization WorkspaceHistoryOrgTemplate --realm f0000000000000000000000000000008`" + ), + ) +) +def export_realm( + organization: OrganizationID, + realm: VlobID, + snapshot_timestamp: DateTime | None, + output: Path, + db: BaseDatabaseConfig, + db_min_connections: int, + db_max_connections: int, + blockstore: BaseBlockStoreConfig, + log_level: LogLevel, + log_format: str, + log_file: str | None, + debug: bool, + with_testbed: str | None = None, + dev: bool = False, +) -> None: + with cli_exception_handler(debug): + asyncio.run( + _export_realm( + debug=debug, + db_config=db, + blockstore_config=blockstore, + organization_id=organization, + realm_id=realm, + snapshot_timestamp=snapshot_timestamp, + output=output, + with_testbed=with_testbed, + ) + ) + + +async def _export_realm( + db_config: BaseDatabaseConfig, + blockstore_config: BaseBlockStoreConfig, + debug: bool, + with_testbed: str | None, + organization_id: OrganizationID, + realm_id: VlobID, + snapshot_timestamp: DateTime | None, + output: Path | None, +): + snapshot_timestamp = snapshot_timestamp or get_earliest_allowed_snapshot_timestamp() + output = output or Path.cwd() + + if output.is_dir(): + # Output is pointing to a directory, use a default name for the database extract + output_db_path = ( + output + / 
f"parsec-export-{organization_id.str}-realm-{realm_id.hex}-{snapshot_timestamp.to_rfc3339()}.sqlite" + ) + + else: + output_db_path = output + + output_db_display = click.style(str(output_db_path), fg="green") + if output_db_path.exists(): + click.echo( + f"File {output_db_display} already exists, continue the extract from where it was left" + ) + else: + click.echo(f"Creating {output_db_display}") + + click.echo( + f"Use { click.style('^C', fg='yellow') } to stop the export," + " progress won't be lost when restarting the command" + ) + + async with start_backend( + db_config=db_config, + blockstore_config=blockstore_config, + debug=debug, + populate_with_template=with_testbed, + ) as backend: + with click.progressbar( + length=0, label="Starting", show_pos=True, update_min_steps=0 + ) as bar: + + def _on_progress(step: ExportProgressStep): + match step: + case "certificates_start": + bar.finished = False + bar.label = "1/4 Exporting certificates" + bar.length = 1 + bar.update(0) + case "certificates_done": + bar.update(1) + case ("vlobs", exported, total): + bar.finished = False + bar.label = "2/4 Exporting vlobs" + bar.length = total + bar.pos = exported + bar.update(0) + case ("blocks_metadata", exported, total): + bar.finished = False + bar.label = "3/4 Exporting blocks metadata" + bar.length = total + bar.pos = exported + bar.update(0) + case ("blocks_data", exported, total): + bar.finished = False + bar.label = "4/4 Exporting blocks data" + bar.length = total + bar.pos = exported + bar.update(0) + + await do_export_realm( + backend, + organization_id, + realm_id, + snapshot_timestamp, + output_db_path, + _on_progress, + ) diff --git a/server/parsec/cli/inspect.py b/server/parsec/cli/inspect.py new file mode 100644 index 00000000000..f8a177ce8de --- /dev/null +++ b/server/parsec/cli/inspect.py @@ -0,0 +1,212 @@ +# Parsec Cloud (https://parsec.cloud) Copyright (c) BUSL-1.1 2016-present Scille SAS +from __future__ import annotations + +import asyncio +from typing import Any + +import click + +from parsec._parsec import ( + HumanHandle, + OrganizationID, + UserID, + VlobID, +) +from parsec.cli.options import db_server_options, debug_config_options, logging_config_options +from parsec.cli.testbed import if_testbed_available +from parsec.cli.utils import cli_exception_handler, start_backend +from parsec.components.realm import RealmGrantedRole +from parsec.components.user import UserDump +from parsec.config import BaseDatabaseConfig, LogLevel, MockedBlockStoreConfig + + +class DevOption(click.Option): + def handle_parse_result( + self, ctx: click.Context, opts: Any, args: list[str] + ) -> tuple[Any, list[str]]: + value, args = super().handle_parse_result(ctx, opts, args) + if value: + for key, value in ( + ("debug", True), + ("db", "MOCKED"), + ("with_testbed", "coolorg"), + ("organization", "CoolorgOrgTemplate"), + ): + if key not in opts: + opts[key] = value + + return value, args + + +@click.command(short_help="Get information about user&realm accesses") +@click.option("--organization", type=OrganizationID, required=True) +@click.option("--filter", type=str, default="", help="Filter by human handle or user ID") +@db_server_options +# Add --log-level/--log-format/--log-file +@logging_config_options(default_log_level="INFO") +# Add --debug & --version +@debug_config_options +@if_testbed_available( + click.option("--with-testbed", help="Start by populating with a testbed template") +) +@if_testbed_available( + click.option( + "--dev", + cls=DevOption, + is_flag=True, + is_eager=True, + 
help=( + "Equivalent to `--debug --db=MOCKED --with-testbed=coolorg --organization CoolorgOrgTemplate`" + ), + ) +) +def human_accesses( + filter: str, + organization: OrganizationID, + db: BaseDatabaseConfig, + db_max_connections: int, + db_min_connections: int, + log_level: LogLevel, + log_format: str, + log_file: str | None, + debug: bool, + with_testbed: str | None = None, + dev: bool = False, +) -> None: + with cli_exception_handler(debug): + asyncio.run( + _human_accesses( + debug=debug, + db_config=db, + organization_id=organization, + with_testbed=with_testbed, + user_filter=filter, + ) + ) + + +async def _human_accesses( + db_config: BaseDatabaseConfig, + debug: bool, + with_testbed: str | None, + organization_id: OrganizationID, + user_filter: str, +) -> None: + # Can use a dummy blockstore config since we are not going to query it + blockstore_config = MockedBlockStoreConfig() + + async with start_backend( + db_config=db_config, + blockstore_config=blockstore_config, + debug=debug, + populate_with_template=with_testbed, + ) as backend: + dump = await backend.user.test_dump_current_users(organization_id=organization_id) + users = list(dump.values()) + if user_filter: + # Now is a good time to filter out + filter_split = user_filter.split() + filtered_users = [] + for user in users: + # Note user ID is present twice to handle both compact and dash separated formats + # (i.e. `a11cec00-1000-0000-0000-000000000000` vs `a11cec00100000000000000000000000`). + txt = f"{user.human_handle.str if user.human_handle else ''} {user.user_id.hex} {user.user_id}".lower() + if len([True for sq in filter_split if sq in txt]) == len(filter_split): + filtered_users.append(user) + users = filtered_users + + realms_granted_roles = await backend.realm.dump_realms_granted_roles( + organization_id=organization_id + ) + assert isinstance(realms_granted_roles, list) + per_user_granted_roles: dict[UserID, list[RealmGrantedRole]] = {} + for granted_role in realms_granted_roles: + user_granted_roles = per_user_granted_roles.setdefault(granted_role.user_id, []) + user_granted_roles.append(granted_role) + + humans: dict[HumanHandle, list[tuple[UserDump, dict[VlobID, list[RealmGrantedRole]]]]] = {} + for user in users: + human_users = humans.setdefault(user.human_handle, []) + per_user_per_realm_granted_role: dict[VlobID, list[RealmGrantedRole]] = {} + for granted_role in per_user_granted_roles.get(user.user_id, []): + realm_granted_roles = per_user_per_realm_granted_role.setdefault( + granted_role.realm_id, [] + ) + realm_granted_roles.append(granted_role) + + for realm_granted_roles in per_user_per_realm_granted_role.values(): + realm_granted_roles.sort(key=lambda x: x.granted_on) + + human_users.append((user, per_user_per_realm_granted_role)) + + # Typical output to display: + # + # Found 2 results: + # Human John Doe + # + # User 02e0486752d34d6ab3bf8e0befef1935 (REVOKED) + # 2000-01-01T00:00:00Z: Created with profile STANDARD + # 2000-01-02T00:00:00Z: Updated to profile CONTRIBUTOR + # 2000-12-31T00:00:00Z: Revoked + # + # User 9e082a43b51e44ab9858628bac4a61d9 (ADMIN) + # 2001-01-01T00:00:00Z: Created with profile ADMIN + # + # Realm 8006a491f0704040ae9a197ca7501f71 + # 2001-02-01T00:00:00Z: Access OWNER granted + # 2001-02-02T00:00:00Z: Access removed + # 2001-02-03T00:00:00Z: Access READER granted + # + # Realm 109c48b7c931435c913945f08d23432d + # 2001-02-01T00:00:00Z: Access OWNER granted + # + # Human Jane Doe + # + # User baf59386baf740bba93151cdde1beac8 (OUTSIDER) + # 2000-01-01T00:00:00Z: Created 
with profile OUTSIDER + # + # Realm 8006a491f0704040ae9a197ca7501f71 + # 2001-02-01T00:00:00Z: Access READER granted + + def _display_user( + user: UserDump, + per_realm_granted_role: dict[VlobID, list[RealmGrantedRole]], + indent: int, + ) -> None: + base_indent = "\t" * indent + display_user = click.style(user.user_id, fg="green") + if not user.revoked_on: + user_info = f"{user.current_profile}" + else: + user_info = "REVOKED" + print(base_indent + f"User {display_user} ({user_info})") + print(base_indent + f"\t{user.created_on}: Created with profile {user.initial_profile}") + + for profile_update in user.profile_updates: + print( + base_indent + f"\t{profile_update[0]}: Updated to profile {profile_update[1]}" + ) + + if user.revoked_on: + print(base_indent + f"\t{user.revoked_on}: Revoked") + + print() + + for realm_id, granted_roles in per_realm_granted_role.items(): + display_realm = click.style(realm_id.hex, fg="yellow") + print(base_indent + f"\tRealm {display_realm}") + for granted_role in granted_roles: + if granted_role.role is None: + display_role = "Access removed" + else: + display_role = f"Access {granted_role.role.str} granted" + print(base_indent + f"\t\t{granted_role.granted_on}: {display_role}") + + print(f"Found {len(humans)} result(s)") + + for human_handle, human_users in humans.items(): + display_human = click.style(human_handle, fg="green") + print(f"Human {display_human}") + for user, per_realm_granted_roles in human_users: + _display_user(user, per_realm_granted_roles, indent=1) + print() diff --git a/server/parsec/cli/sequester_create.py b/server/parsec/cli/sequester_create.py new file mode 100644 index 00000000000..3eaa9437185 --- /dev/null +++ b/server/parsec/cli/sequester_create.py @@ -0,0 +1,391 @@ +# Parsec Cloud (https://parsec.cloud) Copyright (c) BUSL-1.1 2016-present Scille SAS +from __future__ import annotations + +import asyncio +import textwrap +from base64 import b64decode, b64encode +from pathlib import Path +from typing import Any + +import click + +from parsec._parsec import ( + CryptoError, + DateTime, + OrganizationID, + SequesterPublicKeyDer, + SequesterServiceCertificate, + SequesterServiceID, + SequesterSigningKeyDer, + SequesterVerifyKeyDer, +) +from parsec.ballpark import RequireGreaterTimestamp +from parsec.cli.options import db_server_options, debug_config_options, logging_config_options +from parsec.cli.testbed import if_testbed_available +from parsec.cli.utils import cli_exception_handler, start_backend +from parsec.components.organization import Organization, OrganizationGetBadOutcome +from parsec.components.sequester import ( + SequesterCreateServiceStoreBadOutcome, + SequesterCreateServiceValidateBadOutcome, + SequesterServiceConfig, + SequesterServiceType, +) +from parsec.config import BaseDatabaseConfig, LogLevel, MockedBlockStoreConfig + +SEQUESTER_SERVICE_CERTIFICATE_PEM_HEADER = "-----BEGIN PARSEC SEQUESTER SERVICE CERTIFICATE-----" +SEQUESTER_SERVICE_CERTIFICATE_PEM_FOOTER = "-----END PARSEC SEQUESTER SERVICE CERTIFICATE-----" + + +def _dump_sequester_service_certificate_pem( + certificate: SequesterServiceCertificate, + authority_signing_key: SequesterSigningKeyDer, +) -> str: + signed = authority_signing_key.sign(certificate.dump()) + return "\n".join( + ( + SEQUESTER_SERVICE_CERTIFICATE_PEM_HEADER, + *textwrap.wrap(b64encode(signed).decode(), width=64), + SEQUESTER_SERVICE_CERTIFICATE_PEM_FOOTER, + "", + ) + ) + + +def _load_sequester_service_certificate_pem( + pem: str, authority_verify_key: SequesterVerifyKeyDer +) -> 
tuple[SequesterServiceCertificate, bytes]: + err_msg = "Not a valid Parsec sequester service certificate PEM file" + try: + header, *content, footer = pem.strip().splitlines() + except ValueError as exc: + raise ValueError(err_msg) from exc + + if header != SEQUESTER_SERVICE_CERTIFICATE_PEM_HEADER: + raise ValueError(f"{err_msg}: missing `{SEQUESTER_SERVICE_CERTIFICATE_PEM_HEADER}` header") + if footer != SEQUESTER_SERVICE_CERTIFICATE_PEM_FOOTER: + raise ValueError(f"{err_msg}: missing `{SEQUESTER_SERVICE_CERTIFICATE_PEM_FOOTER}` footer") + + try: + signed = b64decode("".join(content)) + return ( + SequesterServiceCertificate.load(authority_verify_key.verify(signed)), + signed, + ) + except (ValueError, CryptoError) as exc: + raise ValueError(f"{err_msg}: invalid body ({exc})") from exc + + +SERVICE_TYPE_CHOICES: dict[str, SequesterServiceType] = { + service.value: service for service in SequesterServiceType +} + + +class GenerateServiceCertificateDevOption(click.Option): + def handle_parse_result( + self, ctx: click.Context, opts: Any, args: list[str] + ) -> tuple[Any, list[str]]: + value, args = super().handle_parse_result(ctx, opts, args) + if value: + import os + import tempfile + + from parsec._parsec import testbed + + template_content = testbed.test_get_testbed_template("sequestered") + assert template_content is not None + event = template_content.events[0] + assert isinstance(event, testbed.TestbedEventBootstrapOrganization) + assert event.sequester_authority_signing_key is not None + + # Note this file is not deleted when the application ends, this is considered + # okay since it is only used for niche testing purpose. + file_fd, file_path = tempfile.mkstemp() + os.write(file_fd, event.sequester_authority_signing_key.dump_pem().encode("utf8")) + os.close(file_fd) + + for key, value in ( + ("debug", True), + ("authority_private_key", file_path), + ): + if key not in opts: + opts[key] = value + + return value, args + + +@click.command( + short_help="Generate a certificate for a new sequester service", + help="""Generate a certificate for a new sequester service. + + A sequester service certificate references a RSA key that will be used + to decrypt all the data encrypted within the sequester organization. + + To be accepted, the RSA key must be signed by the sequester authority + RSA key (configured during organization bootstrap). + """, +) +@click.option("--service-label", type=str, help="New service name", required=True) +@click.option( + "--service-public-key", + help="File containing the service encryption public key used to encrypt data to the sequester service", + type=click.Path(exists=True, file_okay=True, dir_okay=False, path_type=Path), + required=True, +) +@click.option( + "--authority-private-key", + help="File containing the private authority key use. 
Used to sign the encryption key.", + type=click.Path(exists=True, file_okay=True, dir_okay=False, path_type=Path), + required=True, +) +@click.argument("output", required=False) +# Add --debug & --version +@debug_config_options +@if_testbed_available( + click.option( + "--dev", + cls=GenerateServiceCertificateDevOption, + is_flag=True, + is_eager=True, + help=( + "Equivalent to `--debug --authority-private-key=`" + ), + ) +) +def generate_service_certificate( + service_label: str, + service_public_key: Path, + authority_private_key: Path, + output: str | None, + debug: bool, + dev: bool = False, +) -> None: + output = output or str(Path.cwd()) + + with cli_exception_handler(debug): + # 1) Load key files + + service_key = SequesterPublicKeyDer.load_pem(service_public_key.read_text()) + authority_key = SequesterSigningKeyDer.load_pem(authority_private_key.read_text()) + + # 2) Generate certificate + + service_id = SequesterServiceID.new() + timestamp = DateTime.now() + certificate = SequesterServiceCertificate( + timestamp=timestamp, + service_id=service_id, + service_label=service_label, + encryption_key_der=service_key, + ) + + # 3) Write the certificate as PEM in output file + + pem_content = _dump_sequester_service_certificate_pem( + certificate=certificate, + authority_signing_key=authority_key, + ) + + cooked_output = Path(output) + if cooked_output.is_dir(): + output_file = ( + cooked_output + / f"sequester_service_certificate-{service_id.hex}-{timestamp.to_rfc3339()}.pem" + ) + else: + output_file = cooked_output + output_file.write_bytes(pem_content.encode("utf8")) + + display_service = f"{click.style(service_label, fg='yellow')} (id: {click.style(service_id.hex, fg='yellow')}, timestamp: {click.style(timestamp, fg='yellow')})" + display_file = click.style(output_file, fg="green") + click.echo(f"Sequester service certificate {display_service} exported in {display_file}") + click.echo( + f"Use {click.style('parsec sequester create_service', fg='yellow')} command to add it to an organization" + ) + + +class CreateServiceDevOption(click.Option): + def handle_parse_result( + self, ctx: click.Context, opts: Any, args: list[str] + ) -> tuple[Any, list[str]]: + value, args = super().handle_parse_result(ctx, opts, args) + if value: + for key, value in ( + ("debug", True), + ("db", "MOCKED"), + ("with_testbed", "sequestered"), + ("organization", "SequesteredOrgTemplate"), + ): + if key not in opts: + opts[key] = value + + return value, args + + +@click.command(short_help="Create a new sequester service from its existing certificate") +@click.option( + "--service-certificate", + help="File containing the sequester service certificate (previously generated by `parsec sequester generate_service_certificate` command).", + type=click.File("r", encoding="utf8"), + required=True, + metavar="CERTIFICATE.pem", +) +@click.option( + "--organization", + type=OrganizationID, + help="Organization ID where to register the service", + required=True, +) +# TODO: Webhook sequester service not implemented yet +# @click.option( +# "--service-type", +# type=click.Choice(list(SERVICE_TYPE_CHOICES.keys()), case_sensitive=False), +# default=SequesterServiceType.STORAGE.value, +# help="Service type", +# ) +# @click.option( +# "--webhook-url", +# type=str, +# default=None, +# help="[Service Type webhook only] webhook url used to send encrypted service data", +# ) +@db_server_options +# Add --log-level/--log-format/--log-file +@logging_config_options(default_log_level="INFO") +# Add --debug & --version 
+@debug_config_options +@if_testbed_available( + click.option("--with-testbed", help="Start by populating with a testbed template") +) +@if_testbed_available( + click.option( + "--dev", + cls=CreateServiceDevOption, + is_flag=True, + is_eager=True, + help=( + "Equivalent to `--debug --db=MOCKED --with-testbed=sequestered --organization SequesteredOrgTemplate`" + ), + ) +) +def create_service( + service_certificate: click.utils.LazyFile, + organization: OrganizationID, + db: BaseDatabaseConfig, + db_max_connections: int, + db_min_connections: int, + # TODO: Webhook sequester service not implemented yet + # service_type: str, + # webhook_url: str | None, + log_level: LogLevel, + log_format: str, + log_file: str | None, + debug: bool, + with_testbed: str | None = None, + dev: bool = False, +) -> None: + with cli_exception_handler(debug): + sequester_service_config = SequesterServiceType.STORAGE + + # TODO: Webhook sequester service not implemented yet + # cooked_service_type: SequesterServiceType = SERVICE_TYPE_CHOICES[service_type] + # # Check service type + # if webhook_url is not None and cooked_service_type != SequesterServiceType.WEBHOOK: + # raise RuntimeError( + # f"Incompatible service type {cooked_service_type} with webhook_url option\nwebhook_url can only be used with {SequesterServiceType.WEBHOOK}." + # ) + # if cooked_service_type == SequesterServiceType.WEBHOOK and not webhook_url: + # raise RuntimeError( + # "Webhook sequester service requires webhook_url argument" + # ) + + service_certificate_pem = service_certificate.read() + + asyncio.run( + _create_service( + db_config=db, + debug=debug, + with_testbed=with_testbed, + organization_id=organization, + service_certificate_pem=service_certificate_pem, + sequester_service_config=sequester_service_config, + ) + ) + click.echo(click.style("Service created", fg="green")) + + +async def _create_service( + db_config: BaseDatabaseConfig, + debug: bool, + with_testbed: str | None, + organization_id: OrganizationID, + service_certificate_pem: str, + sequester_service_config: SequesterServiceConfig, +) -> None: + # Can use a dummy blockstore config since we are not going to query it + blockstore_config = MockedBlockStoreConfig() + + async with start_backend( + db_config=db_config, + blockstore_config=blockstore_config, + debug=debug, + populate_with_template=with_testbed, + ) as backend: + # 1) Retrieve the organization and check it is compatible + + outcome = await backend.organization.get(organization_id) + match outcome: + case Organization() as org: + pass + case OrganizationGetBadOutcome.ORGANIZATION_NOT_FOUND: + raise RuntimeError("Organization doesn't exist") + + if not org.is_bootstrapped: + raise RuntimeError("Organization is not bootstrapped") + + if not org.is_sequestered: + raise RuntimeError("Organization is not sequestered") + + # 2) Validate the certificate + + assert org.sequester_authority_verify_key_der is not None + + ( + service_certificate_cooked, + service_certificate_raw, + ) = _load_sequester_service_certificate_pem( + pem=service_certificate_pem, + authority_verify_key=org.sequester_authority_verify_key_der, + ) + + # 3) Insert the certificate + + outcome = await backend.sequester.create_service( + now=DateTime.now(), + organization_id=organization_id, + service_certificate=service_certificate_raw, + config=sequester_service_config, + ) + match outcome: + case SequesterServiceCertificate(): + pass + + case RequireGreaterTimestamp() as err: + raise RuntimeError( + f"Cannot import certificate since its timestamp 
(`{service_certificate_cooked.timestamp}`) is older " + f"than the most recent sequester certificate already on the on server (`{err.strictly_greater_than}`)" + ) + + case SequesterCreateServiceStoreBadOutcome.SEQUESTER_SERVICE_ALREADY_EXISTS: + raise RuntimeError( + f"Sequester service with ID `{service_certificate_cooked.service_id}` already exists" + ) + + case SequesterCreateServiceStoreBadOutcome.ORGANIZATION_NOT_FOUND: + raise RuntimeError("Organization doesn't exist") + + case SequesterCreateServiceStoreBadOutcome.SEQUESTER_DISABLED: + raise RuntimeError("Organization is not sequestered") + + # Should never occur since we have already checked the validity at step 2 + case SequesterCreateServiceValidateBadOutcome.INVALID_CERTIFICATE: + assert False diff --git a/server/parsec/cli/sequester_list.py b/server/parsec/cli/sequester_list.py new file mode 100644 index 00000000000..751162dfbb8 --- /dev/null +++ b/server/parsec/cli/sequester_list.py @@ -0,0 +1,137 @@ +# Parsec Cloud (https://parsec.cloud) Copyright (c) BUSL-1.1 2016-present Scille SAS +from __future__ import annotations + +import asyncio +from typing import Any + +import click + +from parsec._parsec import ( + OrganizationID, +) +from parsec.cli.options import db_server_options, debug_config_options, logging_config_options +from parsec.cli.testbed import if_testbed_available +from parsec.cli.utils import cli_exception_handler, start_backend +from parsec.components.sequester import ( + BaseSequesterService, + SequesterGetOrganizationServicesBadOutcome, + WebhookSequesterService, +) +from parsec.config import BaseDatabaseConfig, LogLevel, MockedBlockStoreConfig + + +class DevOption(click.Option): + def handle_parse_result( + self, ctx: click.Context, opts: Any, args: list[str] + ) -> tuple[Any, list[str]]: + value, args = super().handle_parse_result(ctx, opts, args) + if value: + for key, value in ( + ("debug", True), + ("db", "MOCKED"), + ("with_testbed", "sequestered"), + ("organization", "SequesteredOrgTemplate"), + ): + if key not in opts: + opts[key] = value + + return value, args + + +@click.command(short_help="List sequester services in a given organization") +@click.option("--organization", type=OrganizationID, help="Organization ID", required=True) +@db_server_options +# Add --log-level/--log-format/--log-file +@logging_config_options(default_log_level="INFO") +# Add --debug & --version +@debug_config_options +@if_testbed_available( + click.option("--with-testbed", help="Start by populating with a testbed template") +) +@if_testbed_available( + click.option( + "--dev", + cls=DevOption, + is_flag=True, + is_eager=True, + help=( + "Equivalent to `--debug --db=MOCKED --with-testbed=sequestered --organization SequesteredOrgTemplate`" + ), + ) +) +def list_services( + organization: OrganizationID, + db: BaseDatabaseConfig, + db_max_connections: int, + db_min_connections: int, + log_level: LogLevel, + log_format: str, + log_file: str | None, + debug: bool, + with_testbed: str | None = None, + dev: bool = False, +) -> None: + with cli_exception_handler(debug): + asyncio.run( + _list_services( + db_config=db, + debug=debug, + with_testbed=with_testbed, + organization_id=organization, + ) + ) + + +def _display_service(service: BaseSequesterService) -> None: + display_service_id = click.style(service.service_id.hex, fg="yellow") + display_service_label = click.style(service.service_label, fg="green") + click.echo(f"Service {display_service_label} (id: {display_service_id})") + click.echo(f"\tCreated on: {service.created_on}") + 
click.echo(f"\tService type: {service.service_type}") + if isinstance(service, WebhookSequesterService): + click.echo(f"\tWebhook endpoint URL {service.webhook_url}") + if service.is_revoked: + display_revoked = click.style("Revoked", fg="red") + click.echo(f"\t{display_revoked} on: {service.revoked_on}") + + +async def _list_services( + db_config: BaseDatabaseConfig, + debug: bool, + with_testbed: str | None, + organization_id: OrganizationID, +) -> None: + # Can use a dummy blockstore config since we are not going to query it + blockstore_config = MockedBlockStoreConfig() + + async with start_backend( + db_config=db_config, + blockstore_config=blockstore_config, + debug=debug, + populate_with_template=with_testbed, + ) as backend: + # 1) Retrieve the organization and check it is compatible + + outcome = await backend.sequester.get_organization_services(organization_id=organization_id) + match outcome: + case list() as services: + pass + case SequesterGetOrganizationServicesBadOutcome.ORGANIZATION_NOT_FOUND: + raise RuntimeError("Organization doesn't exist") + case SequesterGetOrganizationServicesBadOutcome.SEQUESTER_DISABLED: + raise RuntimeError("Organization is not sequestered") + + display_services_count = click.style(len(services), fg="green") + click.echo(f"Found {display_services_count} sequester service(s)") + + # Display active services first, and order them by creation date + + active = (service for service in services if not service.is_revoked) + for service in sorted(active, key=lambda s: s.created_on): + print() + _display_service(service) + + revoked = (service for service in services if service.is_revoked) + for service in sorted(revoked, key=lambda s: s.created_on): + print() + _display_service(service) diff --git a/server/parsec/cli/sequester_revoke.py b/server/parsec/cli/sequester_revoke.py new file mode 100644 index 00000000000..904aaa24aa9 --- /dev/null +++ b/server/parsec/cli/sequester_revoke.py @@ -0,0 +1,350 @@ +# Parsec Cloud (https://parsec.cloud) Copyright (c) BUSL-1.1 2016-present Scille SAS +from __future__ import annotations + +import asyncio +import textwrap +from base64 import b64decode, b64encode +from pathlib import Path +from typing import Any + +import click + +from parsec._parsec import ( + CryptoError, + DateTime, + OrganizationID, + SequesterRevokedServiceCertificate, + SequesterServiceID, + SequesterSigningKeyDer, + SequesterVerifyKeyDer, +) +from parsec.ballpark import RequireGreaterTimestamp +from parsec.cli.options import db_server_options, debug_config_options, logging_config_options +from parsec.cli.testbed import if_testbed_available +from parsec.cli.utils import cli_exception_handler, start_backend +from parsec.components.organization import Organization, OrganizationGetBadOutcome +from parsec.components.sequester import ( + SequesterRevokeServiceStoreBadOutcome, + SequesterRevokeServiceValidateBadOutcome, +) +from parsec.config import BaseDatabaseConfig, LogLevel, MockedBlockStoreConfig + +SEQUESTER_SERVICE_REVOCATION_CERTIFICATE_PEM_HEADER = ( + "-----BEGIN PARSEC SEQUESTER SERVICE REVOCATION CERTIFICATE-----" +) +SEQUESTER_SERVICE_REVOCATION_CERTIFICATE_PEM_FOOTER = ( + "-----END PARSEC SEQUESTER SERVICE REVOCATION CERTIFICATE-----" +) + + +def _dump_sequester_service_revocation_certificate_pem( + certificate: SequesterRevokedServiceCertificate, + authority_signing_key: SequesterSigningKeyDer, +) -> str: + signed = authority_signing_key.sign(certificate.dump()) + return "\n".join( + ( + SEQUESTER_SERVICE_REVOCATION_CERTIFICATE_PEM_HEADER, + 
*textwrap.wrap(b64encode(signed).decode(), width=64), + SEQUESTER_SERVICE_REVOCATION_CERTIFICATE_PEM_FOOTER, + "", + ) + ) + + +def _load_sequester_service_revocation_certificate_pem( + pem: str, authority_verify_key: SequesterVerifyKeyDer +) -> tuple[SequesterRevokedServiceCertificate, bytes]: + err_msg = "Not a valid Parsec sequester service revocation certificate PEM file" + try: + header, *content, footer = pem.strip().splitlines() + except ValueError as exc: + raise ValueError(err_msg) from exc + + if header != SEQUESTER_SERVICE_REVOCATION_CERTIFICATE_PEM_HEADER: + raise ValueError( + f"{err_msg}: missing `{SEQUESTER_SERVICE_REVOCATION_CERTIFICATE_PEM_HEADER}` header" + ) + if footer != SEQUESTER_SERVICE_REVOCATION_CERTIFICATE_PEM_FOOTER: + raise ValueError( + f"{err_msg}: missing `{SEQUESTER_SERVICE_REVOCATION_CERTIFICATE_PEM_FOOTER}` footer" + ) + + try: + signed = b64decode("".join(content)) + return ( + SequesterRevokedServiceCertificate.load(authority_verify_key.verify(signed)), + signed, + ) + except (ValueError, CryptoError) as exc: + raise ValueError(f"{err_msg}: invalid body ({exc})") from exc + + +class SequesterBackendCliError(Exception): + pass + + +class GenerateServiceRevocationCertificateDevOption(click.Option): + def handle_parse_result( + self, ctx: click.Context, opts: Any, args: list[str] + ) -> tuple[Any, list[str]]: + value, args = super().handle_parse_result(ctx, opts, args) + if value: + import os + import tempfile + + from parsec._parsec import testbed + + template_content = testbed.test_get_testbed_template("sequestered") + assert template_content is not None + event = template_content.events[0] + assert isinstance(event, testbed.TestbedEventBootstrapOrganization) + assert event.sequester_authority_signing_key is not None + + # Note this file is not deleted when the application ends, this is considered + # okay since it is only used for niche testing purposes. + file_fd, file_path = tempfile.mkstemp() + os.write(file_fd, event.sequester_authority_signing_key.dump_pem().encode("utf8")) + os.close(file_fd) + + for key, value in ( + ("debug", True), + ("authority_private_key", file_path), + ): + if key not in opts: + opts[key] = value + + return value, args + + +@click.command(short_help="Generate a revocation certificate for an existing sequester service") +@click.option( + "--service-id", + type=SequesterServiceID.from_hex, + required=True, +) +@click.option( + "--authority-private-key", + help="File containing the authority private key. 
Used to sign the revocation certificate.", + type=click.Path(exists=True, file_okay=True, dir_okay=False, path_type=Path), + required=True, +) +@click.argument("output", required=False) +# Add --debug & --version +@debug_config_options +@if_testbed_available( + click.option( + "--dev", + cls=GenerateServiceRevocationCertificateDevOption, + is_flag=True, + is_eager=True, + help=( + "Equivalent to `--debug --authority-private-key=`" + ), + ) +) +def generate_service_revocation_certificate( + service_id: SequesterServiceID, + authority_private_key: Path, + output: str | None, + debug: bool, + dev: bool = False, +) -> None: + output = output or str(Path.cwd()) + + with cli_exception_handler(debug): + # 1) Load key files + + authority_key = SequesterSigningKeyDer.load_pem(authority_private_key.read_text()) + + # 2) Generate certificate + + timestamp = DateTime.now() + certificate = SequesterRevokedServiceCertificate( + timestamp=timestamp, + service_id=service_id, + ) + + # 3) Write the certificate as PEM in output file + + pem_content = _dump_sequester_service_revocation_certificate_pem( + certificate=certificate, + authority_signing_key=authority_key, + ) + + cooked_output = Path(output) + if cooked_output.is_dir(): + output_file = ( + cooked_output + / f"sequester_service_revocation_certificate-{service_id.hex}-{timestamp.to_rfc3339()}.pem" + ) + else: + output_file = cooked_output + output_file.write_bytes(pem_content.encode("utf8")) + + display_service = f"(sequester service ID: {click.style(service_id.hex, fg='yellow')}, timestamp: {click.style(timestamp, fg='yellow')})" + display_file = click.style(output_file, fg="green") + click.echo( + f"Sequester service revocation certificate {display_service} exported in {display_file}" + ) + click.echo( + f"Use {click.style('parsec sequester revoke_service', fg='yellow')} command to apply it to an organization" + ) + + +class RevokeServiceDevOption(click.Option): + def handle_parse_result( + self, ctx: click.Context, opts: Any, args: list[str] + ) -> tuple[Any, list[str]]: + value, args = super().handle_parse_result(ctx, opts, args) + if value: + for key, value in ( + ("debug", True), + ("db", "MOCKED"), + ("with_testbed", "sequestered"), + ("organization", "SequesteredOrgTemplate"), + ): + if key not in opts: + opts[key] = value + + return value, args + + +@click.command(short_help="Revoke a sequester service from its existing revocation certificate") +@click.option( + "--service-revocation-certificate", + help="File containing the sequester service revocation certificate (previously generated by `parsec sequester generate_service_revocation_certificate` command).", + type=click.File("r", encoding="utf8"), + required=True, + metavar="CERTIFICATE.pem", +) +@click.option( + "--organization", + type=OrganizationID, + help="Organization ID where the service is registered", + required=True, +) +@db_server_options +# Add --log-level/--log-format/--log-file +@logging_config_options(default_log_level="INFO") +# Add --debug & --version +@debug_config_options +@if_testbed_available( + click.option("--with-testbed", help="Start by populating with a testbed template") +) +@if_testbed_available( + click.option( + "--dev", + cls=RevokeServiceDevOption, + is_flag=True, + is_eager=True, + help=( + "Equivalent to `--debug --db=MOCKED --with-testbed=sequestered --organization SequesteredOrgTemplate`" + ), + ) +) +def revoke_service( + service_revocation_certificate: click.utils.LazyFile, + organization: OrganizationID, + db: BaseDatabaseConfig, + db_max_connections: int, + db_min_connections: 
int, + log_level: LogLevel, + log_format: str, + log_file: str | None, + debug: bool, + with_testbed: str | None = None, + dev: bool = False, +) -> None: + with cli_exception_handler(debug): + service_revocation_certificate_pem = service_revocation_certificate.read() + + asyncio.run( + _revoke_service( + db_config=db, + debug=debug, + with_testbed=with_testbed, + organization_id=organization, + service_revocation_certificate_pem=service_revocation_certificate_pem, + ) + ) + click.echo(click.style("Service revoked", fg="green")) + + +async def _revoke_service( + db_config: BaseDatabaseConfig, + debug: bool, + with_testbed: str | None, + organization_id: OrganizationID, + service_revocation_certificate_pem: str, +) -> None: + # Can use a dummy blockstore config since we are not going to query it + blockstore_config = MockedBlockStoreConfig() + + async with start_backend( + db_config=db_config, + blockstore_config=blockstore_config, + debug=debug, + populate_with_template=with_testbed, + ) as backend: + # 1) Retrieve the organization and check it is compatible + + outcome = await backend.organization.get(organization_id) + match outcome: + case Organization() as org: + pass + case OrganizationGetBadOutcome.ORGANIZATION_NOT_FOUND: + raise RuntimeError("Organization doesn't exist") + + if not org.is_bootstrapped: + raise RuntimeError("Organization is not bootstrapped") + + if not org.is_sequestered: + raise RuntimeError("Organization is not sequestered") + + # 2) Validate the certificate + + assert org.sequester_authority_verify_key_der is not None + + ( + revoked_service_cooked, + revoked_service_raw, + ) = _load_sequester_service_revocation_certificate_pem( + pem=service_revocation_certificate_pem, + authority_verify_key=org.sequester_authority_verify_key_der, + ) + + # 3) Insert the certificate + + outcome = await backend.sequester.revoke_service( + now=DateTime.now(), + organization_id=organization_id, + revoked_service_certificate=revoked_service_raw, + ) + + match outcome: + case SequesterRevokedServiceCertificate(): + pass + + case RequireGreaterTimestamp() as err: + raise RuntimeError( + f"Cannot import certificate since its timestamp (`{revoked_service_cooked.timestamp}`) is older " + f"than the most recent sequester certificate already on the server (`{err.strictly_greater_than}`)" + ) + + case SequesterRevokeServiceStoreBadOutcome.ORGANIZATION_NOT_FOUND: + raise RuntimeError("Organization doesn't exist") + + case SequesterRevokeServiceStoreBadOutcome.SEQUESTER_DISABLED: + raise RuntimeError("Organization is not sequestered") + + case SequesterRevokeServiceStoreBadOutcome.SEQUESTER_SERVICE_NOT_FOUND: + raise RuntimeError("Sequester service not found") + + case SequesterRevokeServiceStoreBadOutcome.SEQUESTER_SERVICE_ALREADY_REVOKED: + raise RuntimeError("Sequester service already revoked") + + # Should never occur since we have already checked the validity at step 2 + case SequesterRevokeServiceValidateBadOutcome.INVALID_CERTIFICATE: + assert False diff --git a/server/parsec/cli/testbed.py b/server/parsec/cli/testbed.py index 7070f0332df..de461fbbf0b 100644 --- a/server/parsec/cli/testbed.py +++ b/server/parsec/cli/testbed.py @@ -5,7 +5,7 @@ import asyncio import tempfile from contextlib import asynccontextmanager -from typing import Any, AsyncIterator, TypeAlias +from typing import Any, AsyncIterator, Callable, TypeAlias import anyio import click @@ -44,6 +44,14 @@ logger: structlog.stdlib.BoundLogger = structlog.get_logger() +# Helper for other CLI commands to add 
dev-related options +def if_testbed_available[FC](decorator: Callable[[FC], FC]) -> Callable[[FC], FC]: + if TESTBED_AVAILABLE: + return decorator + else: + return lambda f: f + + DEFAULT_PORT = 6770 diff --git a/server/parsec/cli/utils.py b/server/parsec/cli/utils.py index c11205711ff..65a9cbce2af 100644 --- a/server/parsec/cli/utils.py +++ b/server/parsec/cli/utils.py @@ -21,7 +21,17 @@ import anyio import click -from parsec._parsec import DateTime +from parsec._parsec import DateTime, ParsecAddr +from parsec.backend import Backend, backend_factory +from parsec.config import ( + BackendConfig, + BaseBlockStoreConfig, + BaseDatabaseConfig, + SmtpEmailConfig, +) +from parsec.logging import get_logger + +logger = get_logger() class SchemesInternalType(TypedDict): @@ -252,3 +262,64 @@ def convert( py_datetime = py_datetime.replace(tzinfo=datetime.timezone.utc) return DateTime.from_timestamp_micros(int(py_datetime.timestamp() * 1_000_000)) + + +@asynccontextmanager +async def start_backend( + db_config: BaseDatabaseConfig, + blockstore_config: BaseBlockStoreConfig, + debug: bool, + populate_with_template: str | None = None, +): + """ + Start a backend to be used by a short-lived CLI command (i.e. not the `parsec run` server). + """ + + class CliBackendConfig(BackendConfig): + __slots__ = () + + @property + def administration_token(self) -> str: # type: ignore[reportIncompatibleVariableOverride] + assert False, "Unused configuration" + + @property + def email_config(self) -> SmtpEmailConfig: # type: ignore[reportIncompatibleVariableOverride] + assert False, "Unused configuration" + + @property + def server_addr(self) -> ParsecAddr: # type: ignore[reportIncompatibleVariableOverride] + assert False, "Unused configuration" + + config = BackendConfig( + debug=debug, + db_config=db_config, + blockstore_config=blockstore_config, + administration_token=None, # type: ignore + email_config=None, # type: ignore + server_addr=None, # type: ignore + ) + # Cannot directly initialize a `CliBackendConfig` since its + # `administration_token`/`email_config`/`server_addr` fields have no setter. + # + # Also note that swapping the class of an existing instance is totally fine + # as long as both classes have the same fields. + config.__class__ = CliBackendConfig + + async with backend_factory(config=config, verbose=False) as backend: + if populate_with_template is not None: + await _populate_backend(backend, populate_with_template) + + yield backend + + +async def _populate_backend(backend: Backend, testbed_template: str) -> None: + from parsec._parsec import testbed + + template_content = testbed.test_get_testbed_template(testbed_template) + if template_content is None: + raise RuntimeError(f"Testbed template `{testbed_template}` not found") + + organization_id = await backend.test_load_template(template_content) + logger.warning( + f"Populating backend with testbed template `{testbed_template}` as organization `{organization_id}`" + )
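
For reference, a possible end-to-end sequence of the new sequester commands added by this patch series is sketched below. The organization name, PostgreSQL URL and PEM file names are placeholders, and the certificate file names follow the default naming pattern generated by the commands; adjust them to your deployment.

    # Generate a service certificate signed by the sequester authority private key
    parsec sequester generate_service_certificate \
        --service-label "Audit Service" \
        --service-public-key service_public_key.pem \
        --authority-private-key authority_private_key.pem

    # Register the service in an organization (requires access to the server database)
    parsec sequester create_service \
        --service-certificate sequester_service_certificate-<service_id>-<timestamp>.pem \
        --organization MyOrg --db postgresql://...

    # List the services, then revoke one
    parsec sequester list_services --organization MyOrg --db postgresql://...
    parsec sequester generate_service_revocation_certificate \
        --service-id <service_id> --authority-private-key authority_private_key.pem
    parsec sequester revoke_service \
        --service-revocation-certificate sequester_service_revocation_certificate-<service_id>-<timestamp>.pem \
        --organization MyOrg --db postgresql://...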