diff --git a/migrations_lockfile.txt b/migrations_lockfile.txt index 51f9b79236a736..27f992dc08ccfc 100644 --- a/migrations_lockfile.txt +++ b/migrations_lockfile.txt @@ -10,7 +10,7 @@ hybridcloud: 0016_add_control_cacheversion nodestore: 0002_nodestore_no_dictfield remote_subscriptions: 0003_drop_remote_subscription replays: 0004_index_together -sentry: 0776_drop_group_score_in_database +sentry: 0777_add_related_name_to_dashboard_permissions social_auth: 0002_default_auto_field uptime: 0017_unique_on_timeout workflow_engine: 0009_detector_type diff --git a/requirements-base.txt b/requirements-base.txt index 5bc2359f71624d..4d10fffb82e431 100644 --- a/requirements-base.txt +++ b/requirements-base.txt @@ -67,10 +67,10 @@ rfc3986-validator>=0.1.1 sentry-arroyo>=2.16.5 sentry-kafka-schemas>=0.1.111 sentry-ophio==1.0.0 -sentry-protos>=0.1.23 +sentry-protos>=0.1.26 sentry-redis-tools>=0.1.7 sentry-relay>=0.9.2 -sentry-sdk>=2.16.0 +sentry-sdk>=2.17.0 slack-sdk>=3.27.2 snuba-sdk>=3.0.43 simplejson>=3.17.6 diff --git a/requirements-dev-frozen.txt b/requirements-dev-frozen.txt index 71ee219b73115e..276752f845575b 100644 --- a/requirements-dev-frozen.txt +++ b/requirements-dev-frozen.txt @@ -184,10 +184,10 @@ sentry-forked-django-stubs==5.1.0.post2 sentry-forked-djangorestframework-stubs==3.15.1.post2 sentry-kafka-schemas==0.1.111 sentry-ophio==1.0.0 -sentry-protos==0.1.23 +sentry-protos==0.1.26 sentry-redis-tools==0.1.7 sentry-relay==0.9.2 -sentry-sdk==2.16.0 +sentry-sdk==2.17.0 sentry-usage-accountant==0.0.10 simplejson==3.17.6 six==1.16.0 diff --git a/requirements-frozen.txt b/requirements-frozen.txt index 9234f00dfa31fd..7a34edf710bb2a 100644 --- a/requirements-frozen.txt +++ b/requirements-frozen.txt @@ -125,10 +125,10 @@ s3transfer==0.10.0 sentry-arroyo==2.16.5 sentry-kafka-schemas==0.1.111 sentry-ophio==1.0.0 -sentry-protos==0.1.23 +sentry-protos==0.1.26 sentry-redis-tools==0.1.7 sentry-relay==0.9.2 -sentry-sdk==2.16.0 +sentry-sdk==2.17.0 sentry-usage-accountant==0.0.10 simplejson==3.17.6 six==1.16.0 diff --git a/src/flagpole/conditions.py b/src/flagpole/conditions.py index ff3e8c7404cd3e..ac530ffbeb6de8 100644 --- a/src/flagpole/conditions.py +++ b/src/flagpole/conditions.py @@ -20,7 +20,7 @@ class ConditionOperatorKind(str, Enum): """Provided a single value, check if the property (a list) is not included""" EQUALS = "equals" - """Comprare a value to another. Values are compared with types""" + """Compare a value to another. Values are compared with types""" NOT_EQUALS = "not_equals" """Compare a value to not be equal to another. 
Values are compared with types""" diff --git a/src/sentry/api/endpoints/admin_project_configs.py b/src/sentry/api/endpoints/admin_project_configs.py index a33b9a6d5a2596..3b46ff73c17ac0 100644 --- a/src/sentry/api/endpoints/admin_project_configs.py +++ b/src/sentry/api/endpoints/admin_project_configs.py @@ -46,7 +46,7 @@ def get(self, request: Request) -> Response: else: configs[key] = None - # TODO if we don't think we'll add anything to the endpoint + # TODO: if we don't think we'll add anything to the endpoint # we may as well return just the configs return Response({"configs": configs}, status=200) diff --git a/src/sentry/api/endpoints/organization_access_request_details.py b/src/sentry/api/endpoints/organization_access_request_details.py index 4dfd8c1dda50bc..06e89a732f592e 100644 --- a/src/sentry/api/endpoints/organization_access_request_details.py +++ b/src/sentry/api/endpoints/organization_access_request_details.py @@ -1,3 +1,5 @@ +import logging + from django.db import IntegrityError, router, transaction from rest_framework import serializers from rest_framework.request import Request @@ -11,8 +13,11 @@ from sentry.api.exceptions import ResourceDoesNotExist from sentry.api.serializers import serialize from sentry.models.organizationaccessrequest import OrganizationAccessRequest +from sentry.models.organizationmember import OrganizationMember from sentry.models.organizationmemberteam import OrganizationMemberTeam +logger = logging.getLogger(__name__) + class AccessRequestPermission(OrganizationPermission): scope_map = { @@ -71,8 +76,8 @@ def _can_access(self, request: Request, access_request): def get(self, request: Request, organization) -> Response: """ - Get list of requests to join org/team - + Get a list of requests to join org/team. + If any requests are redundant (user already joined the team), they are not returned. """ if request.access.has_scope("org:write"): access_requests = list( @@ -80,7 +85,7 @@ def get(self, request: Request, organization) -> Response: team__organization=organization, member__user_is_active=True, member__user_id__isnull=False, - ).select_related("team") + ).select_related("team", "member") ) elif request.access.has_scope("team:write") and request.access.team_ids_with_membership: @@ -89,20 +94,28 @@ def get(self, request: Request, organization) -> Response: member__user_is_active=True, member__user_id__isnull=False, team__id__in=request.access.team_ids_with_membership, - ).select_related("team") + ).select_related("team", "member") ) else: # Return empty response if user does not have access return Response([]) - return Response(serialize(access_requests, request.user)) + teams_by_user = OrganizationMember.objects.get_teams_by_user(organization=organization) + + # We omit any requests which are now redundant (i.e. the user joined that team some other way) + valid_access_requests = [ + access_request + for access_request in access_requests + if access_request.member.user_id is not None + and access_request.team_id not in teams_by_user[access_request.member.user_id] + ] + + return Response(serialize(valid_access_requests, request.user)) def put(self, request: Request, organization, request_id) -> Response: """ Approve or deny a request - Approve or deny a request. 
- {method} {path} """ diff --git a/src/sentry/api/endpoints/organization_details.py b/src/sentry/api/endpoints/organization_details.py index ac9cd0fa5dde6a..258f3202a386f3 100644 --- a/src/sentry/api/endpoints/organization_details.py +++ b/src/sentry/api/endpoints/organization_details.py @@ -62,6 +62,7 @@ SAFE_FIELDS_DEFAULT, SCRAPE_JAVASCRIPT_DEFAULT, SENSITIVE_FIELDS_DEFAULT, + TARGET_SAMPLE_RATE_DEFAULT, UPTIME_AUTODETECTION, ) from sentry.datascrubbing import validate_pii_config_update, validate_pii_selectors @@ -215,6 +216,7 @@ METRICS_ACTIVATE_LAST_FOR_GAUGES_DEFAULT, ), ("uptimeAutodetection", "sentry:uptime_autodetection", bool, UPTIME_AUTODETECTION), + ("targetSampleRate", "sentry:target_sample_rate", float, TARGET_SAMPLE_RATE_DEFAULT), ) DELETION_STATUSES = frozenset( @@ -276,6 +278,7 @@ class OrganizationSerializer(BaseOrganizationSerializer): relayPiiConfig = serializers.CharField(required=False, allow_blank=True, allow_null=True) apdexThreshold = serializers.IntegerField(min_value=1, required=False) uptimeAutodetection = serializers.BooleanField(required=False) + targetSampleRate = serializers.FloatField(required=False) @cached_property def _has_legacy_rate_limits(self): @@ -365,6 +368,25 @@ def validate_projectRateLimit(self, value): ) return value + def validate_targetSampleRate(self, value): + from sentry import features + + organization = self.context["organization"] + request = self.context["request"] + has_dynamic_sampling_custom = features.has( + "organizations:dynamic-sampling-custom", organization, actor=request.user + ) + if not has_dynamic_sampling_custom: + raise serializers.ValidationError( + "Organization does not have the custom dynamic sample rate feature enabled." + ) + + if not 0.0 <= value <= 1.0: + raise serializers.ValidationError( + "The targetSampleRate option must be in the range [0:1]" + ) + return value + def validate(self, attrs): attrs = super().validate(attrs) if attrs.get("avatarType") == "upload": diff --git a/src/sentry/api/endpoints/organization_events_trace.py b/src/sentry/api/endpoints/organization_events_trace.py index d539dc89331fbf..92f9f881500a99 100644 --- a/src/sentry/api/endpoints/organization_events_trace.py +++ b/src/sentry/api/endpoints/organization_events_trace.py @@ -762,7 +762,7 @@ def build_span_query(trace_id: str, spans_params: SnubaParams, query_spans: list sentry_sdk.set_measurement("trace_view.spans.span_minimum", span_minimum) sentry_sdk.set_tag("trace_view.split_by_char.optimization", len(query_spans) > span_minimum) if len(query_spans) > span_minimum: - # TODO because we're not doing an IN on a list of literals, snuba will not optimize the query with the HexInt + # TODO: because we're not doing an IN on a list of literals, snuba will not optimize the query with the HexInt # column processor which means we won't be taking advantage of the span_id index but if we only do this when we # have a lot of query_spans we should have a great performance improvement still once we do that we can simplify # this code and always apply this optimization diff --git a/src/sentry/api/endpoints/organization_events_trends.py b/src/sentry/api/endpoints/organization_events_trends.py index 0d92aa34063208..e2cded11a2ee81 100644 --- a/src/sentry/api/endpoints/organization_events_trends.py +++ b/src/sentry/api/endpoints/organization_events_trends.py @@ -54,7 +54,7 @@ class TrendColumns(TypedDict): TREND_TYPES = [IMPROVED, REGRESSION] -# TODO move this to the builder file and introduce a top-events version instead +# TODO: move this to the builder 
file and introduce a top-events version instead class TrendQueryBuilder(DiscoverQueryBuilder): def convert_aggregate_filter_to_condition( self, aggregate_filter: AggregateFilter diff --git a/src/sentry/api/endpoints/organization_events_trends_v2.py b/src/sentry/api/endpoints/organization_events_trends_v2.py index e293806d073fa4..95ca60a010f0af 100644 --- a/src/sentry/api/endpoints/organization_events_trends_v2.py +++ b/src/sentry/api/endpoints/organization_events_trends_v2.py @@ -177,7 +177,7 @@ def get_timeseries(top_events, _, rollup, zerofill_results): results[result_key]["data"].append(row) else: discarded += 1 - # TODO filter out entries that don't have transaction or trend_function + # TODO: filter out entries that don't have transaction or trend_function logger.warning( "trends.top-events.timeseries.key-mismatch", extra={ diff --git a/src/sentry/api/endpoints/organization_metrics_tag_details.py b/src/sentry/api/endpoints/organization_metrics_tag_details.py index 58ccb308646115..01b4e3d4dba634 100644 --- a/src/sentry/api/endpoints/organization_metrics_tag_details.py +++ b/src/sentry/api/endpoints/organization_metrics_tag_details.py @@ -41,7 +41,7 @@ def get(self, request: Request, organization: Organization, tag_name: str) -> Re for project in projects ): if len(metric_names) == 1 and metric_names[0].startswith("d:eap"): - # TODO hack for EAP, hardcode some metric names + # TODO: hack for EAP, hardcode some metric names if tag_name == "color": return Response( [ diff --git a/src/sentry/api/endpoints/organization_metrics_tags.py b/src/sentry/api/endpoints/organization_metrics_tags.py index 041eb403727ac8..52d0cde0c3a01d 100644 --- a/src/sentry/api/endpoints/organization_metrics_tags.py +++ b/src/sentry/api/endpoints/organization_metrics_tags.py @@ -58,7 +58,7 @@ def get(self, request: Request, organization: Organization) -> Response: for project in projects ): if metric_name.startswith("d:eap"): - # TODO hack for EAP, return a fixed list + # TODO: hack for EAP, return a fixed list return Response([Tag(key="color"), Tag(key="location")]) try: diff --git a/src/sentry/api/endpoints/seer_rpc.py b/src/sentry/api/endpoints/seer_rpc.py index f468d0f5ae5a38..8ad06295ed6c8a 100644 --- a/src/sentry/api/endpoints/seer_rpc.py +++ b/src/sentry/api/endpoints/seer_rpc.py @@ -17,6 +17,7 @@ from rest_framework.response import Response from sentry_sdk import Scope, capture_exception +from sentry import options from sentry.api.api_owners import ApiOwner from sentry.api.api_publish_status import ApiPublishStatus from sentry.api.authentication import AuthenticationSiloLimit, StandardAuthentication @@ -153,8 +154,9 @@ def get_organization_slug(*, org_id: int) -> dict: def get_organization_autofix_consent(*, org_id: int) -> dict: org: Organization = Organization.objects.get(id=org_id) consent = org.get_option("sentry:gen_ai_consent", False) + github_extension_enabled = org_id in options.get("github-extension.enabled-orgs") return { - "consent": consent, + "consent": consent or github_extension_enabled, } diff --git a/src/sentry/api/helpers/actionable_items_helper.py b/src/sentry/api/helpers/actionable_items_helper.py index fccc127cf20c19..f66d12cb3cff3f 100644 --- a/src/sentry/api/helpers/actionable_items_helper.py +++ b/src/sentry/api/helpers/actionable_items_helper.py @@ -38,6 +38,8 @@ class ActionPriority: EventError.INVALID_ENVIRONMENT: ActionPriority.LOW, EventError.NATIVE_BAD_DSYM: ActionPriority.LOW, EventError.NATIVE_MISSING_DSYM: ActionPriority.LOW, + EventError.NATIVE_INTERNAL_FAILURE: 
ActionPriority.LOW, + EventError.NATIVE_SYMBOLICATOR_FAILED: ActionPriority.LOW, EventError.NATIVE_MISSING_OPTIONALLY_BUNDLED_DSYM: ActionPriority.LOW, EventError.PAST_TIMESTAMP: ActionPriority.LOW, EventError.PROGUARD_MISSING_LINENO: ActionPriority.LOW, @@ -66,12 +68,10 @@ class ActionPriority: EventError.JS_SCRAPING_DISABLED, EventError.JS_TOO_MANY_REMOTE_SOURCES, EventError.MISSING_ATTRIBUTE, - EventError.NATIVE_INTERNAL_FAILURE, EventError.NATIVE_MISSING_SYMBOL, EventError.NATIVE_MISSING_SYSTEM_DSYM, EventError.NATIVE_NO_CRASHED_THREAD, EventError.NATIVE_SIMULATOR_FRAME, - EventError.NATIVE_SYMBOLICATOR_FAILED, EventError.NATIVE_UNKNOWN_IMAGE, EventError.UNKNOWN_ERROR, EventError.VALUE_TOO_LONG, diff --git a/src/sentry/api/paginator.py b/src/sentry/api/paginator.py index 61684a9161f3e7..173ce25e871486 100644 --- a/src/sentry/api/paginator.py +++ b/src/sentry/api/paginator.py @@ -537,7 +537,7 @@ def get_result(self, limit, cursor=None): prev=Cursor(0, max(0, offset - limit), True, offset > 0), next=Cursor(0, max(0, offset + limit), False, has_more), ) - # TODO use Cursor.value as the `end` argument to data_fn() so that + # TODO: use Cursor.value as the `end` argument to data_fn() so that # subsequent pages returned using these cursors are using the same end # date for queries, this should stop drift from new incoming events. diff --git a/src/sentry/api/serializers/models/dashboard.py b/src/sentry/api/serializers/models/dashboard.py index acb42dc337f451..47c25c81b32756 100644 --- a/src/sentry/api/serializers/models/dashboard.py +++ b/src/sentry/api/serializers/models/dashboard.py @@ -7,6 +7,7 @@ from sentry.api.serializers import Serializer, register, serialize from sentry.constants import ALL_ACCESS_PROJECTS from sentry.models.dashboard import Dashboard +from sentry.models.dashboard_permissions import DashboardPermissions from sentry.models.dashboard_widget import ( DashboardWidget, DashboardWidgetDisplayTypes, @@ -64,6 +65,10 @@ class DashboardWidgetResponse(TypedDict): layout: dict[str, int] +class DashboardPermissionsResponse(TypedDict): + is_creator_only_editable: bool + + @register(DashboardWidget) class DashboardWidgetSerializer(Serializer): def get_attrs(self, item_list, user, **kwargs): @@ -169,6 +174,14 @@ def serialize(self, obj, attrs, user, **kwargs) -> DashboardWidgetQueryResponse: } +@register(DashboardPermissions) +class DashboardPermissionsSerializer(Serializer): + def serialize(self, obj, attrs, user, **kwargs) -> DashboardPermissionsResponse: + return { + "is_creator_only_editable": obj.is_creator_only_editable, + } + + class DashboardListResponse(TypedDict): id: str title: str @@ -259,6 +272,7 @@ class DashboardDetailsResponse(DashboardDetailsResponseOptional): widgets: list[DashboardWidgetResponse] projects: list[int] filters: DashboardFilters + permissions: DashboardPermissionsResponse | None @register(Dashboard) @@ -294,6 +308,7 @@ def serialize(self, obj, attrs, user, **kwargs) -> DashboardDetailsResponse: "widgets": attrs["widgets"], "projects": [project.id for project in obj.projects.all()], "filters": {}, + "permissions": serialize(obj.permissions) if hasattr(obj, "permissions") else None, } if obj.filters is not None: diff --git a/src/sentry/api/serializers/models/organization.py b/src/sentry/api/serializers/models/organization.py index aede5826daeda2..906781cfcee906 100644 --- a/src/sentry/api/serializers/models/organization.py +++ b/src/sentry/api/serializers/models/organization.py @@ -421,7 +421,7 @@ def serialize( class 
_DetailedOrganizationSerializerResponseOptional(OrganizationSerializerResponse, total=False): - role: Any # TODO replace with enum/literal + role: Any # TODO: replace with enum/literal orgRole: str uptimeAutodetection: bool diff --git a/src/sentry/api/serializers/models/project.py b/src/sentry/api/serializers/models/project.py index 821d3ccf82c0df..3465fa28b34c95 100644 --- a/src/sentry/api/serializers/models/project.py +++ b/src/sentry/api/serializers/models/project.py @@ -275,7 +275,7 @@ class ProjectSerializerResponse(ProjectSerializerBaseResponse): isPublic: bool avatar: SerializedAvatarFields color: str - status: str # TODO enum/literal + status: str # TODO: enum/literal @register(Project) diff --git a/src/sentry/api/serializers/rest_framework/dashboard.py b/src/sentry/api/serializers/rest_framework/dashboard.py index dab9c2048b66c2..67d87c831afa91 100644 --- a/src/sentry/api/serializers/rest_framework/dashboard.py +++ b/src/sentry/api/serializers/rest_framework/dashboard.py @@ -456,6 +456,12 @@ def validate(self, data): return data +class DashboardPermissionsSerializer(CamelSnakeSerializer[Dashboard]): + is_creator_only_editable = serializers.BooleanField( + help_text="Whether the dashboard is editable only by the creator.", + ) + + class DashboardDetailsSerializer(CamelSnakeSerializer[Dashboard]): # Is a string because output serializers also make it a string. id = serializers.CharField(required=False, help_text="A dashboard's unique id.") @@ -494,6 +500,11 @@ class DashboardDetailsSerializer(CamelSnakeSerializer[Dashboard]): help_text="Setting that lets you display saved time range for this dashboard in UTC.", ) validate_id = validate_id + permissions = DashboardPermissionsSerializer( + required=False, + allow_null=True, + help_text="Permissions that restrict users from editing dashboards", + ) def validate_projects(self, projects): from sentry.api.validators import validate_project_ids diff --git a/src/sentry/apidocs/examples/dashboard_examples.py b/src/sentry/apidocs/examples/dashboard_examples.py index d6afb659ef0e29..4ee6029151a05e 100644 --- a/src/sentry/apidocs/examples/dashboard_examples.py +++ b/src/sentry/apidocs/examples/dashboard_examples.py @@ -67,6 +67,7 @@ "projects": [1], "filters": {}, "period": "7d", + "permissions": {"is_creator_only_editable": False}, } DASHBOARDS_OBJECT = [ diff --git a/src/sentry/buffer/redis.py b/src/sentry/buffer/redis.py index 8b19cbf05e9e30..5b0ffbd01aa252 100644 --- a/src/sentry/buffer/redis.py +++ b/src/sentry/buffer/redis.py @@ -34,7 +34,7 @@ # load everywhere _last_validation_log: float | None = None Pipeline = Any -# TODO type Pipeline instead of using Any here +# TODO: type Pipeline instead of using Any here def _get_model_key(model: type[models.Model]) -> str: diff --git a/src/sentry/constants.py b/src/sentry/constants.py index 3ce14a64301cb4..41bee5a4a92240 100644 --- a/src/sentry/constants.py +++ b/src/sentry/constants.py @@ -710,6 +710,7 @@ class InsightModules(Enum): METRICS_ACTIVATE_LAST_FOR_GAUGES_DEFAULT = False DATA_CONSENT_DEFAULT = False UPTIME_AUTODETECTION = True +TARGET_SAMPLE_RATE_DEFAULT = 1.0 # `sentry:events_member_admin` - controls whether the 'member' role gets the event:admin scope EVENTS_MEMBER_ADMIN_DEFAULT = True diff --git a/src/sentry/data_secrecy/api/waive_data_secrecy.py b/src/sentry/data_secrecy/api/waive_data_secrecy.py index 8af1d9bab7e480..c539f80e24a441 100644 --- a/src/sentry/data_secrecy/api/waive_data_secrecy.py +++ b/src/sentry/data_secrecy/api/waive_data_secrecy.py @@ -119,12 +119,12 @@ def 
put(self, request: Request, organization: Organization): serialize(ds, request.user, DataSecrecyWaiverSerializer()), status=status.HTTP_200_OK ) - def delete(self, request: Request, organization): + def delete(self, request: Request, organization: Organization): """ Reinstates data secrecy for an organization. """ try: - ds = get_object_or_404(DataSecrecyWaiver, organization=organization) + ds = DataSecrecyWaiver.objects.get(organization=organization) ds.delete() self.create_audit_entry( @@ -136,7 +136,7 @@ def delete(self, request: Request, organization): {"detail": "Data secrecy has been reinstated."}, status=status.HTTP_204_NO_CONTENT, ) - except Http404: + except DataSecrecyWaiver.DoesNotExist: return Response( {"detail": "No data secrecy waiver found for this organization."}, status=status.HTTP_404_NOT_FOUND, diff --git a/src/sentry/db/models/fields/node.py b/src/sentry/db/models/fields/node.py index 7e3844319f67ee..c58cad00fbb329 100644 --- a/src/sentry/db/models/fields/node.py +++ b/src/sentry/db/models/fields/node.py @@ -192,7 +192,7 @@ def to_python(self, value): try: value = pickle.loads(decompress(value)) except Exception as e: - # TODO this is a bit dangerous as a failure to read/decode the + # TODO: this is a bit dangerous as a failure to read/decode the # node_id will end up with this record being replaced with an # empty value under a new key, potentially orphaning an # original value in nodestore. OTOH if we can't decode the info diff --git a/src/sentry/deletions/defaults/repository.py b/src/sentry/deletions/defaults/repository.py index 960befb3b93d01..41d2adedc16009 100644 --- a/src/sentry/deletions/defaults/repository.py +++ b/src/sentry/deletions/defaults/repository.py @@ -29,7 +29,7 @@ def get_child_relations(self, instance: Repository) -> list[BaseRelation]: return _get_repository_child_relations(instance) def delete_instance(self, instance: Repository) -> None: - # TODO child_relations should also send pending_delete so we + # TODO: child_relations should also send pending_delete so we # don't have to do this here. pending_delete.send(sender=type(instance), instance=instance, actor=self.get_actor()) diff --git a/src/sentry/dynamic_sampling/rules/biases/custom_rule_bias.py b/src/sentry/dynamic_sampling/rules/biases/custom_rule_bias.py index a42a7fcb388141..fb6d62c29c0af9 100644 --- a/src/sentry/dynamic_sampling/rules/biases/custom_rule_bias.py +++ b/src/sentry/dynamic_sampling/rules/biases/custom_rule_bias.py @@ -15,7 +15,7 @@ class CustomRuleBias(Bias): """ - Boosts at 100% sample rate all the traces that have a replay_id. + Boosts to 100% sample rate all the traces matching an active custom rule. """ def generate_rules(self, project: Project, base_sample_rate: float) -> list[PolymorphicRule]: diff --git a/src/sentry/eventstore/models.py b/src/sentry/eventstore/models.py index b8bda1fc180cf4..6014a7f2ff0884 100644 --- a/src/sentry/eventstore/models.py +++ b/src/sentry/eventstore/models.py @@ -602,7 +602,7 @@ def group_id(self) -> int | None: def group_id(self, value: int | None) -> None: self._group_id = value - # TODO We need a better way to cache these properties. functools + # TODO: We need a better way to cache these properties. functools # doesn't quite do the trick as there is a reference bug with unsaved # models. But the current _group_cache thing is also clunky because these # properties need to be stripped out in __getstate__. 
diff --git a/src/sentry/features/temporary.py b/src/sentry/features/temporary.py index 9c25551aae218a..c147fe42d052a4 100644 --- a/src/sentry/features/temporary.py +++ b/src/sentry/features/temporary.py @@ -141,8 +141,6 @@ def register_temporary_features(manager: FeatureManager): manager.add("organizations:gitlab-disable-on-broken", OrganizationFeature, FeatureHandlerStrategy.FLAGPOLE, api_expose=False) # Allow creating `GroupHashMetadata` records manager.add("organizations:grouphash-metadata-creation", OrganizationFeature, FeatureHandlerStrategy.FLAGPOLE, api_expose=False) - # Allows an org to have a larger set of project ownership rules per project - manager.add("organizations:higher-ownership-limit", OrganizationFeature, FeatureHandlerStrategy.INTERNAL, api_expose=False) # Enable increased issue_owners rate limit for auto-assignment manager.add("organizations:increased-issue-owners-rate-limit", OrganizationFeature, FeatureHandlerStrategy.INTERNAL, api_expose=False) # Starfish: extract metrics from the spans @@ -216,7 +214,7 @@ def register_temporary_features(manager: FeatureManager): manager.add("organizations:navigation-sidebar-v2", OrganizationFeature, FeatureHandlerStrategy.FLAGPOLE, api_expose=True) manager.add("organizations:new-page-filter", OrganizationFeature, FeatureHandlerStrategy.FLAGPOLE, default=True, api_expose=True) # Display warning banner for every event issue alerts - manager.add("organizations:noisy-alert-warning", OrganizationFeature, FeatureHandlerStrategy.FLAGPOLE, api_expose=True) + manager.add("organizations:noisy-alert-warning", OrganizationFeature, FeatureHandlerStrategy.FLAGPOLE, api_expose=True, default=True) # Notify all project members when fallthrough is disabled, instead of just the auto-assignee manager.add("organizations:notification-all-recipients", OrganizationFeature, FeatureHandlerStrategy.FLAGPOLE, api_expose=False) # Drop obsoleted status changes in occurence consumer @@ -352,7 +350,7 @@ def register_temporary_features(manager: FeatureManager): manager.add("organizations:project-event-date-limit", OrganizationFeature, FeatureHandlerStrategy.FLAGPOLE, api_expose=False) manager.add("organizations:project-templates", OrganizationFeature, FeatureHandlerStrategy.INTERNAL, api_expose=False) # Enable the new quick start guide - manager.add("organizations:quick-start-updates", OrganizationFeature, FeatureHandlerStrategy.FLAGPOLE, api_expose=False) + manager.add("organizations:quick-start-updates", OrganizationFeature, FeatureHandlerStrategy.FLAGPOLE, api_expose=True) # Enable the new Related Events feature manager.add("organizations:related-events", OrganizationFeature, FeatureHandlerStrategy.INTERNAL, api_expose=False) # Enable related issues feature diff --git a/src/sentry/flags/docs/api.md b/src/sentry/flags/docs/api.md index 5667acfaf81aa7..6a42c59fe9ed9d 100644 --- a/src/sentry/flags/docs/api.md +++ b/src/sentry/flags/docs/api.md @@ -13,6 +13,7 @@ This document is structured by resource with each resource having actions that c ## Flag Logs [/organizations//flags/logs/] - Parameters + - flag (optional, string) - The flag name to filter the result by. Can be specified multiple times. - start (optional, string) - ISO 8601 format (`YYYY-MM-DDTHH:mm:ss.sssZ`) - end (optional, string) - ISO 8601 format. Required if `start` is set. - statsPeriod (optional, string) - A positive integer suffixed with a unit type. 
diff --git a/src/sentry/flags/endpoints/logs.py b/src/sentry/flags/endpoints/logs.py index 8da767df45f7f9..45dfeea606ec21 100644 --- a/src/sentry/flags/endpoints/logs.py +++ b/src/sentry/flags/endpoints/logs.py @@ -61,6 +61,10 @@ def get(self, request: Request, organization: Organization) -> Response: organization_id=organization.id, ) + flags = request.GET.getlist("flag") + if flags: + queryset = queryset.filter(flag__in=flags) + return self.paginate( request=request, queryset=queryset, diff --git a/src/sentry/incidents/grouptype.py b/src/sentry/incidents/grouptype.py new file mode 100644 index 00000000000000..7a7d0dc6900d52 --- /dev/null +++ b/src/sentry/incidents/grouptype.py @@ -0,0 +1,32 @@ +from dataclasses import dataclass + +from sentry.incidents.utils.types import QuerySubscriptionUpdate +from sentry.issues.grouptype import GroupCategory, GroupType +from sentry.ratelimits.sliding_windows import Quota +from sentry.types.group import PriorityLevel +from sentry.workflow_engine.models import DataPacket +from sentry.workflow_engine.models.detector import DetectorEvaluationResult, DetectorHandler + + +# TODO: This will be a stateful detector when we build that abstraction +class MetricAlertDetectorHandler(DetectorHandler[QuerySubscriptionUpdate]): + def evaluate( + self, data_packet: DataPacket[QuerySubscriptionUpdate] + ) -> list[DetectorEvaluationResult]: + # TODO: Implement + return [] + + +# Example GroupType and detector handler for metric alerts. We don't create these issues yet, but we'll use something +# like these when we're sending issues as alerts +@dataclass(frozen=True) +class MetricAlertFire(GroupType): + type_id = 8001 + slug = "metric_alert_fire" + description = "Metric alert fired" + category = GroupCategory.METRIC_ALERT.value + creation_quota = Quota(3600, 60, 100) + default_priority = PriorityLevel.HIGH + enable_auto_resolve = False + enable_escalation_detection = False + detector_handler = MetricAlertDetectorHandler diff --git a/src/sentry/incidents/logic.py b/src/sentry/incidents/logic.py index 20b2a3a69abbf7..35b68b43422438 100644 --- a/src/sentry/incidents/logic.py +++ b/src/sentry/incidents/logic.py @@ -479,6 +479,7 @@ class AlertRuleNameAlreadyUsedError(Exception): Dataset.Transactions: SnubaQuery.Type.PERFORMANCE, Dataset.PerformanceMetrics: SnubaQuery.Type.PERFORMANCE, Dataset.Metrics: SnubaQuery.Type.CRASH_RATE, + Dataset.EventsAnalyticsPlatform: SnubaQuery.Type.PERFORMANCE, } @@ -578,10 +579,12 @@ def create_alert_rule( resolution = time_window # NOTE: we hardcode seasonality for EA seasonality = AlertRuleSeasonality.AUTO - if not (sensitivity): + if not sensitivity: raise ValidationError("Dynamic alerts require a sensitivity level") if time_window not in DYNAMIC_TIME_WINDOWS: raise ValidationError(INVALID_TIME_WINDOW) + if "is:unresolved" in query: + raise ValidationError("Dynamic alerts do not support 'is:unresolved' queries") else: resolution = get_alert_resolution(time_window, organization) seasonality = None @@ -917,6 +920,8 @@ def update_alert_rule( raise ResourceDoesNotExist( "Your organization does not have access to this feature." 
) + if query and "is:unresolved" in query: + raise ValidationError("Dynamic alerts do not support 'is:unresolved' queries") # NOTE: if adding a new metric alert type, take care to check that it's handled here project = projects[0] if projects else alert_rule.projects.get() update_rule_data(alert_rule, project, snuba_query, updated_fields, updated_query_fields) @@ -1841,6 +1846,22 @@ def get_opsgenie_teams(organization_id: int, integration_id: int) -> list[tuple[ "measurements.score.total", ], } +EAP_COLUMNS = [ + "span.duration", + "span.self_time", +] +EAP_FUNCTIONS = [ + "count", + "avg", + "p50", + "p75", + "p90", + "p95", + "p99", + "p100", + "max", + "min", +] def get_column_from_aggregate(aggregate: str, allow_mri: bool) -> str | None: @@ -1853,6 +1874,11 @@ def get_column_from_aggregate(aggregate: str, allow_mri: bool) -> str | None: or match.group("function") in METRICS_LAYER_UNSUPPORTED_TRANSACTION_METRICS_FUNCTIONS ): return None if match.group("columns") == "" else match.group("columns") + + # Skip additional validation for EAP queries. They don't exist in the old logic. + if match and match.group("function") in EAP_FUNCTIONS and match.group("columns") in EAP_COLUMNS: + return match.group("columns") + if allow_mri: mri_column = _get_column_from_aggregate_with_mri(aggregate) # Only if the column was allowed, we return it, otherwise we fallback to the old logic. @@ -1885,7 +1911,9 @@ def _get_column_from_aggregate_with_mri(aggregate: str) -> str | None: return columns -def check_aggregate_column_support(aggregate: str, allow_mri: bool = False) -> bool: +def check_aggregate_column_support( + aggregate: str, allow_mri: bool = False, allow_eap: bool = False +) -> bool: # TODO(ddm): remove `allow_mri` once the experimental feature flag is removed. column = get_column_from_aggregate(aggregate, allow_mri) match = is_function(aggregate) @@ -1900,6 +1928,7 @@ def check_aggregate_column_support(aggregate: str, allow_mri: bool = False) -> b isinstance(function, str) and column in INSIGHTS_FUNCTION_VALID_ARGS_MAP.get(function, []) ) + or (column in EAP_COLUMNS and allow_eap) ) diff --git a/src/sentry/incidents/serializers/__init__.py b/src/sentry/incidents/serializers/__init__.py index 061c29461acd3e..58a4bd86171ef0 100644 --- a/src/sentry/incidents/serializers/__init__.py +++ b/src/sentry/incidents/serializers/__init__.py @@ -26,7 +26,11 @@ } QUERY_TYPE_VALID_DATASETS = { SnubaQuery.Type.ERROR: {Dataset.Events}, - SnubaQuery.Type.PERFORMANCE: {Dataset.Transactions, Dataset.PerformanceMetrics}, + SnubaQuery.Type.PERFORMANCE: { + Dataset.Transactions, + Dataset.PerformanceMetrics, + Dataset.EventsAnalyticsPlatform, + }, SnubaQuery.Type.CRASH_RATE: {Dataset.Metrics}, } diff --git a/src/sentry/incidents/serializers/alert_rule.py b/src/sentry/incidents/serializers/alert_rule.py index f68911f9f0851b..756d74ef08c1d6 100644 --- a/src/sentry/incidents/serializers/alert_rule.py +++ b/src/sentry/incidents/serializers/alert_rule.py @@ -165,11 +165,17 @@ def validate_aggregate(self, aggregate): self.context["organization"], actor=self.context.get("user", None), ) + allow_eap = features.has( + "organizations:alerts-eap", + self.context["organization"], + actor=self.context.get("user", None), + ) try: if not check_aggregate_column_support( aggregate, allow_mri=allow_mri, + allow_eap=allow_eap, ): raise serializers.ValidationError( "Invalid Metric: We do not currently support this field." 
diff --git a/src/sentry/integrations/gitlab/webhooks.py b/src/sentry/integrations/gitlab/webhooks.py index 063cbe02fa008c..ad3a507153d766 100644 --- a/src/sentry/integrations/gitlab/webhooks.py +++ b/src/sentry/integrations/gitlab/webhooks.py @@ -168,7 +168,7 @@ def __call__( authors = {} - # TODO gitlab only sends a max of 20 commits. If a push contains + # TODO: gitlab only sends a max of 20 commits. If a push contains # more commits they provide a total count and require additional API # requests to fetch the commit details for commit in event.get("commits", []): diff --git a/src/sentry/integrations/messaging/metrics.py b/src/sentry/integrations/messaging/metrics.py index dc498f9ca0df47..00c60571668294 100644 --- a/src/sentry/integrations/messaging/metrics.py +++ b/src/sentry/integrations/messaging/metrics.py @@ -40,6 +40,8 @@ class MessagingInteractionType(Enum): UNFURL_METRIC_ALERTS = "UNFURL_METRIC_ALERTS" UNFURL_DISCOVER = "UNFURL_DISCOVER" + GET_PARENT_NOTIFICATION = "GET_PARENT_NOTIFICATION" + def __str__(self) -> str: return self.value.lower() diff --git a/src/sentry/integrations/metric_alerts.py b/src/sentry/integrations/metric_alerts.py index e311869ea9e088..fb544a6bd9b951 100644 --- a/src/sentry/integrations/metric_alerts.py +++ b/src/sentry/integrations/metric_alerts.py @@ -26,7 +26,6 @@ "percentage(sessions_crashed, sessions)": "% sessions crash free rate", "percentage(users_crashed, users)": "% users crash free rate", } -LOGO_URL = absolute_uri(get_asset_url("sentry", "images/sentry-email-avatar.png")) # These should be the same as the options in the frontend # COMPARISON_DELTA_OPTIONS TEXT_COMPARISON_DELTA = { @@ -39,6 +38,10 @@ } +def logo_url() -> str: + return absolute_uri(get_asset_url("sentry", "images/sentry-email-avatar.png")) + + def get_metric_count_from_incident(incident: Incident) -> str: """Returns the current or last count of an incident aggregate.""" incident_trigger = ( @@ -144,7 +147,7 @@ def incident_attachment_info( return { "title": title, "text": text, - "logo_url": LOGO_URL, + "logo_url": logo_url(), "status": status, "ts": incident.date_started, "title_link": title_link, @@ -232,7 +235,7 @@ def metric_alert_attachment_info( return { "title": title, "text": text, - "logo_url": LOGO_URL, + "logo_url": logo_url(), "status": status, "date_started": date_started, "last_triggered_date": last_triggered_date, diff --git a/src/sentry/integrations/slack/actions/notification.py b/src/sentry/integrations/slack/actions/notification.py index bb50b9e923ef08..45731bb2f09376 100644 --- a/src/sentry/integrations/slack/actions/notification.py +++ b/src/sentry/integrations/slack/actions/notification.py @@ -10,6 +10,10 @@ from sentry.api.serializers.rest_framework.rule import ACTION_UUID_KEY from sentry.constants import ISSUE_ALERTS_THREAD_DEFAULT from sentry.eventstore.models import GroupEvent +from sentry.integrations.messaging.metrics import ( + MessagingInteractionEvent, + MessagingInteractionType, +) from sentry.integrations.models.integration import Integration from sentry.integrations.repository import get_default_issue_alert_repository from sentry.integrations.repository.base import NotificationMessageValidationError @@ -28,7 +32,9 @@ SLACK_ISSUE_ALERT_SUCCESS_DATADOG_METRIC, ) from sentry.integrations.slack.sdk_client import SlackSdkClient +from sentry.integrations.slack.spec import SlackMessagingSpec from sentry.integrations.slack.utils.channel import SlackChannelIdData, get_channel_id +from sentry.integrations.utils.metrics import EventLifecycle from 
sentry.models.options.organization_option import OrganizationOption from sentry.models.rule import Rule from sentry.notifications.additional_attachment_manager import get_additional_attachment @@ -122,41 +128,56 @@ def send_notification(event: GroupEvent, futures: Sequence[RuleFuture]) -> None: rule_action_uuid=rule_action_uuid, ) - # We need to search by rule action uuid and rule id, so only search if they exist - reply_broadcast = False - thread_ts = None - if ( - OrganizationOption.objects.get_value( - organization=self.project.organization, - key="sentry:issue_alerts_thread_flag", - default=ISSUE_ALERTS_THREAD_DEFAULT, - ) - and rule_action_uuid - and rule_id - ): - parent_notification_message = None + def get_thread_ts(lifecycle: EventLifecycle) -> str | None: + """Find the thread in which to post this notification as a reply. + + Return None to post the notification as a top-level message. + """ + + # We need to search by rule action uuid and rule id, so only search if they exist + if not ( + rule_action_uuid + and rule_id + and OrganizationOption.objects.get_value( + organization=self.project.organization, + key="sentry:issue_alerts_thread_flag", + default=ISSUE_ALERTS_THREAD_DEFAULT, + ) + ): + return None + try: parent_notification_message = self._repository.get_parent_notification_message( rule_id=rule_id, group_id=event.group.id, rule_action_uuid=rule_action_uuid, ) - except Exception: + except Exception as e: + lifecycle.record_halt(e) + # if there's an error trying to grab a parent notification, don't let that error block this flow # we already log at the repository layer, no need to log again here - pass + return None - if parent_notification_message: - # If a parent notification exists for this rule and action, then we can reply in a thread - # Make sure we track that this reply will be in relation to the parent row - new_notification_message_object.parent_notification_message_id = ( - parent_notification_message.id - ) - # To reply to a thread, use the specific key in the payload as referenced by the docs - # https://api.slack.com/methods/chat.postMessage#arg_thread_ts - thread_ts = parent_notification_message.message_identifier - # If this flow is triggered again for the same issue, we want it to be seen in the main channel - reply_broadcast = True + if parent_notification_message is None: + return None + + # If a parent notification exists for this rule and action, then we can reply in a thread + # Make sure we track that this reply will be in relation to the parent row + new_notification_message_object.parent_notification_message_id = ( + parent_notification_message.id + ) + # To reply to a thread, use the specific key in the payload as referenced by the docs + # https://api.slack.com/methods/chat.postMessage#arg_thread_ts + return parent_notification_message.message_identifier + + with MessagingInteractionEvent( + MessagingInteractionType.GET_PARENT_NOTIFICATION, SlackMessagingSpec() + ).capture() as lifecycle: + thread_ts = get_thread_ts(lifecycle) + + # If this flow is triggered again for the same issue, we want it to be seen in the main channel + reply_broadcast = thread_ts is not None client = SlackSdkClient(integration_id=integration.id) text = str(blocks.get("text")) diff --git a/src/sentry/integrations/slack/message_builder/notifications/rule_save_edit.py b/src/sentry/integrations/slack/message_builder/notifications/rule_save_edit.py index 74c202dde397ec..0241d925a561fe 100644 --- a/src/sentry/integrations/slack/message_builder/notifications/rule_save_edit.py +++ 
b/src/sentry/integrations/slack/message_builder/notifications/rule_save_edit.py @@ -48,7 +48,7 @@ def build(self) -> SlackBlock: else: rule_text = "*Alert rule updated*\n\n" rule_text += f"{rule_url} in the {project_url} project was recently updated." - # TODO potentially use old name if it's changed? + # TODO: potentially use old name if it's changed? blocks.append(self.get_markdown_block(rule_text)) diff --git a/src/sentry/integrations/utils/metrics.py b/src/sentry/integrations/utils/metrics.py index 1c0f9b00131ea5..83c2bc755017b6 100644 --- a/src/sentry/integrations/utils/metrics.py +++ b/src/sentry/integrations/utils/metrics.py @@ -174,6 +174,23 @@ def record_failure( self._extra.update(extra) self._terminate(EventLifecycleOutcome.FAILURE, exc) + def record_halt(self, exc: BaseException | None = None) -> None: + """Record that the event halted in an ambiguous state. + + This method can be called in response to a sufficiently ambiguous exception + or other error condition, where it may have been caused by a user error or + other expected condition, but there is some substantial chance that it + represents a bug. + + Such cases usually mean that we want to: + (1) document the ambiguity; + (2) monitor it for sudden spikes in frequency; and + (3) investigate whether more detailed error information is available + (but probably later, as a backlog item). + """ + + self._terminate(EventLifecycleOutcome.HALTED, exc) + def __enter__(self) -> Self: if self._state is not None: self._report_flow_error("The lifecycle has already been entered") diff --git a/src/sentry/issues/endpoints/group_notes_details.py b/src/sentry/issues/endpoints/group_notes_details.py index 65fb6012f2eedf..7097802a0f0f05 100644 --- a/src/sentry/issues/endpoints/group_notes_details.py +++ b/src/sentry/issues/endpoints/group_notes_details.py @@ -84,7 +84,7 @@ def put(self, request: Request, group, note_id) -> Response: if serializer.is_valid(): payload = serializer.validated_data - # TODO adding mentions to a note doesn't send notifications. Should it? + # TODO: adding mentions to a note doesn't send notifications. Should it? 
# Remove mentions as they shouldn't go into the database payload.pop("mentions", []) diff --git a/src/sentry/issues/grouptype.py b/src/sentry/issues/grouptype.py index 1c19909b0a452e..37f8ab41eb16e4 100644 --- a/src/sentry/issues/grouptype.py +++ b/src/sentry/issues/grouptype.py @@ -22,6 +22,7 @@ from sentry.models.organization import Organization from sentry.models.project import Project from sentry.users.models.user import User + from sentry.workflow_engine.models.detector import DetectorHandler import logging logger = logging.getLogger(__name__) @@ -35,6 +36,7 @@ class GroupCategory(Enum): REPLAY = 5 FEEDBACK = 6 UPTIME = 7 + METRIC_ALERT = 8 GROUP_CATEGORIES_CUSTOM_EMAIL = ( @@ -152,8 +154,10 @@ class GroupType: enable_auto_resolve: bool = True # Allow escalation forecasts and detection enable_escalation_detection: bool = True + # Quota around how many of these issue types can be created per project in a given time window creation_quota: Quota = Quota(3600, 60, 5) # default 5 per hour, sliding window of 60 seconds notification_config: NotificationConfig = NotificationConfig() + detector_handler: type[DetectorHandler] | None = None def __init_subclass__(cls: type[GroupType], **kwargs: Any) -> None: super().__init_subclass__(**kwargs) diff --git a/src/sentry/issues/run.py b/src/sentry/issues/run.py index 0eac7116a06b15..057a7023436c39 100644 --- a/src/sentry/issues/run.py +++ b/src/sentry/issues/run.py @@ -48,7 +48,7 @@ def __init__( self.pool = MultiprocessingPool(num_processes) self.worker = None - def crate_parallel_worker( + def create_parallel_worker( self, commit: Commit, ) -> ProcessingStrategy[KafkaPayload]: @@ -63,7 +63,7 @@ def crate_parallel_worker( output_block_size=self.output_block_size, ) - def creat_batched_parallel_worker(self, commit: Commit) -> ProcessingStrategy[KafkaPayload]: + def create_batched_parallel_worker(self, commit: Commit) -> ProcessingStrategy[KafkaPayload]: assert self.worker is not None batch_processor = RunTask( function=functools.partial(process_batch, self.worker), @@ -81,9 +81,9 @@ def create_with_partitions( partitions: Mapping[Partition, int], ) -> ProcessingStrategy[KafkaPayload]: if self.batched: - return self.creat_batched_parallel_worker(commit) + return self.create_batched_parallel_worker(commit) else: - return self.crate_parallel_worker(commit) + return self.create_parallel_worker(commit) def shutdown(self) -> None: if self.pool: diff --git a/src/sentry/mediators/__init__.py b/src/sentry/mediators/__init__.py index df677f82741301..57874a2ed719fd 100644 --- a/src/sentry/mediators/__init__.py +++ b/src/sentry/mediators/__init__.py @@ -1,3 +1,4 @@ from .mediator import Mediator # NOQA from .param import Param # NOQA +from .token_exchange.refresher import Refresher # noqa: F401 from .token_exchange.util import AUTHORIZATION, REFRESH, GrantTypes # noqa: F401 diff --git a/src/sentry/mediators/token_exchange/__init__.py b/src/sentry/mediators/token_exchange/__init__.py index 46a4f7637503ea..84bcc14774369d 100644 --- a/src/sentry/mediators/token_exchange/__init__.py +++ b/src/sentry/mediators/token_exchange/__init__.py @@ -1,2 +1,3 @@ +from .refresher import Refresher # NOQA from .util import AUTHORIZATION, REFRESH, GrantTypes, token_expiration # NOQA from .validator import Validator # NOQA diff --git a/src/sentry/sentry_apps/token_exchange/refresher.py b/src/sentry/mediators/token_exchange/refresher.py similarity index 67% rename from src/sentry/sentry_apps/token_exchange/refresher.py rename to src/sentry/mediators/token_exchange/refresher.py index
f6881700eb09ab..08bdb2d0bcd547 100644 --- a/src/sentry/sentry_apps/token_exchange/refresher.py +++ b/src/sentry/mediators/token_exchange/refresher.py @@ -1,10 +1,10 @@ -from dataclasses import dataclass - -from django.db import router, transaction +from django.db import router from django.utils.functional import cached_property from sentry import analytics from sentry.coreapi import APIUnauthorized +from sentry.mediators.mediator import Mediator +from sentry.mediators.param import Param from sentry.mediators.token_exchange.util import token_expiration from sentry.mediators.token_exchange.validator import Validator from sentry.models.apiapplication import ApiApplication @@ -15,39 +15,42 @@ from sentry.users.models.user import User -@dataclass -class Refresher: +class Refresher(Mediator): """ Exchanges a Refresh Token for a new Access Token """ - install: RpcSentryAppInstallation - refresh_token: str - client_id: str - user: User - - def run(self) -> ApiToken: - with transaction.atomic(router.db_for_write(ApiToken)): - self._validate() - self.token.delete() + install = Param(RpcSentryAppInstallation) + refresh_token = Param(str) + client_id = Param(str) + user = Param(User) + using = router.db_for_write(User) - self.record_analytics() + def call(self): + self._validate() + self._delete_token() return self._create_new_token() - def record_analytics(self) -> None: + def record_analytics(self): analytics.record( "sentry_app.token_exchanged", sentry_app_installation_id=self.install.id, exchange_type="refresh", ) - def _validate(self) -> None: + def _validate(self): Validator.run(install=self.install, client_id=self.client_id, user=self.user) + self._validate_token_belongs_to_app() + + def _validate_token_belongs_to_app(self): if self.token.application != self.application: - raise APIUnauthorized("Token does not belong to the application") + raise APIUnauthorized + + def _delete_token(self): + self.token.delete() - def _create_new_token(self) -> ApiToken: + def _create_new_token(self): token = ApiToken.objects.create( user=self.user, application=self.application, @@ -61,22 +64,22 @@ def _create_new_token(self) -> ApiToken: return token @cached_property - def token(self) -> ApiToken: + def token(self): try: return ApiToken.objects.get(refresh_token=self.refresh_token) except ApiToken.DoesNotExist: - raise APIUnauthorized("Token does not exist") + raise APIUnauthorized @cached_property - def application(self) -> ApiApplication: + def application(self): try: return ApiApplication.objects.get(client_id=self.client_id) except ApiApplication.DoesNotExist: - raise APIUnauthorized("Application does not exist") + raise APIUnauthorized @property - def sentry_app(self) -> SentryApp: + def sentry_app(self): try: return self.application.sentry_app except SentryApp.DoesNotExist: - raise APIUnauthorized("Sentry App does not exist") + raise APIUnauthorized diff --git a/src/sentry/migrations/0777_add_related_name_to_dashboard_permissions.py b/src/sentry/migrations/0777_add_related_name_to_dashboard_permissions.py new file mode 100644 index 00000000000000..4617ea31099eaf --- /dev/null +++ b/src/sentry/migrations/0777_add_related_name_to_dashboard_permissions.py @@ -0,0 +1,38 @@ +# Generated by Django 5.1.1 on 2024-10-15 18:09 + +import django.db.models.deletion +from django.db import migrations, models + +from sentry.new_migrations.migrations import CheckedMigration + + +class Migration(CheckedMigration): + # This flag is used to mark that a migration shouldn't be automatically run in production. 
+ # This should only be used for operations where it's safe to run the migration after your + # code has deployed. So this should not be used for most operations that alter the schema + # of a table. + # Here are some things that make sense to mark as post deployment: + # - Large data migrations. Typically we want these to be run manually so that they can be + # monitored and not block the deploy for a long period of time while they run. + # - Adding indexes to large tables. Since this can take a long time, we'd generally prefer to + # run this outside deployments so that we don't block them. Note that while adding an index + # is a schema change, it's completely safe to run the operation after the code has deployed. + # Once deployed, run these manually via: https://develop.sentry.dev/database-migrations/#migration-deployment + + is_post_deployment = False + + dependencies = [ + ("sentry", "0776_drop_group_score_in_database"), + ] + + operations = [ + migrations.AlterField( + model_name="dashboardpermissions", + name="dashboard", + field=models.OneToOneField( + on_delete=django.db.models.deletion.CASCADE, + related_name="permissions", + to="sentry.dashboard", + ), + ), + ] diff --git a/src/sentry/models/dashboard_permissions.py b/src/sentry/models/dashboard_permissions.py index f52646252bc667..56d98180b99630 100644 --- a/src/sentry/models/dashboard_permissions.py +++ b/src/sentry/models/dashboard_permissions.py @@ -16,7 +16,9 @@ class DashboardPermissions(Model): __relocation_scope__ = RelocationScope.Organization is_creator_only_editable = models.BooleanField(default=False) - dashboard = models.OneToOneField("sentry.Dashboard", on_delete=models.CASCADE) + dashboard = models.OneToOneField( + "sentry.Dashboard", on_delete=models.CASCADE, related_name="permissions" + ) class Meta: app_label = "sentry" diff --git a/src/sentry/monitors/processing_errors/manager.py b/src/sentry/monitors/processing_errors/manager.py index 0d6e5c08fdad6c..af4d1e839710ab 100644 --- a/src/sentry/monitors/processing_errors/manager.py +++ b/src/sentry/monitors/processing_errors/manager.py @@ -10,7 +10,7 @@ from redis.client import StrictRedis from rediscluster import RedisCluster -from sentry import analytics, features +from sentry import analytics from sentry.models.organization import Organization from sentry.models.project import Project from sentry.monitors.models import Monitor @@ -180,8 +180,6 @@ def handle_processing_errors(item: CheckinItem, error: ProcessingErrorsException try: project = Project.objects.get_from_cache(id=item.message["project_id"]) organization = Organization.objects.get_from_cache(id=project.organization_id) - if not features.has("organizations:crons-write-user-feedback", organization): - return metrics.incr( "monitors.checkin.handle_processing_error", diff --git a/src/sentry/options/defaults.py b/src/sentry/options/defaults.py index eedde491bb11c9..0c8807fd51fe5e 100644 --- a/src/sentry/options/defaults.py +++ b/src/sentry/options/defaults.py @@ -2793,11 +2793,3 @@ default=True, flags=FLAG_AUTOMATOR_MODIFIABLE, ) - -# TODO: Temporary, to be removed -register( - "split_queue_task_router.enable", - type=Bool, - default=False, - flags=FLAG_AUTOMATOR_MODIFIABLE, -) diff --git a/src/sentry/options/manager.py b/src/sentry/options/manager.py index f8d2b98df824ba..aeaf12ff785948 100644 --- a/src/sentry/options/manager.py +++ b/src/sentry/options/manager.py @@ -298,11 +298,6 @@ def get(self, key: str, silent=False): if not (opt.flags & FLAG_NOSTORE): result = self.store.get(opt, silent=silent) if 
result is not None: - # HACK(mattrobenolt): SENTRY_URL_PREFIX must be kept in sync - # when reading values from the database. This should - # be replaced by a signal. - if key == "system.url-prefix": - settings.SENTRY_URL_PREFIX = result return result # Some values we don't want to allow them to be configured through diff --git a/src/sentry/replays/usecases/query/conditions/aggregate.py b/src/sentry/replays/usecases/query/conditions/aggregate.py index f5079b059ee923..ccbfd43b899b93 100644 --- a/src/sentry/replays/usecases/query/conditions/aggregate.py +++ b/src/sentry/replays/usecases/query/conditions/aggregate.py @@ -25,7 +25,7 @@ from uuid import UUID -from snuba_sdk import And, Condition, Function, Op, Or +from snuba_sdk import And, Condition, Or from snuba_sdk.expressions import Expression from sentry.replays.lib.new_query.conditions import ( @@ -40,6 +40,14 @@ from sentry.replays.lib.new_query.utils import contains, does_not_contain +def _nonempty_str(expression: Expression) -> Condition: + return StringScalar.visit_neq(expression, "") + + +def _nonnull_ipv4(expression: Expression) -> Condition: + return IPv4Scalar.visit_neq(expression, None) + + class SumOfIntegerIdScalar(GenericBase): @staticmethod def visit_eq(expression: Expression, value: int) -> Condition: @@ -61,65 +69,75 @@ def visit_not_in(expression: Expression, value: list[int]) -> Condition: class SumOfIPv4Scalar(GenericBase): @staticmethod def visit_eq(expression: Expression, value: str | None) -> Condition: + if value is None: + return does_not_contain(_nonnull_ipv4(expression)) return contains(IPv4Scalar.visit_eq(expression, value)) @staticmethod def visit_neq(expression: Expression, value: str | None) -> Condition: + if value is None: + return contains(_nonnull_ipv4(expression)) return does_not_contain(IPv4Scalar.visit_eq(expression, value)) @staticmethod def visit_in(expression: Expression, value_list: list[str | None]) -> Condition: + nonempty_case = contains( + IPv4Scalar.visit_in(expression, [v for v in value_list if v is not None]) + ) if None in value_list: - contains_cond = contains( - IPv4Scalar.visit_in(expression, [v for v in value_list if v is not None]) - ) - return Or( - conditions=[ - contains_cond, - Condition(Function("isNull", parameters=[expression]), Op.EQ, 1), - ] - ) - return contains(IPv4Scalar.visit_in(expression, value_list)) + return Or(conditions=[SumOfIPv4Scalar.visit_eq(expression, None), nonempty_case]) + return nonempty_case @staticmethod def visit_not_in(expression: Expression, value_list: list[str | None]) -> Condition: + nonempty_case = does_not_contain( + IPv4Scalar.visit_in(expression, [v for v in value_list if v is not None]) + ) if None in value_list: - does_not_contain_cond = does_not_contain( - IPv4Scalar.visit_in(expression, [v for v in value_list if v is not None]) - ) - return And( - conditions=[ - does_not_contain_cond, - Condition(Function("isNull", parameters=[expression]), Op.EQ, 0), - ] - ) - return does_not_contain(IPv4Scalar.visit_in(expression, value_list)) + return And(conditions=[SumOfIPv4Scalar.visit_neq(expression, None), nonempty_case]) + return nonempty_case class SumOfStringScalar(GenericBase): @staticmethod def visit_eq(expression: Expression, value: str) -> Condition: + if value == "": + return does_not_contain(_nonempty_str(expression)) return contains(StringScalar.visit_eq(expression, value)) @staticmethod def visit_neq(expression: Expression, value: str) -> Condition: + if value == "": + return contains(_nonempty_str(expression)) return 
does_not_contain(StringScalar.visit_eq(expression, value)) @staticmethod def visit_match(expression: Expression, value: str) -> Condition: + # Assumes this is only called on wildcard strings, so `value` is non-empty. return contains(StringScalar.visit_match(expression, value)) @staticmethod def visit_not_match(expression: Expression, value: str) -> Condition: + # Assumes this is only called on wildcard strings, so `value` is non-empty. return does_not_contain(StringScalar.visit_match(expression, value)) @staticmethod - def visit_in(expression: Expression, value: list[str]) -> Condition: - return contains(StringScalar.visit_in(expression, value)) + def visit_in(expression: Expression, value_list: list[str]) -> Condition: + nonempty_case = contains( + StringScalar.visit_in(expression, [v for v in value_list if v != ""]) + ) + if "" in value_list: + return Or(conditions=[SumOfStringScalar.visit_eq(expression, ""), nonempty_case]) + return nonempty_case @staticmethod - def visit_not_in(expression: Expression, value: list[str]) -> Condition: - return does_not_contain(StringScalar.visit_in(expression, value)) + def visit_not_in(expression: Expression, value_list: list[str]) -> Condition: + nonempty_case = does_not_contain( + StringScalar.visit_in(expression, [v for v in value_list if v != ""]) + ) + if "" in value_list: + return And(conditions=[SumOfStringScalar.visit_neq(expression, ""), nonempty_case]) + return nonempty_case class SumOfStringArray(GenericBase): diff --git a/src/sentry/replays/usecases/query/configs/scalar.py b/src/sentry/replays/usecases/query/configs/scalar.py index 44bf302ebb8c48..ecacafd47cecf3 100644 --- a/src/sentry/replays/usecases/query/configs/scalar.py +++ b/src/sentry/replays/usecases/query/configs/scalar.py @@ -7,19 +7,13 @@ from sentry.api.event_search import ParenExpression, SearchFilter from sentry.replays.lib.new_query.conditions import ( - IPv4Scalar, NonEmptyStringScalar, StringArray, StringScalar, UUIDArray, ) -from sentry.replays.lib.new_query.fields import ( - FieldProtocol, - NullableStringColumnField, - StringColumnField, - UUIDColumnField, -) -from sentry.replays.lib.new_query.parsers import parse_ipv4, parse_str, parse_uuid +from sentry.replays.lib.new_query.fields import FieldProtocol, StringColumnField, UUIDColumnField +from sentry.replays.lib.new_query.parsers import parse_str, parse_uuid from sentry.replays.lib.selector.parse import parse_selector from sentry.replays.usecases.query.conditions import ( ClickSelectorComposite, @@ -68,10 +62,6 @@ def string_field(column_name: str) -> StringColumnField: "error_ids": ComputedField(parse_uuid, ErrorIdScalar), "trace_ids": UUIDColumnField("trace_ids", parse_uuid, UUIDArray), "urls": StringColumnField("urls", parse_str, StringArray), - "user.email": StringColumnField("user_email", parse_str, NonEmptyStringScalar), - "user.id": StringColumnField("user_id", parse_str, NonEmptyStringScalar), - "user.ip_address": NullableStringColumnField("ip_address_v4", parse_ipv4, IPv4Scalar), - "user.username": StringColumnField("user_name", parse_str, NonEmptyStringScalar), } # Aliases @@ -79,7 +69,6 @@ def string_field(column_name: str) -> StringColumnField: varying_search_config["trace_id"] = varying_search_config["trace_ids"] varying_search_config["trace"] = varying_search_config["trace_ids"] varying_search_config["url"] = varying_search_config["urls"] -varying_search_config["user.ip"] = varying_search_config["user.ip_address"] varying_search_config["*"] = TagField(query=TagScalar) diff --git 
diff --git a/src/sentry/runner/initializer.py b/src/sentry/runner/initializer.py
index 9408bd5b5f51a1..d94b8433d1fb06 100644
--- a/src/sentry/runner/initializer.py
+++ b/src/sentry/runner/initializer.py
@@ -203,15 +203,6 @@ def bootstrap_options(settings: Any, config: str | None = None) -> None:
     # these will be validated later after bootstrapping
     for k, v in options.items():
         settings.SENTRY_OPTIONS[k] = v
-        # If SENTRY_URL_PREFIX is used in config, show deprecation warning and
-        # set the newer SENTRY_OPTIONS['system.url-prefix']. Needs to be here
-        # to check from the config file directly before the django setup is done.
-        # TODO: delete when SENTRY_URL_PREFIX is removed
-        if k == "SENTRY_URL_PREFIX":
-            warnings.warn(
-                DeprecatedSettingWarning("SENTRY_URL_PREFIX", "SENTRY_OPTIONS['system.url-prefix']")
-            )
-            settings.SENTRY_OPTIONS["system.url-prefix"] = v

     # Now go back through all of SENTRY_OPTIONS and promote
     # back into settings. This catches the case when values are defined
@@ -582,13 +573,6 @@ def apply_legacy_settings(settings: Any) -> None:
     # option.)
     settings.SENTRY_REDIS_OPTIONS = options.get("redis.clusters")["default"]

-    if not hasattr(settings, "SENTRY_URL_PREFIX"):
-        url_prefix = options.get("system.url-prefix", silent=True)
-        if not url_prefix:
-            # HACK: We need to have some value here for backwards compatibility
-            url_prefix = "http://sentry.example.com"
-        settings.SENTRY_URL_PREFIX = url_prefix
-
     if settings.TIME_ZONE != "UTC":
         # non-UTC timezones are not supported
         show_big_error("TIME_ZONE should be set to UTC")
diff --git a/src/sentry/search/events/builder/base.py b/src/sentry/search/events/builder/base.py
index c1b7c3f899efbe..b02a6cf7b181fe 100644
--- a/src/sentry/search/events/builder/base.py
+++ b/src/sentry/search/events/builder/base.py
@@ -309,7 +309,7 @@ def resolve_time_conditions(self) -> None:
         self.end = self.params.end

     def resolve_column_name(self, col: str) -> str:
-        # TODO when utils/snuba.py becomes typed don't need this extra annotation
+        # TODO: when utils/snuba.py becomes typed don't need this extra annotation
         column_resolver: Callable[[str], str] = resolve_column(self.dataset)
         column_name = column_resolver(col)
         # If the original column was passed in as tag[X], then there won't be a conflict
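With both shims gone, initializer.py neither promotes a legacy SENTRY_URL_PREFIX setting into the option store nor back-fills settings.SENTRY_URL_PREFIX with a placeholder; the URL prefix is read only from the option the deleted code pointed at. A hypothetical sentry.conf.py fragment (the host is a placeholder; only the option key comes from the removed shim):

    # sentry.conf.py (illustrative)
    SENTRY_OPTIONS["system.url-prefix"] = "https://sentry.example.com"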
diff --git a/src/sentry/search/events/datasets/spans_indexed.py b/src/sentry/search/events/datasets/spans_indexed.py
index ac2d7333d7d0f4..a5235437f7f520 100644
--- a/src/sentry/search/events/datasets/spans_indexed.py
+++ b/src/sentry/search/events/datasets/spans_indexed.py
@@ -565,12 +565,208 @@ def _resolve_span_duration(self, alias: str) -> SelectType:
             alias,
         )

+    def _resolve_aggregate_if(
+        self, aggregate: str
+    ) -> Callable[[Mapping[str, str | Column | SelectType | int | float], str | None], SelectType]:
+        def extract_attr(
+            column: str | Column | SelectType | int | float,
+        ) -> tuple[Column, str] | None:
+            # This check exists to handle the temporary prefixing.
+            # Once that's removed, this condition should become much simpler
+
+            if not isinstance(column, Function):
+                return None
+
+            if column.function != "if":
+                return None
+
+            if len(column.parameters) != 3:
+                return None
+
+            if (
+                not isinstance(column.parameters[0], Function)
+                or column.parameters[0].function != "mapContains"
+                or len(column.parameters[0].parameters) != 2
+            ):
+                return None
+
+            attr_col = column.parameters[0].parameters[0]
+            attr_name = column.parameters[0].parameters[1]
+
+            if not isinstance(attr_col, Column) or not isinstance(attr_name, str):
+                return None
+
+            return attr_col, attr_name
+
+        def resolve_aggregate_if(
+            args: Mapping[str, str | Column | SelectType | int | float],
+            alias: str | None = None,
+        ) -> SelectType:
+            attr = extract_attr(args["column"])
+
+            # If we're not aggregating on an attr column,
+            # we can directly aggregate on the column
+            if attr is None:
+                return Function(
+                    f"{aggregate}",
+                    [args["column"]],
+                    alias,
+                )
+
+            # When aggregating on an attr column, we have to make sure that we skip rows
+            # where the attr does not exist.
+            attr_col, attr_name = attr
+
+            function = (
+                aggregate.replace("quantile", "quantileIf")
+                if aggregate.startswith("quantile(")
+                else f"{aggregate}If"
+            )
+
+            unprefixed = Function("mapContains", [attr_col, attr_name])
+            prefixed = Function("mapContains", [attr_col, f"sentry.{attr_name}"])
+
+            return Function(
+                function,
+                [
+                    args["column"],
+                    Function("or", [unprefixed, prefixed]),
+                ],
+                alias,
+            )
+
+        return resolve_aggregate_if
+
     @property
     def function_converter(self) -> dict[str, SnQLFunction]:
-        existing_functions = super().function_converter
         function_converter = {
             function.name: function
             for function in [
+                SnQLFunction(
+                    "eps",
+                    snql_aggregate=lambda args, alias: Function(
+                        "divide", [Function("count", []), args["interval"]], alias
+                    ),
+                    optional_args=[IntervalDefault("interval", 1, None)],
+                    default_result_type="rate",
+                ),
+                SnQLFunction(
+                    "epm",
+                    snql_aggregate=lambda args, alias: Function(
+                        "divide",
+                        [Function("count", []), Function("divide", [args["interval"], 60])],
+                        alias,
+                    ),
+                    optional_args=[IntervalDefault("interval", 1, None)],
+                    default_result_type="rate",
+                ),
+                SnQLFunction(
+                    "count",
+                    optional_args=[
+                        with_default("span.duration", NumericColumn("column", spans=True)),
+                    ],
+                    snql_aggregate=self._resolve_aggregate_if("count"),
+                    default_result_type="integer",
+                ),
+                SnQLFunction(
+                    "count_unique",
+                    required_args=[ColumnTagArg("column")],
+                    snql_aggregate=lambda args, alias: Function("uniq", [args["column"]], alias),
+                    default_result_type="integer",
+                ),
+                SnQLFunction(
+                    "sum",
+                    required_args=[NumericColumn("column", spans=True)],
+                    snql_aggregate=self._resolve_aggregate_if("sum"),
+                    result_type_fn=self.reflective_result_type(),
+                    default_result_type="duration",
+                ),
+                SnQLFunction(
+                    "avg",
+                    optional_args=[
+                        with_default("span.duration", NumericColumn("column", spans=True)),
+                    ],
+                    snql_aggregate=self._resolve_aggregate_if("avg"),
+                    result_type_fn=self.reflective_result_type(),
+                    default_result_type="duration",
+                    redundant_grouping=True,
+                ),
+                SnQLFunction(
+                    "p50",
+                    optional_args=[
+                        with_default("span.duration", NumericColumn("column", spans=True)),
+                    ],
+                    snql_aggregate=self._resolve_aggregate_if("quantile(0.5)"),
+                    result_type_fn=self.reflective_result_type(),
+                    default_result_type="duration",
+                    redundant_grouping=True,
+                ),
+                SnQLFunction(
+                    "p75",
+                    optional_args=[
+                        with_default("span.duration", NumericColumn("column", spans=True)),
+                    ],
+                    snql_aggregate=self._resolve_aggregate_if("quantile(0.75)"),
+                    result_type_fn=self.reflective_result_type(),
+                    default_result_type="duration",
+                    redundant_grouping=True,
+                ),
+                SnQLFunction(
+                    "p90",
+                    optional_args=[
+                        with_default("span.duration", NumericColumn("column", spans=True)),
+                    ],
+                    snql_aggregate=self._resolve_aggregate_if("quantile(0.90)"),
+                    result_type_fn=self.reflective_result_type(),
+                    default_result_type="duration",
+                    redundant_grouping=True,
+                ),
+                SnQLFunction(
+                    "p95",
+                    optional_args=[
+                        with_default("span.duration", NumericColumn("column", spans=True)),
+                    ],
+                    snql_aggregate=self._resolve_aggregate_if("quantile(0.95)"),
+                    result_type_fn=self.reflective_result_type(),
+                    default_result_type="duration",
+                    redundant_grouping=True,
+                ),
+                SnQLFunction(
+                    "p99",
+                    optional_args=[
+                        with_default("span.duration", NumericColumn("column", spans=True)),
+                    ],
+                    snql_aggregate=self._resolve_aggregate_if("quantile(0.99)"),
+                    result_type_fn=self.reflective_result_type(),
+                    default_result_type="duration",
+                    redundant_grouping=True,
+                ),
+                SnQLFunction(
+                    "p100",
+                    optional_args=[
+                        with_default("span.duration", NumericColumn("column", spans=True)),
+                    ],
+                    snql_aggregate=self._resolve_aggregate_if("max"),
+                    result_type_fn=self.reflective_result_type(),
+                    default_result_type="duration",
+                    redundant_grouping=True,
+                ),
+                SnQLFunction(
+                    "min",
+                    required_args=[NumericColumn("column", spans=True)],
+                    snql_aggregate=self._resolve_aggregate_if("min"),
+                    result_type_fn=self.reflective_result_type(),
+                    default_result_type="duration",
+                    redundant_grouping=True,
+                ),
+                SnQLFunction(
+                    "max",
+                    required_args=[NumericColumn("column", spans=True)],
+                    snql_aggregate=self._resolve_aggregate_if("max"),
+                    result_type_fn=self.reflective_result_type(),
+                    default_result_type="duration",
+                    redundant_grouping=True,
+                ),
                 SnQLFunction(
                     "count_weighted",
                     optional_args=[NullColumn("column")],
@@ -722,8 +918,11 @@ def function_converter(self) -> dict[str, SnQLFunction]:
             ]
         }

-        existing_functions.update(function_converter)
-        return existing_functions
+        for alias, name in constants.SPAN_FUNCTION_ALIASES.items():
+            if name in function_converter:
+                function_converter[alias] = function_converter[name].alias_as(alias)
+
+        return function_converter

     def _resolve_sum_weighted(
         self,
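The shape of what _resolve_aggregate_if emits: when the aggregated column is an attr lookup, the plain aggregate is swapped for ClickHouse's conditional -If combinator, gated on mapContains for both the bare and "sentry."-prefixed attribute keys, so rows missing the attribute are skipped. The renaming rule is small enough to check standalone (plain Python; the asserts just restate the branch above):

    def conditional_aggregate(aggregate: str) -> str:
        # Parameterized aggregates keep their parameters: the "If" suffix goes
        # on the combinator name, not at the end of the whole string.
        if aggregate.startswith("quantile("):
            return aggregate.replace("quantile", "quantileIf")
        return f"{aggregate}If"

    assert conditional_aggregate("avg") == "avgIf"
    assert conditional_aggregate("quantile(0.95)") == "quantileIf(0.95)"

The alias loop that replaces the super().function_converter merge then re-registers each function under its constants.SPAN_FUNCTION_ALIASES name, so aliases inherit the same conditional behavior.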
diff --git a/src/sentry/search/snuba/executors.py b/src/sentry/search/snuba/executors.py
index 7c3e1e9017ea8f..8f7fe5086eab0b 100644
--- a/src/sentry/search/snuba/executors.py
+++ b/src/sentry/search/snuba/executors.py
@@ -1027,7 +1027,7 @@ def query(
             # * we started with Postgres candidates and so only do one Snuba query max
             # * the paginator is returning enough results to satisfy the query (>= the limit)
             # * there are no more groups in Snuba to post-filter
-            # TODO do we actually have to rebuild this SequencePaginator every time
+            # TODO: do we actually have to rebuild this SequencePaginator every time
             # or can we just make it after we've broken out of the loop?
             paginator_results = SequencePaginator(
                 [(score, id) for (id, score) in result_groups], reverse=True, **paginator_options
diff --git a/src/sentry/seer/anomaly_detection/get_anomaly_data.py b/src/sentry/seer/anomaly_detection/get_anomaly_data.py
index db1e0d71b3c9d1..ab28bedf393723 100644
--- a/src/sentry/seer/anomaly_detection/get_anomaly_data.py
+++ b/src/sentry/seer/anomaly_detection/get_anomaly_data.py
@@ -39,10 +39,10 @@ def get_anomaly_data_from_seer(
     # XXX: we know we have these things because the serializer makes sure we do, but mypy insists
     if (
-        not snuba_query.time_window
+        alert_rule.threshold_type is None
         or not alert_rule.sensitivity
-        or not alert_rule.threshold_type
         or not alert_rule.seasonality
+        or not snuba_query.time_window
     ):
         return None
diff --git a/src/sentry/sentry_apps/api/endpoints/sentry_app_authorizations.py b/src/sentry/sentry_apps/api/endpoints/sentry_app_authorizations.py
index 1ff39ed50c16f4..d705b2bd5a71e3 100644
--- a/src/sentry/sentry_apps/api/endpoints/sentry_app_authorizations.py
+++ b/src/sentry/sentry_apps/api/endpoints/sentry_app_authorizations.py
@@ -10,10 +10,10 @@
 from sentry.api.serializers.models.apitoken import ApiTokenSerializer
 from sentry.auth.services.auth.impl import promote_request_api_user
 from sentry.coreapi import APIUnauthorized
+from sentry.mediators.token_exchange.refresher import Refresher
 from sentry.mediators.token_exchange.util import GrantTypes
 from sentry.sentry_apps.api.bases.sentryapps import SentryAppAuthorizationsBaseEndpoint
 from sentry.sentry_apps.token_exchange.grant_exchanger import GrantExchanger
-from sentry.sentry_apps.token_exchange.refresher import Refresher

 logger = logging.getLogger(__name__)

@@ -41,12 +41,12 @@ def post(self, request: Request, installation) -> Response:
                     user=promote_request_api_user(request),
                 ).run()
             elif request.json_body.get("grant_type") == GrantTypes.REFRESH:
-                token = Refresher(
+                token = Refresher.run(
                     install=installation,
                     refresh_token=request.json_body.get("refresh_token"),
                     client_id=request.json_body.get("client_id"),
                     user=promote_request_api_user(request),
-                ).run()
+                )
             else:
                 return Response({"error": "Invalid grant_type"}, status=403)
         except APIUnauthorized as e:
diff --git a/src/sentry/sentry_metrics/indexer/cache.py b/src/sentry/sentry_metrics/indexer/cache.py
index 0676acf5e59d7d..4ced794ad54026 100644
--- a/src/sentry/sentry_metrics/indexer/cache.py
+++ b/src/sentry/sentry_metrics/indexer/cache.py
@@ -284,7 +284,7 @@ def resolve(self, use_case_id: UseCaseID, org_id: int, string: str) -> int | None:
                 _INDEXER_CACHE_RESOLVE_METRIC,
                 tags={"cache_hit": "false", "use_case": use_case_id.value},
             )
-            # TODO this random rollout is backwards
+            # TODO: this random rollout is backwards
             if random.random() >= options.get(
                 "sentry-metrics.indexer.disable-memcache-replenish-rollout"
            ):
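The TODO kept above flags that the replenish gate reads inverted relative to the usual option-based rollout idiom, where a rate in [0, 1] is the fraction of calls taking the gated path and the gate fires when random() falls below it. A generic sketch of that idiom (purely illustrative; not a proposed fix for the real disable-memcache-replenish-rollout semantics):

    import random

    def in_rollout(rate: float) -> bool:
        # rate is the fraction of calls that take the gated path:
        # 0.0 -> never, 1.0 -> always.
        return random.random() < rate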
diff --git a/src/sentry/snuba/entity_subscription.py b/src/sentry/snuba/entity_subscription.py
index 487426bf33d543..417e0d1697859c 100644
--- a/src/sentry/snuba/entity_subscription.py
+++ b/src/sentry/snuba/entity_subscription.py
@@ -16,6 +16,7 @@
 from sentry.search.events.builder.base import BaseQueryBuilder
 from sentry.search.events.builder.discover import DiscoverQueryBuilder
 from sentry.search.events.builder.metrics import AlertMetricsQueryBuilder
+from sentry.search.events.builder.spans_indexed import SpansEAPQueryBuilder
 from sentry.search.events.types import ParamsType, QueryBuilderConfig
 from sentry.sentry_metrics.use_case_id_registry import UseCaseID
 from sentry.sentry_metrics.utils import (
@@ -47,6 +48,7 @@
     EntityKey.GenericMetricsGauges: "timestamp",
     EntityKey.MetricsCounters: "timestamp",
     EntityKey.MetricsSets: "timestamp",
+    EntityKey.EAPSpans: "timestamp",
 }
 CRASH_RATE_ALERT_AGGREGATE_RE = (
     r"^percentage\([ ]*(sessions_crashed|users_crashed)[ ]*\,[ ]*(sessions|users)[ ]*\)"
@@ -217,6 +219,41 @@ class PerformanceTransactionsEntitySubscription(BaseEventsAndTransactionEntitySubscription):
     dataset = Dataset.Transactions


+class PerformanceSpansEAPEntitySubscription(BaseEventsAndTransactionEntitySubscription):
+    query_type = SnubaQuery.Type.PERFORMANCE
+    dataset = Dataset.EventsAnalyticsPlatform
+
+    def build_query_builder(
+        self,
+        query: str,
+        project_ids: list[int],
+        environment: Environment | None,
+        params: ParamsType | None = None,
+        skip_field_validation_for_entity_subscription_deletion: bool = False,
+    ) -> BaseQueryBuilder:
+        if params is None:
+            params = {}
+
+        params["project_id"] = project_ids
+
+        query = apply_dataset_query_conditions(self.query_type, query, self.event_types)
+        if environment:
+            params["environment"] = environment.name
+
+        return SpansEAPQueryBuilder(
+            dataset=Dataset(self.dataset.value),
+            query=query,
+            selected_columns=[self.aggregate],
+            params=params,
+            offset=None,
+            limit=None,
+            config=QueryBuilderConfig(
+                skip_time_conditions=True,
+                skip_field_validation_for_entity_subscription_deletion=skip_field_validation_for_entity_subscription_deletion,
+            ),
+        )
+
+
 class BaseMetricsEntitySubscription(BaseEntitySubscription, ABC):
     def __init__(
         self, aggregate: str, time_window: int, extra_fields: _EntitySpecificParams | None = None
@@ -453,6 +490,7 @@ def get_snql_aggregations(self) -> list[str]:
     MetricsSetsEntitySubscription,
     PerformanceTransactionsEntitySubscription,
     PerformanceMetricsEntitySubscription,
+    PerformanceSpansEAPEntitySubscription,
 ]


@@ -476,6 +514,8 @@ def get_entity_subscription(
         entity_subscription_cls = PerformanceTransactionsEntitySubscription
     elif dataset in (Dataset.Metrics, Dataset.PerformanceMetrics):
         entity_subscription_cls = PerformanceMetricsEntitySubscription
+    elif dataset == Dataset.EventsAnalyticsPlatform:
+        entity_subscription_cls = PerformanceSpansEAPEntitySubscription
     if query_type == SnubaQuery.Type.CRASH_RATE:
         entity_key = determine_crash_rate_alert_entity(aggregate)
         if entity_key == EntityKey.MetricsCounters:
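The entity_subscription.py change wires the EAP spans dataset end to end: EntityKey.EAPSpans gets a timestamp column, PerformanceSpansEAPEntitySubscription builds a SpansEAPQueryBuilder with skip_time_conditions=True (the subscription machinery, rather than the query itself, presumably owns the time window), and get_entity_subscription grows a dispatch branch. A standalone reduction of that dispatch, with strings standing in for the real Dataset enum and subscription classes:

    def pick_subscription_cls(dataset: str) -> str:
        # Mirrors get_entity_subscription's if/elif chain for performance queries.
        if dataset in ("events", "transactions"):
            return "PerformanceTransactionsEntitySubscription"
        if dataset in ("metrics", "generic_metrics"):
            return "PerformanceMetricsEntitySubscription"
        if dataset == "events_analytics_platform":
            return "PerformanceSpansEAPEntitySubscription"
        raise ValueError(f"unsupported dataset: {dataset}")

    assert pick_subscription_cls("events_analytics_platform") == "PerformanceSpansEAPEntitySubscription"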
diff --git a/src/sentry/snuba/metrics/utils.py b/src/sentry/snuba/metrics/utils.py
index d68225a3676d2a..91e1f3e2a580b8 100644
--- a/src/sentry/snuba/metrics/utils.py
+++ b/src/sentry/snuba/metrics/utils.py
@@ -487,7 +487,7 @@ def to_intervals(
     assert interval_seconds > 0

     # horrible hack for backward compatibility
-    # TODO Try to fix this upstream
+    # TODO: Try to fix this upstream
     if start is None or end is None:
         return None, None, 0
diff --git a/src/sentry/statistical_detectors/issue_platform_adapter.py b/src/sentry/statistical_detectors/issue_platform_adapter.py
index f1c1173d57ca1d..30c937ae50f545 100644
--- a/src/sentry/statistical_detectors/issue_platform_adapter.py
+++ b/src/sentry/statistical_detectors/issue_platform_adapter.py
@@ -24,7 +24,7 @@ def send_regression_to_platform(regression: BreakpointData):
     displayed_new_baseline = round(float(regression["aggregate_range_2"]), 2)

     # For legacy reasons, we're passing project id as project
-    # TODO fix this in the breakpoint microservice and in trends v2
+    # TODO: fix this in the breakpoint microservice and in trends v2
     project_id = int(regression["project"])

     issue_type: type[GroupType] = PerformanceP95EndpointRegressionGroupType
diff --git a/src/sentry/tasks/base.py b/src/sentry/tasks/base.py
index a5648f2bf39db5..3b703d27fd9fb5 100644
--- a/src/sentry/tasks/base.py
+++ b/src/sentry/tasks/base.py
@@ -12,7 +12,6 @@
 from django.conf import settings
 from django.db.models import Model

-from sentry import options
 from sentry.celery import app
 from sentry.silo.base import SiloLimit, SiloMode
 from sentry.utils import metrics
@@ -136,14 +135,9 @@ def _wrapped(*args, **kwargs):

         # If the split task router is configured for the task, always use queues defined
         # in the split task configuration
-        if name in settings.CELERY_SPLIT_QUEUE_TASK_ROUTES:
-            # TODO: remove this option once rolled out
-            if options.get("split_queue_task_router.enable"):
-                q = kwargs.pop("queue")
-                if q:
-                    logger.warning(
-                        "ignoring queue: %s, using value from CELERY_SPLIT_QUEUE_TASK_ROUTES", q
-                    )
+        if name in settings.CELERY_SPLIT_QUEUE_TASK_ROUTES and "queue" in kwargs:
+            q = kwargs.pop("queue")
+            logger.warning("ignoring queue: %s, using value from CELERY_SPLIT_QUEUE_TASK_ROUTES", q)

         # We never use result backends in Celery. Leaving `trail=True` means that if we schedule
         # many tasks from a parent task, each task leaks memory. This can lead to the scheduler
diff --git a/src/sentry/tasks/store.py b/src/sentry/tasks/store.py
index 02c1b74183b4a7..c2fe5942144098 100644
--- a/src/sentry/tasks/store.py
+++ b/src/sentry/tasks/store.py
@@ -609,7 +609,6 @@ def save_event(
 @instrumented_task(
     name="sentry.tasks.store.save_event_transaction",
-    queue="events.save_event_transaction",
     time_limit=65,
     soft_time_limit=60,
     silo_mode=SiloMode.REGION,
diff --git a/src/sentry/tasks/summaries/weekly_reports.py b/src/sentry/tasks/summaries/weekly_reports.py
index a3f6d464eb792e..bc6b79a874c381 100644
--- a/src/sentry/tasks/summaries/weekly_reports.py
+++ b/src/sentry/tasks/summaries/weekly_reports.py
@@ -320,7 +320,7 @@ def send_email(self, template_ctx: Mapping[str, Any], user_id: int) -> None:
             user_project_count=template_ctx["user_project_count"],
         )

-        # TODO see if we can use the UUID to track if the email was sent or not
+        # TODO: see if we can use the UUID to track if the email was sent or not
         logger.info(
             "weekly_report.send_email",
             extra={
diff --git a/src/sentry/templates/sentry/toolbar/login-success.html b/src/sentry/templates/sentry/toolbar/login-success.html
index 6fbbd5508654ba..90529a776a2a28 100644
--- a/src/sentry/templates/sentry/toolbar/login-success.html
+++ b/src/sentry/templates/sentry/toolbar/login-success.html
@@ -16,7 +16,7 @@
 {% script %}