diff --git a/.env b/.env
index 7e92530..1dc24cf 100644
--- a/.env
+++ b/.env
@@ -9,4 +9,83 @@ DB_USERNAME=admin
 DB_PASSWORD="admin"
 DB_SSL_MODE=disable
 DB_POOL_MAX_CONNS=50
-TASK_TIME_OUT=3
\ No newline at end of file
+TASK_TIME_OUT=3
+# OAUTH2_ISSUER=https://dev-736553.okta.com
+OAUTH2_CLIENT_ID=64660401062-s9nm4vp7esak8g9a6im8c9712jkk2lbb.apps.googleusercontent.com
+OAUTH2_CLIENT_SECRET=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
+OAUTH2_PROVIDER=google
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+# // Tasks 1M (run_query)
+# // Worker Count per deployment = 10000
+# // Worker Deployment = 1M/10000 = 100
+# // Timeout = 4s
+# // Retry Count = 5
+# // Delay between each retry = 2s
+# // Max Time = 12s
+# // Min Time = 4s
+# // Failed Task = 20% (Cascade effect) Retry Required
+
+# Initial run:
+# Successful tasks (800,000): 800,000 * 4s = 3,200,000s
+# Failed tasks (200,000): 200,000 * 1s = 200,000s
+# Total time for initial run: 3,400,000s
+
+# Retry 1:
+# Total Retry Task Failed 1 = 200,000 * 20% = 40,000
+# Total Retry Task Success 1 = 200,000 - 40,000 = 160,000
+# Time: (160,000 * 4s) + (40,000 * (4s + 2s)) = 880,000s
+
+# Retry 2:
+# Total Retry Task Failed 2 = 40,000 * 20% = 8,000
+# Total Retry Task Success 2 = 40,000 - 8,000 = 32,000
+# Time: (32,000 * 4s) + (8,000 * (4s + 2s + 2s)) = 192,000s
+
+# Retry 3:
+# Total Retry Task Failed 3 = 8,000 * 20% = 1,600
+# Total Retry Task Success 3 = 8,000 - 1,600 = 6,400
+# Time: (6,400 * 4s) + (1,600 * (4s + 2s + 2s + 2s)) = 41,600s
+
+# Retry 4:
+# Total Retry Task Failed 4 = 1,600 * 20% = 320
+# Total Retry Task Success 4 = 1,600 - 320 = 1,280
+# Time: (1,280 * 4s) + (320 * (4s + 2s + 2s + 2s + 2s)) = 8,960s
+
+# Retry 5:
+# Total Retry Task Failed 5 = 320 * 20% = 64
+# Total Retry Task Success 5 = 320 - 64 = 256
+# Time: (256 * 4s) + (64 * (4s + 2s + 2s + 2s + 2s + 2s)) = 1,920s
+
+# Total Task Failed = 64
+# Total Time = 3,400,000s + 880,000s + 192,000s + 41,600s + 8,960s + 1,920s = 4,524,480s
+# Total Time in hours: 4,524,480s / 3600 ≈ 1,256.80 hours
+# Total Time in days: 1,256.80 hours / 24 ≈ 52.37 days
+
+
+
+
+
+# // TODO(Reconcile Improvement):
+# // To scale up, run the system with 1M dummy tasks and get insights into the system behavior
+# // 1. Track the retry number in history table along with task table
+# // 2. Reconcile based on history of the task rather than task table
+# // 3. Reconcile based on the time difference of state rather than fixed time interval
+# // 4. Check the task latest state and find the time difference and reconcile based on that
+
+
diff --git a/Dockerfile.controller b/Dockerfile.controller
new file mode 100644
index 0000000..a48973e
--- /dev/null
+++ b/Dockerfile.controller
@@ -0,0 +1,33 @@
+# Build the manager binary
+FROM golang:1.22 AS builder
+ARG TARGETOS
+ARG TARGETARCH
+
+WORKDIR /workspace
+# Copy the Go Modules manifests
+COPY go.mod go.mod
+COPY go.sum go.sum
+# cache deps before building and copying source so that we don't need to re-download as much
+# and so that source changes don't invalidate our downloaded layer
+RUN go mod download
+
+# Copy the go source
+COPY cmd/main.go cmd/main.go
+COPY api/ api/
+COPY internal/controller/ internal/controller/
+
+# Build
+# the GOARCH has not a default value to allow the binary be built according to the host where the command
+# was called. For example, if we call make docker-build in a local env which has the Apple Silicon M1 SO
+# the docker BUILDPLATFORM arg will be linux/arm64 when for Apple x86 it will be linux/amd64.
Therefore, +# by leaving it empty we can ensure that the container and binary shipped on it will have the same platform. +RUN CGO_ENABLED=0 GOOS=${TARGETOS:-linux} GOARCH=${TARGETARCH} go build -a -o manager cmd/main.go + +# Use distroless as minimal base image to package the manager binary +# Refer to https://github.com/GoogleContainerTools/distroless for more details +FROM gcr.io/distroless/static:nonroot +WORKDIR / +COPY --from=builder /workspace/manager . +USER 65532:65532 + +ENTRYPOINT ["/manager"] diff --git a/Makefile b/Makefile index 3c2077e..df27f13 100644 --- a/Makefile +++ b/Makefile @@ -8,37 +8,47 @@ SERVER_SRC := ./server/root DASHBOARD_SRC := ./clients/dashboard # Docker configuration -DOCKER_REPO := ghcr.io/yindia +DOCKER_REPO := ghcr.io/bruin-hiring VERSION := $(shell git describe --tags --always --dirty) DOCKER_CLI_NAME := task-cli DOCKER_SERVER_NAME := task-server DOCKER_DASHBOARD_NAME := task-dashboard -# Colors for output +# ANSI color codes for prettier output NO_COLOR := \033[0m OK_COLOR := \033[32;01m ERROR_COLOR := \033[31;01m WARN_COLOR := \033[33;01m +# Declare phony targets (targets that don't represent files) .PHONY: all bootstrap deps check-go check-npm build test docker-build docker-push helm-template helm-lint helm-fmt helm-install helm helm-dep-update + +# Default target: run deps, tests, and build all: deps test build +# Install all dependencies +deps: deps-go deps-npm -deps: check-go check-npm +# Install Go dependencies +deps-go: check-go go mod download go fmt ./... go generate ./... - npm config set @buf:registry https://buf.build/gen/npm/v1/ + +# Install npm dependencies +deps-npm: check-npm npm install --force +# Check if Go is installed check-go: @which go > /dev/null || (echo "$(ERROR_COLOR)Go is not installed$(NO_COLOR)" && exit 1) +# Check if npm is installed check-npm: @which npm > /dev/null || (echo "$(ERROR_COLOR)npm is not installed$(NO_COLOR)" && exit 1) # CLI targets -build-cli: deps +build-cli: deps-go @echo "$(OK_COLOR)==> Building the CLI...$(NO_COLOR)" @CGO_ENABLED=0 go build -v -ldflags="-s -w" -o "$(BUILD_DIR)/$(CLI_NAME)" "$(CLI_SRC)" @@ -55,7 +65,7 @@ docker-push-cli: docker-build-cli docker push $(DOCKER_REPO)/$(DOCKER_CLI_NAME):$(VERSION) # Server targets -build-server: deps +build-server: deps-go @echo "$(OK_COLOR)==> Building the server...$(NO_COLOR)" @CGO_ENABLED=0 go build -v -ldflags="-s -w" -o "$(BUILD_DIR)/$(SERVER_NAME)" "$(SERVER_SRC)" @@ -72,17 +82,17 @@ docker-push-server: docker-build-server docker push $(DOCKER_REPO)/$(DOCKER_SERVER_NAME):$(VERSION) # Dashboard targets -build-dashboard: deps +build-dashboard: deps-npm @echo "$(OK_COLOR)==> Building the dashboard...$(NO_COLOR)" npm run build -run-dashboard: deps +run-dashboard: deps-npm @echo "$(OK_COLOR)==> Running the dashboard...$(NO_COLOR)" npm run dev docker-build-dashboard: @echo "$(OK_COLOR)==> Building Docker image for dashboard...$(NO_COLOR)" - docker build -f Dockerfile.client -t $(DOCKER_REPO)/$(DOCKER_DASHBOARD_NAME):$(VERSION) . + docker build -f Dockerfile.client -t $(DOCKER_REPO)/$(DOCKER_DASHBOARD_NAME):$(VERSION) . 
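
As an aside to the capacity estimate added in the `.env` comments above: the arithmetic there can be reproduced with a short standalone Go program. The sketch below only sums aggregate task-seconds, exactly as those comments do (it does not model the 100 worker deployments running in parallel), and the constant names are illustrative:

```go
package main

import "fmt"

func main() {
	const (
		tasks       = 1_000_000.0 // total tasks (run_query)
		failureRate = 0.20        // 20% of tasks fail each round
		taskTime    = 4.0         // seconds per successful attempt (timeout)
		failTime    = 1.0         // seconds charged to a first-run failure, as in the comments
		retryDelay  = 2.0         // extra delay added per retry round
		maxRetries  = 5
	)

	failed := tasks * failureRate
	total := (tasks-failed)*taskTime + failed*failTime // initial run: 3,400,000s

	for attempt := 1; attempt <= maxRetries; attempt++ {
		stillFailing := failed * failureRate     // 20% fail again
		succeeded := failed - stillFailing       // the rest succeed on this retry
		backoff := retryDelay * float64(attempt) // cumulative delay before this attempt
		total += succeeded*taskTime + stillFailing*(taskTime+backoff)
		failed = stillFailing
	}

	fmt.Printf("tasks still failing after %d retries: %.0f\n", maxRetries, failed)
	fmt.Printf("total: %.0fs ≈ %.1fh ≈ %.1f days\n", total, total/3600, total/3600/24)
}
```

It prints 64 tasks still failing after five retries and a total of 4,524,480s, roughly 1,256.8 hours or 52.4 days of aggregate task time, matching the figures above.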
docker-push-dashboard: docker-build-dashboard @echo "$(OK_COLOR)==> Pushing Docker image for dashboard...$(NO_COLOR)" @@ -112,6 +122,11 @@ helm-fmt: @echo "$(OK_COLOR)==> Formatting Helm charts...$(NO_COLOR)" helm lint --strict charts/task +helm-docs: + @echo "$(OK_COLOR)==> Generating Helm charts README.md...$(NO_COLOR)" + go install github.com/norwoodj/helm-docs/cmd/helm-docs@latest + helm-docs -c ./charts/task/ + helm-install: @echo "$(OK_COLOR)==> Installing Helm charts...$(NO_COLOR)" helm install my-release charts/task @@ -120,10 +135,12 @@ helm-dep-update: @echo "$(OK_COLOR)==> Updating Helm dependencies...$(NO_COLOR)" helm dependency update ./charts/task/ -helm: helm-dep-update helm-template helm-lint helm-fmt +# Run all Helm-related tasks +helm: helm-dep-update helm-template helm-lint helm-fmt helm-docs @echo "$(OK_COLOR)==> Helm template, lint, and format completed.$(NO_COLOR)" +# Set up development environment bootstrap: curl -fsSL https://pixi.sh/install.sh | bash brew install bufbuild/buf/buf - pixi shell + pixi shell \ No newline at end of file diff --git a/README.md b/README.md index 0c21132..081ad0b 100644 --- a/README.md +++ b/README.md @@ -39,12 +39,6 @@ make bootstrap # Run Database docker-compose up -d - -# Install river -go install github.com/riverqueue/river/cmd/river@latest - -# Run River migration (It will create the river resource in the database) -river migrate-up --database-url "postgres://admin:admin@127.0.0.1:5432/tasks?sslmode=disable" ``` @@ -75,7 +69,7 @@ Access at https://127.0.0.1:3000 ### 5. Worker (Data Plane) Start worker instances: ```bash -./bin/task-cli serve -n 10 +./bin/task-cli serve --log-level debug ``` ## Project Structure @@ -113,16 +107,20 @@ graph TD A[Dashboard Client] -->|Sends Request| B(Server) C[CLI Client] -->|Sends Request| B(Server) - %% Server and its connections - B(Server) -->|Reads/Writes| D[(PostgreSQL Database)] - B(Server) -->|Publishes Message| E(RiverQueue) - - %% RabbitMQ and Worker - E(RiverQueue) -->|Sends Message| F(Worker) - F(Worker) -->|Consumes Data| G[Executes Work] + %% Control Plane + subgraph Control Plane + B(Server) -->|Reads/Writes| D[(PostgreSQL Database)] + end - %% Optional back-and-forth communication if needed - F(Worker) -->|Update Status| B[(Server)] + %% Data Plane + subgraph Data Plane + E[Agent] -->|Initiates Connection| B[Server] + B[Server] -->|Publish W| E[Agent] + E -->|Creates CRD| H[CRD] + F[Controller] -->|Watches CRD| H + F -->|Executes Task| J[Task Execution] + F -->|Sends Status Update| B + end ``` This architecture allows for: @@ -267,20 +265,6 @@ graph TD K --> L ``` -Reconciliation Job (Run in every 10 minutes) as background job - -```mermaid -graph TD - %% Reconciliation Job Flow - subgraph Reconciliation Job - M[Start Reconciliation Job] --> N[Get List of Stuck Jobs] - N --> O{Jobs Found?} - O -->|Yes| P[Update Status: Queued] - P --> Q[Enqueue Message to River Queue] - O -->|No| R[End Reconciliation Job] - Q --> R - end -``` ## API Documentation - [Proto Docs](https://buf.build/evalsocket/cloud) @@ -568,54 +552,3 @@ kind delete cluster --name task-service This setup allows you to test the entire Task Service stack, including the server, workers, and dependencies, in a local Kubernetes environment. It's an excellent way to validate the Helm charts and ensure everything works together as expected in a Kubernetes setting. -## Future Improvements - -As we continue to evolve the Task Service, we are exploring several enhancements to improve its scalability, reliability, and management. 
- -### Kubernetes-Native Task Execution - -We are considering leveraging Kubernetes Custom Resource Definitions (CRDs) and custom controllers to manage task execution. This approach would enable us to fully utilize Kubernetes' scheduling and scaling capabilities. - -#### High-Level Architecture - -```mermaid -graph TD - %% Clients - A[Dashboard Client] -->|Sends Request| B(Server) - C[CLI Client] -->|Sends Request| B(Server) - - %% Control Plane - subgraph Control Plane - B(Server) -->|Reads/Writes| D[(PostgreSQL Database)] - end - - %% Data Plane - subgraph Data Plane - E[Agent] -->|Initiates Connection| B[Server] - E -->|Creates CRD| H[CRD] - F[Controller] -->|Watches CRD| H - F -->|Creates Pod for Task| I[Pod] - I -->|Executes Task| J[Task Execution] - F -->|Sends Status Update| B - end -``` - -In this architecture: - -1. Our agent initiates a streaming connection with the control plane and listens for events. -2. When a new task is created, the control plane generates an event for the agent. -3. Upon receiving the event, the agent creates a Custom Resource Definition (CRD) for the task in Kubernetes. -4. A custom Worker Controller watches for these CRDs and creates pods to execute the tasks. -5. Each task runs in its own pod, allowing for improved isolation and resource management. -6. The Worker Controller monitors task execution and sends status updates back to the server. - - -#### Design Advantages - -- **Separation of Concerns**: The customer does not need to open a port; our agent initiates the connection, and only the agent has permission to create resources inside the Data Plane. -- **Single Point of Setup**: Only the agent is required to set up the Data Plane, creating the necessary resources such as the controller, CRD, and other components. -- **Multiple Data Planes**: Customers can run multiple Data Planes with one Control Plane based on their requirements (from bare metal to any cloud). In the future, we can add functionality to route tasks to specific Data Planes as needed. -- **Security**: No sensitive information is stored in the Control Plane; we only retain metadata, ensuring enhanced security. -- **Infinite Scalability**: The architecture supports scaling as needed to accommodate varying workloads. -- **Co-location Flexibility**: Customers can run both the Data Plane and Control Plane together inside their VPC for easier management. -- **Secure Storage**: All input parameters are stored as S3 objects, with only references to these objects kept in the metadata, optimizing storage usage. 
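
The agent added below in `cli/cmd/agent.go` retries both its stream connection and task execution. For reference, a retry helper with exponential backoff along the lines of the one in that file usually takes the following shape; this is a self-contained sketch under that assumption, not the exact code from this PR:

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// retryWithBackoff retries op up to maxAttempts times, waiting
// initialBackoff, 2*initialBackoff, 4*initialBackoff, ... between attempts,
// and aborts early if the context is cancelled.
func retryWithBackoff(ctx context.Context, maxAttempts int, initialBackoff time.Duration, op func() error) error {
	var err error
	for attempt := 0; attempt < maxAttempts; attempt++ {
		if err = op(); err == nil {
			return nil
		}
		if attempt == maxAttempts-1 {
			break
		}
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-time.After(initialBackoff * time.Duration(1<<attempt)):
		}
	}
	return fmt.Errorf("operation failed after %d attempts: %w", maxAttempts, err)
}

func main() {
	// Example: give up after three attempts against an operation that always fails.
	err := retryWithBackoff(context.Background(), 3, 100*time.Millisecond, func() error {
		return errors.New("upstream unavailable")
	})
	fmt.Println(err)
}
```

A caller wraps the flaky operation, for example `retryWithBackoff(ctx, 3, time.Second, func() error { return plugin.Run(params) })`.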
diff --git a/buf.gen.yaml b/buf.gen.yaml index 702964b..907fa20 100644 --- a/buf.gen.yaml +++ b/buf.gen.yaml @@ -11,6 +11,11 @@ plugins: out: pkg/gen/ opt: - paths=source_relative + - plugin: buf.build/grpc/go:v1.5.1 + out: pkg/gen/ + opt: + - paths=source_relative + - plugin: buf.build/protocolbuffers/go out: pkg/gen/ opt: diff --git a/cli/cmd/agent.go b/cli/cmd/agent.go new file mode 100644 index 0000000..985a14e --- /dev/null +++ b/cli/cmd/agent.go @@ -0,0 +1,215 @@ +package cmd + +import ( + "context" + "fmt" + "log/slog" + "net/http" + "sync" + taskApi "task/controller/api/v1" + v1 "task/pkg/gen/cloud/v1" + "task/pkg/gen/cloud/v1/cloudv1connect" + k8s "task/pkg/k8s" + "task/pkg/plugins" + "task/pkg/x" + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "connectrpc.com/connect" + "github.com/spf13/cobra" +) + +var serveCmd = &cobra.Command{ + Use: "serve", + Short: "Run the workflow orchestration server", + Long: `Start the workflow orchestration server and continuously stream task updates.`, + Example: ` task serve`, + Args: cobra.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + return runWorkflowOrchestration(cmd.Context()) + }, +} + +// Number of worker goroutines +var numWorkers = 1000 + +func init() { + rootCmd.AddCommand(serveCmd) +} + +// runWorkflowOrchestration starts the workflow orchestration server and handles task updates. +func runWorkflowOrchestration(ctx context.Context) error { + logger := slog.With("component", "workflow_orchestration") + logger.Info("Starting workflow orchestration server") + + // Create a cancelable context for graceful shutdown + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + // Create a WaitGroup to wait for all goroutines to finish + var wg sync.WaitGroup + + for { + select { + case <-ctx.Done(): + logger.Info("Shutting down gracefully...") + wg.Wait() + logger.Info("Workflow orchestration server stopped") + return nil + default: + if err := runStreamConnection(ctx, &wg, logger); err != nil { + logger.Error("Stream connection error", "error", err) + time.Sleep(5 * time.Second) // Wait before retrying + continue + } + } + } +} + +func runStreamConnection(ctx context.Context, wg *sync.WaitGroup, logger *slog.Logger) error { + + var err error + + client := cloudv1connect.NewTaskManagementServiceClient(http.DefaultClient, "http://localhost:8080") + k8sClient, err := k8s.NewK8sClient("/Users/yuvraj/.kube/config") + if err != nil { + return fmt.Errorf("failed to create k8s client: %w", err) + } + go sendPeriodicRequests(ctx, logger, client) // Pass stream as a pointer + + stream, err := client.PullEvents(ctx, connect.NewRequest(&v1.PullEventsRequest{})) + if err != nil { + return fmt.Errorf("failed to start stream: %w", err) + } + + for { + ok := stream.Receive() + if !ok { + return fmt.Errorf("failed to receive response: %w", err) + } + + go processWork(ctx, stream.Msg(), logger, k8sClient) + } +} + +// sendPeriodicRequests sends periodic heartbeat requests to the server. 
+func sendPeriodicRequests(ctx context.Context, logger *slog.Logger, client cloudv1connect.TaskManagementServiceClient) { + ticker := time.NewTicker(5 * time.Second) + defer ticker.Stop() + for { + select { + case <-ctx.Done(): + logger.Info("Stopping periodic requests due to context cancellation") + return + case <-ticker.C: + _, err := client.Heartbeat(ctx, connect.NewRequest(&v1.HeartbeatRequest{ + Timestamp: time.Now().Format(time.RFC3339), + })) + + if err != nil { + logger.Error("Error sending heartbeat request", "error", err) + continue + } + logger.Debug("Sent periodic heartbeat request") + } + } +} + +func processWork(ctx context.Context, task *v1.PullEventsResponse, logger *slog.Logger, k8sClient *k8s.K8s) { + + _, err := k8sClient.CreateTask(&taskApi.Task{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("task-%d", task.Work.Task.Id), + Namespace: "test", + }, + Spec: taskApi.TaskSpec{ + ID: task.Work.Task.Id, + Type: task.Work.Task.Type, + Payload: taskApi.Payload{ + Parameters: task.Work.Task.Payload.Parameters, + }, + Status: int32(task.Work.Task.Status), + Description: task.Work.Task.Description, + }, + }) + if err != nil { + logger.Error("Failed to create task", "error", err, "task", task) + return + } +} + +// processWorkflowUpdate handles different types of responses and returns the workflow state. +func processWorkflowUpdate(ctx context.Context, task *v1.PullEventsResponse, logger *slog.Logger) (v1.TaskStatusEnum, string, error) { + response := task.Work + logger = logger.With("task_id", response.Task.Id) + logger.Info("Received workflow update", "task_type", response.Task.Type) + + startTime := time.Now() + defer func() { + duration := time.Since(startTime) + logger.Info("Task processing completed", "duration", duration) + }() + + defer func() { + if r := recover(); r != nil { + // Return FAILED status in case of panic + panic(fmt.Sprintf("Task panicked: %v", r)) + } + }() + + plugin, err := plugins.NewPlugin(response.Task.Type) + if err != nil { + return v1.TaskStatusEnum_FAILED, fmt.Sprintf("Failed to create plugin: %v", err), err + } + + // Add retry logic for running the task + runErr := plugin.Run(response.Task.Payload.Parameters) + + if runErr != nil { + return v1.TaskStatusEnum_FAILED, fmt.Sprintf("Error running task after %d attempts: %v"), runErr + } + + logger.Info("Task completed successfully") + + return v1.TaskStatusEnum_SUCCEEDED, "Task completed successfully", nil +} + +// retry is a helper function to retry operations with exponential backoff +func retry(ctx context.Context, maxAttempts int, initialBackoff time.Duration, operation func() error) error { + var err error + for attempt := 0; attempt < maxAttempts; attempt++ { + err = operation() + if err == nil { + return nil + } + if attempt == maxAttempts-1 { + break + } + select { + case <-ctx.Done(): + return ctx.Err() + case <-time.After(initialBackoff * time.Duration(1< 1000 { - return fmt.Errorf("number of tasks (%d) exceeds the maximum limit of 1000", numTasks) - } fmt.Println("Monitoring task completion...") startTime := time.Now() diff --git a/controller/Makefile b/controller/Makefile new file mode 100644 index 0000000..ca5ed65 --- /dev/null +++ b/controller/Makefile @@ -0,0 +1,200 @@ +# Image URL to use all building/pushing image targets +IMG ?= controller:latest +# ENVTEST_K8S_VERSION refers to the version of kubebuilder assets to be downloaded by envtest binary. 
+ENVTEST_K8S_VERSION = 1.31.0 + +# Get the currently used golang install path (in GOPATH/bin, unless GOBIN is set) +ifeq (,$(shell go env GOBIN)) +GOBIN=$(shell go env GOPATH)/bin +else +GOBIN=$(shell go env GOBIN) +endif + +# CONTAINER_TOOL defines the container tool to be used for building images. +# Be aware that the target commands are only tested with Docker which is +# scaffolded by default. However, you might want to replace it to use other +# tools. (i.e. podman) +CONTAINER_TOOL ?= docker + +# Setting SHELL to bash allows bash commands to be executed by recipes. +# Options are set to exit when a recipe line exits non-zero or a piped command fails. +SHELL = /usr/bin/env bash -o pipefail +.SHELLFLAGS = -ec + +.PHONY: all +all: build + +##@ General + +# The help target prints out all targets with their descriptions organized +# beneath their categories. The categories are represented by '##@' and the +# target descriptions by '##'. The awk command is responsible for reading the +# entire set of makefiles included in this invocation, looking for lines of the +# file as xyz: ## something, and then pretty-format the target and help. Then, +# if there's a line with ##@ something, that gets pretty-printed as a category. +# More info on the usage of ANSI control characters for terminal formatting: +# https://en.wikipedia.org/wiki/ANSI_escape_code#SGR_parameters +# More info on the awk command: +# http://linuxcommand.org/lc3_adv_awk.php + +.PHONY: help +help: ## Display this help. + @awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n make \033[36m\033[0m\n"} /^[a-zA-Z_0-9-]+:.*?##/ { printf " \033[36m%-15s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST) + +##@ Development + +.PHONY: manifests +manifests: controller-gen ## Generate WebhookConfiguration, ClusterRole and CustomResourceDefinition objects. + $(CONTROLLER_GEN) rbac:roleName=manager-role crd webhook paths="./..." output:crd:artifacts:config=config/crd/bases + +.PHONY: generate +generate: controller-gen ## Generate code containing DeepCopy, DeepCopyInto, and DeepCopyObject method implementations. + $(CONTROLLER_GEN) object:headerFile="hack/boilerplate.go.txt" paths="./..." + +.PHONY: fmt +fmt: ## Run go fmt against code. + go fmt ./... + +.PHONY: vet +vet: ## Run go vet against code. + go vet ./... + +.PHONY: test +test: manifests generate fmt vet envtest ## Run tests. + KUBEBUILDER_ASSETS="$(shell $(ENVTEST) use $(ENVTEST_K8S_VERSION) --bin-dir $(LOCALBIN) -p path)" go test $$(go list ./... | grep -v /e2e) -coverprofile cover.out + +# Utilize Kind or modify the e2e tests to load the image locally, enabling compatibility with other vendors. +.PHONY: test-e2e # Run the e2e tests against a Kind k8s instance that is spun up. +test-e2e: + go test ./test/e2e/ -v -ginkgo.v + +.PHONY: lint +lint: golangci-lint ## Run golangci-lint linter + $(GOLANGCI_LINT) run + +.PHONY: lint-fix +lint-fix: golangci-lint ## Run golangci-lint linter and perform fixes + $(GOLANGCI_LINT) run --fix + +##@ Build + +.PHONY: build +build: manifests generate fmt vet ## Build manager binary. + go build -o bin/manager cmd/main.go + +.PHONY: run +run: manifests generate fmt vet ## Run a controller from your host. + go run ./cmd/main.go + +# If you wish to build the manager image targeting other platforms you can use the --platform flag. +# (i.e. docker build --platform linux/arm64). However, you must enable docker buildKit for it. 
+# More info: https://docs.docker.com/develop/develop-images/build_enhancements/ +.PHONY: docker-build +docker-build: ## Build docker image with the manager. + $(CONTAINER_TOOL) build -t ${IMG} . + +.PHONY: docker-push +docker-push: ## Push docker image with the manager. + $(CONTAINER_TOOL) push ${IMG} + +# PLATFORMS defines the target platforms for the manager image be built to provide support to multiple +# architectures. (i.e. make docker-buildx IMG=myregistry/mypoperator:0.0.1). To use this option you need to: +# - be able to use docker buildx. More info: https://docs.docker.com/build/buildx/ +# - have enabled BuildKit. More info: https://docs.docker.com/develop/develop-images/build_enhancements/ +# - be able to push the image to your registry (i.e. if you do not set a valid value via IMG=> then the export will fail) +# To adequately provide solutions that are compatible with multiple platforms, you should consider using this option. +PLATFORMS ?= linux/arm64,linux/amd64,linux/s390x,linux/ppc64le +.PHONY: docker-buildx +docker-buildx: ## Build and push docker image for the manager for cross-platform support + # copy existing Dockerfile and insert --platform=${BUILDPLATFORM} into Dockerfile.cross, and preserve the original Dockerfile + sed -e '1 s/\(^FROM\)/FROM --platform=\$$\{BUILDPLATFORM\}/; t' -e ' 1,// s//FROM --platform=\$$\{BUILDPLATFORM\}/' Dockerfile > Dockerfile.cross + - $(CONTAINER_TOOL) buildx create --name controller-builder + $(CONTAINER_TOOL) buildx use controller-builder + - $(CONTAINER_TOOL) buildx build --push --platform=$(PLATFORMS) --tag ${IMG} -f Dockerfile.cross . + - $(CONTAINER_TOOL) buildx rm controller-builder + rm Dockerfile.cross + +.PHONY: build-installer +build-installer: manifests generate kustomize ## Generate a consolidated YAML with CRDs and deployment. + mkdir -p dist + cd config/manager && $(KUSTOMIZE) edit set image controller=${IMG} + $(KUSTOMIZE) build config/default > dist/install.yaml + +##@ Deployment + +ifndef ignore-not-found + ignore-not-found = false +endif + +.PHONY: install +install: manifests kustomize ## Install CRDs into the K8s cluster specified in ~/.kube/config. + $(KUSTOMIZE) build config/crd | $(KUBECTL) apply -f - + +.PHONY: uninstall +uninstall: manifests kustomize ## Uninstall CRDs from the K8s cluster specified in ~/.kube/config. Call with ignore-not-found=true to ignore resource not found errors during deletion. + $(KUSTOMIZE) build config/crd | $(KUBECTL) delete --ignore-not-found=$(ignore-not-found) -f - + +.PHONY: deploy +deploy: manifests kustomize ## Deploy controller to the K8s cluster specified in ~/.kube/config. + cd config/manager && $(KUSTOMIZE) edit set image controller=${IMG} + $(KUSTOMIZE) build config/default | $(KUBECTL) apply -f - + +.PHONY: undeploy +undeploy: kustomize ## Undeploy controller from the K8s cluster specified in ~/.kube/config. Call with ignore-not-found=true to ignore resource not found errors during deletion. 
+ $(KUSTOMIZE) build config/default | $(KUBECTL) delete --ignore-not-found=$(ignore-not-found) -f - + +##@ Dependencies + +## Location to install dependencies to +LOCALBIN ?= $(shell pwd)/bin +$(LOCALBIN): + mkdir -p $(LOCALBIN) + +## Tool Binaries +KUBECTL ?= kubectl +KUSTOMIZE ?= $(LOCALBIN)/kustomize +CONTROLLER_GEN ?= $(LOCALBIN)/controller-gen +ENVTEST ?= $(LOCALBIN)/setup-envtest +GOLANGCI_LINT = $(LOCALBIN)/golangci-lint + +## Tool Versions +KUSTOMIZE_VERSION ?= v5.4.3 +CONTROLLER_TOOLS_VERSION ?= v0.16.1 +ENVTEST_VERSION ?= release-0.19 +GOLANGCI_LINT_VERSION ?= v1.59.1 + +.PHONY: kustomize +kustomize: $(KUSTOMIZE) ## Download kustomize locally if necessary. +$(KUSTOMIZE): $(LOCALBIN) + $(call go-install-tool,$(KUSTOMIZE),sigs.k8s.io/kustomize/kustomize/v5,$(KUSTOMIZE_VERSION)) + +.PHONY: controller-gen +controller-gen: $(CONTROLLER_GEN) ## Download controller-gen locally if necessary. +$(CONTROLLER_GEN): $(LOCALBIN) + $(call go-install-tool,$(CONTROLLER_GEN),sigs.k8s.io/controller-tools/cmd/controller-gen,$(CONTROLLER_TOOLS_VERSION)) + +.PHONY: envtest +envtest: $(ENVTEST) ## Download setup-envtest locally if necessary. +$(ENVTEST): $(LOCALBIN) + $(call go-install-tool,$(ENVTEST),sigs.k8s.io/controller-runtime/tools/setup-envtest,$(ENVTEST_VERSION)) + +.PHONY: golangci-lint +golangci-lint: $(GOLANGCI_LINT) ## Download golangci-lint locally if necessary. +$(GOLANGCI_LINT): $(LOCALBIN) + $(call go-install-tool,$(GOLANGCI_LINT),github.com/golangci/golangci-lint/cmd/golangci-lint,$(GOLANGCI_LINT_VERSION)) + +# go-install-tool will 'go install' any package with custom target and name of binary, if it doesn't exist +# $1 - target path with name of binary +# $2 - package url which can be installed +# $3 - specific version of package +define go-install-tool +@[ -f "$(1)-$(3)" ] || { \ +set -e; \ +package=$(2)@$(3) ;\ +echo "Downloading $${package}" ;\ +rm -f $(1) || true ;\ +GOBIN=$(LOCALBIN) go install $${package} ;\ +mv $(1) $(1)-$(3) ;\ +} ;\ +ln -sf $(1)-$(3) $(1) +endef diff --git a/controller/PROJECT b/controller/PROJECT new file mode 100644 index 0000000..aa2fc36 --- /dev/null +++ b/controller/PROJECT @@ -0,0 +1,19 @@ +# Code generated by tool. DO NOT EDIT. +# This file is used to track the info used to scaffold your project +# and allow the plugins properly work. +# More info: https://book.kubebuilder.io/reference/project-config.html +domain: task.io +layout: +- go.kubebuilder.io/v4 +projectName: controller +repo: task +resources: +- api: + crdVersion: v1 + namespaced: true + controller: true + domain: task.io + kind: Task + path: task/api/v1 + version: v1 +version: "3" diff --git a/controller/README.md b/controller/README.md new file mode 100644 index 0000000..eaf0a19 --- /dev/null +++ b/controller/README.md @@ -0,0 +1,114 @@ +# controller +// TODO(user): Add simple overview of use/purpose + +## Description +// TODO(user): An in-depth paragraph about your project and overview of use + +## Getting Started + +### Prerequisites +- go version v1.22.0+ +- docker version 17.03+. +- kubectl version v1.11.3+. +- Access to a Kubernetes v1.11.3+ cluster. + +### To Deploy on the cluster +**Build and push your image to the location specified by `IMG`:** + +```sh +make docker-build docker-push IMG=/controller:tag +``` + +**NOTE:** This image ought to be published in the personal registry you specified. +And it is required to have access to pull the image from the working environment. +Make sure you have the proper permission to the registry if the above commands don’t work. 
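
Once the CRDs are installed (next step), a `Task` object can also be created from Go instead of `kubectl apply`, using the `api/v1` types added in this PR. A minimal sketch; the name, namespace, and field values are purely illustrative, and it assumes whatever kubeconfig controller-runtime resolves:

```go
package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"

	taskv1 "task/controller/api/v1"
)

func main() {
	// Build a client that knows about the Task types registered by api/v1.
	scheme := runtime.NewScheme()
	if err := taskv1.AddToScheme(scheme); err != nil {
		panic(err)
	}
	c, err := client.New(ctrl.GetConfigOrDie(), client.Options{Scheme: scheme})
	if err != nil {
		panic(err)
	}

	// A minimal Task custom resource; values are illustrative only.
	task := &taskv1.Task{
		ObjectMeta: metav1.ObjectMeta{Name: "task-42", Namespace: "default"},
		Spec: taskv1.TaskSpec{
			ID:   42,
			Type: "run_query", // hypothetical task type, borrowed from the capacity notes
			Payload: taskv1.Payload{
				Parameters: map[string]string{"query": "SELECT 1"},
			},
			Description: "example task created directly against the CRD",
		},
	}

	if err := c.Create(context.Background(), task); err != nil {
		panic(err)
	}
	fmt.Println("created", task.Name)
}
```

This mirrors what the agent in `cli/cmd/agent.go` does when it turns a `PullEvents` message into a CR, shown here standalone so the CRD can be exercised without the control plane.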
+ +**Install the CRDs into the cluster:** + +```sh +make install +``` + +**Deploy the Manager to the cluster with the image specified by `IMG`:** + +```sh +make deploy IMG=/controller:tag +``` + +> **NOTE**: If you encounter RBAC errors, you may need to grant yourself cluster-admin +privileges or be logged in as admin. + +**Create instances of your solution** +You can apply the samples (examples) from the config/sample: + +```sh +kubectl apply -k config/samples/ +``` + +>**NOTE**: Ensure that the samples has default values to test it out. + +### To Uninstall +**Delete the instances (CRs) from the cluster:** + +```sh +kubectl delete -k config/samples/ +``` + +**Delete the APIs(CRDs) from the cluster:** + +```sh +make uninstall +``` + +**UnDeploy the controller from the cluster:** + +```sh +make undeploy +``` + +## Project Distribution + +Following are the steps to build the installer and distribute this project to users. + +1. Build the installer for the image built and published in the registry: + +```sh +make build-installer IMG=/controller:tag +``` + +NOTE: The makefile target mentioned above generates an 'install.yaml' +file in the dist directory. This file contains all the resources built +with Kustomize, which are necessary to install this project without +its dependencies. + +2. Using the installer + +Users can just run kubectl apply -f to install the project, i.e.: + +```sh +kubectl apply -f https://raw.githubusercontent.com//controller//dist/install.yaml +``` + +## Contributing +// TODO(user): Add detailed information on how you would like others to contribute to this project + +**NOTE:** Run `make help` for more information on all potential `make` targets + +More information can be found via the [Kubebuilder Documentation](https://book.kubebuilder.io/introduction.html) + +## License + +Copyright 2024. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + diff --git a/controller/api/v1/groupversion_info.go b/controller/api/v1/groupversion_info.go new file mode 100644 index 0000000..6da83ea --- /dev/null +++ b/controller/api/v1/groupversion_info.go @@ -0,0 +1,36 @@ +/* +Copyright 2024. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// Package v1 contains API Schema definitions for the v1 API group +// +kubebuilder:object:generate=true +// +groupName=task.io +package v1 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +var ( + // GroupVersion is group version used to register these objects + GroupVersion = schema.GroupVersion{Group: "task.io", Version: "v1"} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/controller/api/v1/task_types.go b/controller/api/v1/task_types.go new file mode 100644 index 0000000..ee3edb5 --- /dev/null +++ b/controller/api/v1/task_types.go @@ -0,0 +1,94 @@ +/* +Copyright 2024. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! +// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. + +// TaskSpec defines the desired state of Task +type TaskSpec struct { + // ID is the unique identifier for the task. + ID int32 `json:"id,omitempty"` + + // Name is the name of the task. + Name string `json:"name,omitempty"` + + // Type is the type of the task. + Type string `json:"type,omitempty"` + + // Status is the current status of the task. + Status int32 `json:"status,omitempty"` + + // Retries is the number of retries attempted for this task. + Retries int32 `json:"retries,omitempty"` + + // Priority is the priority level of the task. + Priority int32 `json:"priority,omitempty"` + + // CreatedAt is the timestamp of when the task was created. + CreatedAt string `json:"created_at,omitempty"` + + // Payload contains task parameters. + Payload Payload `json:"payload,omitempty"` + + // Description is a description of the task. + Description string `json:"description,omitempty"` +} + +// Payload defines the parameters for the task. +type Payload struct { + // Parameters are dynamic key-value pairs for task parameters. + Parameters map[string]string `json:"parameters,omitempty"` +} + +// TaskStatus defines the observed state of Task +type TaskStatus struct { + // Status is the current status of the task. 
+ Status int32 `json:"status,omitempty"` + + // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster + // Important: Run "make" to regenerate code after modifying this file +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Task is the Schema for the tasks API +type Task struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec TaskSpec `json:"spec,omitempty"` + Status TaskStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// TaskList contains a list of Task +type TaskList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Task `json:"items"` +} + +func init() { + SchemeBuilder.Register(&Task{}, &TaskList{}) +} diff --git a/controller/api/v1/zz_generated.deepcopy.go b/controller/api/v1/zz_generated.deepcopy.go new file mode 100644 index 0000000..04020f3 --- /dev/null +++ b/controller/api/v1/zz_generated.deepcopy.go @@ -0,0 +1,114 @@ +//go:build !ignore_autogenerated + +/* +Copyright 2024. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by controller-gen. DO NOT EDIT. + +package v1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Task) DeepCopyInto(out *Task) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + out.Status = in.Status +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Task. +func (in *Task) DeepCopy() *Task { + if in == nil { + return nil + } + out := new(Task) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Task) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TaskList) DeepCopyInto(out *TaskList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Task, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TaskList. +func (in *TaskList) DeepCopy() *TaskList { + if in == nil { + return nil + } + out := new(TaskList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *TaskList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TaskSpec) DeepCopyInto(out *TaskSpec) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TaskSpec. +func (in *TaskSpec) DeepCopy() *TaskSpec { + if in == nil { + return nil + } + out := new(TaskSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TaskStatus) DeepCopyInto(out *TaskStatus) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TaskStatus. +func (in *TaskStatus) DeepCopy() *TaskStatus { + if in == nil { + return nil + } + out := new(TaskStatus) + in.DeepCopyInto(out) + return out +} diff --git a/controller/cmd/main.go b/controller/cmd/main.go new file mode 100644 index 0000000..984b990 --- /dev/null +++ b/controller/cmd/main.go @@ -0,0 +1,173 @@ +/* +Copyright 2024. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package main + +import ( + "crypto/tls" + "flag" + "net/http" + "os" + + // Import all Kubernetes client auth plugins (e.g. Azure, GCP, OIDC, etc.) + // to ensure that exec-entrypoint and run can make use of them. + _ "k8s.io/client-go/plugin/pkg/client/auth" + + "k8s.io/apimachinery/pkg/runtime" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + clientgoscheme "k8s.io/client-go/kubernetes/scheme" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/healthz" + "sigs.k8s.io/controller-runtime/pkg/log/zap" + "sigs.k8s.io/controller-runtime/pkg/metrics/filters" + metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server" + "sigs.k8s.io/controller-runtime/pkg/webhook" + + taskiov1 "task/controller/api/v1" + controller "task/controller/internal/controller" + "task/pkg/gen/cloud/v1/cloudv1connect" + // +kubebuilder:scaffold:imports +) + +var ( + scheme = runtime.NewScheme() + setupLog = ctrl.Log.WithName("setup") +) + +func init() { + utilruntime.Must(clientgoscheme.AddToScheme(scheme)) + + utilruntime.Must(taskiov1.AddToScheme(scheme)) + // +kubebuilder:scaffold:scheme +} + +func main() { + var metricsAddr string + var enableLeaderElection bool + var probeAddr string + var secureMetrics bool + var enableHTTP2 bool + var tlsOpts []func(*tls.Config) + flag.StringVar(&metricsAddr, "metrics-bind-address", "0", "The address the metrics endpoint binds to. "+ + "Use :8443 for HTTPS or :8080 for HTTP, or leave as 0 to disable the metrics service.") + flag.StringVar(&probeAddr, "health-probe-bind-address", ":8081", "The address the probe endpoint binds to.") + flag.BoolVar(&enableLeaderElection, "leader-elect", false, + "Enable leader election for controller manager. "+ + "Enabling this will ensure there is only one active controller manager.") + flag.BoolVar(&secureMetrics, "metrics-secure", true, + "If set, the metrics endpoint is served securely via HTTPS. 
Use --metrics-secure=false to use HTTP instead.") + flag.BoolVar(&enableHTTP2, "enable-http2", false, + "If set, HTTP/2 will be enabled for the metrics and webhook servers") + opts := zap.Options{ + Development: true, + } + opts.BindFlags(flag.CommandLine) + flag.Parse() + + ctrl.SetLogger(zap.New(zap.UseFlagOptions(&opts))) + + // if the enable-http2 flag is false (the default), http/2 should be disabled + // due to its vulnerabilities. More specifically, disabling http/2 will + // prevent from being vulnerable to the HTTP/2 Stream Cancellation and + // Rapid Reset CVEs. For more information see: + // - https://github.com/advisories/GHSA-qppj-fm5r-hxr3 + // - https://github.com/advisories/GHSA-4374-p667-p6c8 + disableHTTP2 := func(c *tls.Config) { + setupLog.Info("disabling http/2") + c.NextProtos = []string{"http/1.1"} + } + + if !enableHTTP2 { + tlsOpts = append(tlsOpts, disableHTTP2) + } + + webhookServer := webhook.NewServer(webhook.Options{ + TLSOpts: tlsOpts, + }) + + // Metrics endpoint is enabled in 'config/default/kustomization.yaml'. The Metrics options configure the server. + // More info: + // - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.19.0/pkg/metrics/server + // - https://book.kubebuilder.io/reference/metrics.html + metricsServerOptions := metricsserver.Options{ + BindAddress: metricsAddr, + SecureServing: secureMetrics, + // TODO(user): TLSOpts is used to allow configuring the TLS config used for the server. If certificates are + // not provided, self-signed certificates will be generated by default. This option is not recommended for + // production environments as self-signed certificates do not offer the same level of trust and security + // as certificates issued by a trusted Certificate Authority (CA). The primary risk is potentially allowing + // unauthorized access to sensitive metrics data. Consider replacing with CertDir, CertName, and KeyName + // to provide certificates, ensuring the server communicates using trusted and secure certificates. + TLSOpts: tlsOpts, + } + + if secureMetrics { + // FilterProvider is used to protect the metrics endpoint with authn/authz. + // These configurations ensure that only authorized users and service accounts + // can access the metrics endpoint. The RBAC are configured in 'config/rbac/kustomization.yaml'. More info: + // https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.19.0/pkg/metrics/filters#WithAuthenticationAndAuthorization + metricsServerOptions.FilterProvider = filters.WithAuthenticationAndAuthorization + } + + mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{ + Scheme: scheme, + Metrics: metricsServerOptions, + WebhookServer: webhookServer, + HealthProbeBindAddress: probeAddr, + LeaderElection: enableLeaderElection, + LeaderElectionID: "f448886c.task.io", + // LeaderElectionReleaseOnCancel defines if the leader should step down voluntarily + // when the Manager ends. This requires the binary to immediately end when the + // Manager is stopped, otherwise, this setting is unsafe. Setting this significantly + // speeds up voluntary leader transitions as the new leader don't have to wait + // LeaseDuration time first. + // + // In the default scaffold provided, the program ends immediately after + // the manager stops, so would be fine to enable this option. However, + // if you are doing or is intended to do any operation such as perform cleanups + // after the manager stops then its usage might be unsafe. 
+ // LeaderElectionReleaseOnCancel: true, + }) + if err != nil { + setupLog.Error(err, "unable to start manager") + os.Exit(1) + } + + if err = (&controller.TaskReconciler{ + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + CloudClient: cloudv1connect.NewTaskManagementServiceClient(http.DefaultClient, "https://localhost:8080"), + }).SetupWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "Task") + os.Exit(1) + } + // +kubebuilder:scaffold:builder + + if err := mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil { + setupLog.Error(err, "unable to set up health check") + os.Exit(1) + } + if err := mgr.AddReadyzCheck("readyz", healthz.Ping); err != nil { + setupLog.Error(err, "unable to set up ready check") + os.Exit(1) + } + + setupLog.Info("starting manager") + if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil { + setupLog.Error(err, "problem running manager") + os.Exit(1) + } +} diff --git a/controller/config/crd/bases/task.io_tasks.yaml b/controller/config/crd/bases/task.io_tasks.yaml new file mode 100644 index 0000000..a396d1b --- /dev/null +++ b/controller/config/crd/bases/task.io_tasks.yaml @@ -0,0 +1,92 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.1 + name: tasks.task.io +spec: + group: task.io + names: + kind: Task + listKind: TaskList + plural: tasks + singular: task + scope: Namespaced + versions: + - name: v1 + schema: + openAPIV3Schema: + description: Task is the Schema for the tasks API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: TaskSpec defines the desired state of Task + properties: + created_at: + description: CreatedAt is the timestamp of when the task was created. + type: string + description: + description: Description is a description of the task. + type: string + id: + description: ID is the unique identifier for the task. + format: int32 + type: integer + name: + description: Name is the name of the task. + type: string + payload: + description: Payload contains task parameters. + properties: + parameters: + additionalProperties: + type: string + description: Parameters are dynamic key-value pairs for task parameters. + type: object + type: object + priority: + description: Priority is the priority level of the task. + format: int32 + type: integer + retries: + description: Retries is the number of retries attempted for this task. + format: int32 + type: integer + status: + description: Status is the current status of the task. + format: int32 + type: integer + type: + description: Type is the type of the task. + type: string + type: object + status: + description: TaskStatus defines the observed state of Task + properties: + status: + description: Status is the current status of the task. 
+ format: int32 + type: integer + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/controller/config/crd/kustomization.yaml b/controller/config/crd/kustomization.yaml new file mode 100644 index 0000000..8f91e1c --- /dev/null +++ b/controller/config/crd/kustomization.yaml @@ -0,0 +1,22 @@ +# This kustomization.yaml is not intended to be run by itself, +# since it depends on service name and namespace that are out of this kustomize package. +# It should be run by config/default +resources: +- bases/task.io_tasks.yaml +# +kubebuilder:scaffold:crdkustomizeresource + +patches: +# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix. +# patches here are for enabling the conversion webhook for each CRD +# +kubebuilder:scaffold:crdkustomizewebhookpatch + +# [CERTMANAGER] To enable cert-manager, uncomment all the sections with [CERTMANAGER] prefix. +# patches here are for enabling the CA injection for each CRD +#- path: patches/cainjection_in_tasks.yaml +# +kubebuilder:scaffold:crdkustomizecainjectionpatch + +# [WEBHOOK] To enable webhook, uncomment the following section +# the following config is for teaching kustomize how to do kustomization for CRDs. + +#configurations: +#- kustomizeconfig.yaml diff --git a/controller/config/crd/kustomizeconfig.yaml b/controller/config/crd/kustomizeconfig.yaml new file mode 100644 index 0000000..ec5c150 --- /dev/null +++ b/controller/config/crd/kustomizeconfig.yaml @@ -0,0 +1,19 @@ +# This file is for teaching kustomize how to substitute name and namespace reference in CRD +nameReference: +- kind: Service + version: v1 + fieldSpecs: + - kind: CustomResourceDefinition + version: v1 + group: apiextensions.k8s.io + path: spec/conversion/webhook/clientConfig/service/name + +namespace: +- kind: CustomResourceDefinition + version: v1 + group: apiextensions.k8s.io + path: spec/conversion/webhook/clientConfig/service/namespace + create: false + +varReference: +- path: metadata/annotations diff --git a/controller/config/default/kustomization.yaml b/controller/config/default/kustomization.yaml new file mode 100644 index 0000000..0fd956d --- /dev/null +++ b/controller/config/default/kustomization.yaml @@ -0,0 +1,151 @@ +# Adds namespace to all resources. +namespace: controller-system + +# Value of this field is prepended to the +# names of all resources, e.g. a deployment named +# "wordpress" becomes "alices-wordpress". +# Note that it should also match with the prefix (text before '-') of the namespace +# field above. +namePrefix: controller- + +# Labels to add to all resources and selectors. +#labels: +#- includeSelectors: true +# pairs: +# someName: someValue + +resources: +- ../crd +- ../rbac +- ../manager +# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in +# crd/kustomization.yaml +#- ../webhook +# [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER'. 'WEBHOOK' components are required. +#- ../certmanager +# [PROMETHEUS] To enable prometheus monitor, uncomment all sections with 'PROMETHEUS'. +#- ../prometheus +# [METRICS] Expose the controller manager metrics service. +- metrics_service.yaml +# [NETWORK POLICY] Protect the /metrics endpoint and Webhook Server with NetworkPolicy. +# Only Pod(s) running a namespace labeled with 'metrics: enabled' will be able to gather the metrics. 
+# Only CR(s) which requires webhooks and are applied on namespaces labeled with 'webhooks: enabled' will +# be able to communicate with the Webhook Server. +#- ../network-policy + +# Uncomment the patches line if you enable Metrics, and/or are using webhooks and cert-manager +patches: +# [METRICS] The following patch will enable the metrics endpoint using HTTPS and the port :8443. +# More info: https://book.kubebuilder.io/reference/metrics +- path: manager_metrics_patch.yaml + target: + kind: Deployment + +# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in +# crd/kustomization.yaml +#- path: manager_webhook_patch.yaml + +# [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER'. +# Uncomment 'CERTMANAGER' sections in crd/kustomization.yaml to enable the CA injection in the admission webhooks. +# 'CERTMANAGER' needs to be enabled to use ca injection +#- path: webhookcainjection_patch.yaml + +# [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER' prefix. +# Uncomment the following replacements to add the cert-manager CA injection annotations +#replacements: +# - source: # Add cert-manager annotation to ValidatingWebhookConfiguration, MutatingWebhookConfiguration and CRDs +# kind: Certificate +# group: cert-manager.io +# version: v1 +# name: serving-cert # this name should match the one in certificate.yaml +# fieldPath: .metadata.namespace # namespace of the certificate CR +# targets: +# - select: +# kind: ValidatingWebhookConfiguration +# fieldPaths: +# - .metadata.annotations.[cert-manager.io/inject-ca-from] +# options: +# delimiter: '/' +# index: 0 +# create: true +# - select: +# kind: MutatingWebhookConfiguration +# fieldPaths: +# - .metadata.annotations.[cert-manager.io/inject-ca-from] +# options: +# delimiter: '/' +# index: 0 +# create: true +# - select: +# kind: CustomResourceDefinition +# fieldPaths: +# - .metadata.annotations.[cert-manager.io/inject-ca-from] +# options: +# delimiter: '/' +# index: 0 +# create: true +# - source: +# kind: Certificate +# group: cert-manager.io +# version: v1 +# name: serving-cert # this name should match the one in certificate.yaml +# fieldPath: .metadata.name +# targets: +# - select: +# kind: ValidatingWebhookConfiguration +# fieldPaths: +# - .metadata.annotations.[cert-manager.io/inject-ca-from] +# options: +# delimiter: '/' +# index: 1 +# create: true +# - select: +# kind: MutatingWebhookConfiguration +# fieldPaths: +# - .metadata.annotations.[cert-manager.io/inject-ca-from] +# options: +# delimiter: '/' +# index: 1 +# create: true +# - select: +# kind: CustomResourceDefinition +# fieldPaths: +# - .metadata.annotations.[cert-manager.io/inject-ca-from] +# options: +# delimiter: '/' +# index: 1 +# create: true +# - source: # Add cert-manager annotation to the webhook Service +# kind: Service +# version: v1 +# name: webhook-service +# fieldPath: .metadata.name # namespace of the service +# targets: +# - select: +# kind: Certificate +# group: cert-manager.io +# version: v1 +# fieldPaths: +# - .spec.dnsNames.0 +# - .spec.dnsNames.1 +# options: +# delimiter: '.' +# index: 0 +# create: true +# - source: +# kind: Service +# version: v1 +# name: webhook-service +# fieldPath: .metadata.namespace # namespace of the service +# targets: +# - select: +# kind: Certificate +# group: cert-manager.io +# version: v1 +# fieldPaths: +# - .spec.dnsNames.0 +# - .spec.dnsNames.1 +# options: +# delimiter: '.' 
+# index: 1 +# create: true diff --git a/controller/config/default/manager_metrics_patch.yaml b/controller/config/default/manager_metrics_patch.yaml new file mode 100644 index 0000000..2aaef65 --- /dev/null +++ b/controller/config/default/manager_metrics_patch.yaml @@ -0,0 +1,4 @@ +# This patch adds the args to allow exposing the metrics endpoint using HTTPS +- op: add + path: /spec/template/spec/containers/0/args/0 + value: --metrics-bind-address=:8443 diff --git a/controller/config/default/metrics_service.yaml b/controller/config/default/metrics_service.yaml new file mode 100644 index 0000000..ae73482 --- /dev/null +++ b/controller/config/default/metrics_service.yaml @@ -0,0 +1,17 @@ +apiVersion: v1 +kind: Service +metadata: + labels: + control-plane: controller-manager + app.kubernetes.io/name: controller + app.kubernetes.io/managed-by: kustomize + name: controller-manager-metrics-service + namespace: system +spec: + ports: + - name: https + port: 8443 + protocol: TCP + targetPort: 8443 + selector: + control-plane: controller-manager diff --git a/controller/config/manager/kustomization.yaml b/controller/config/manager/kustomization.yaml new file mode 100644 index 0000000..5c5f0b8 --- /dev/null +++ b/controller/config/manager/kustomization.yaml @@ -0,0 +1,2 @@ +resources: +- manager.yaml diff --git a/controller/config/manager/manager.yaml b/controller/config/manager/manager.yaml new file mode 100644 index 0000000..edcfa9c --- /dev/null +++ b/controller/config/manager/manager.yaml @@ -0,0 +1,95 @@ +apiVersion: v1 +kind: Namespace +metadata: + labels: + control-plane: controller-manager + app.kubernetes.io/name: controller + app.kubernetes.io/managed-by: kustomize + name: system +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: controller-manager + namespace: system + labels: + control-plane: controller-manager + app.kubernetes.io/name: controller + app.kubernetes.io/managed-by: kustomize +spec: + selector: + matchLabels: + control-plane: controller-manager + replicas: 1 + template: + metadata: + annotations: + kubectl.kubernetes.io/default-container: manager + labels: + control-plane: controller-manager + spec: + # TODO(user): Uncomment the following code to configure the nodeAffinity expression + # according to the platforms which are supported by your solution. + # It is considered best practice to support multiple architectures. You can + # build your manager image using the makefile target docker-buildx. + # affinity: + # nodeAffinity: + # requiredDuringSchedulingIgnoredDuringExecution: + # nodeSelectorTerms: + # - matchExpressions: + # - key: kubernetes.io/arch + # operator: In + # values: + # - amd64 + # - arm64 + # - ppc64le + # - s390x + # - key: kubernetes.io/os + # operator: In + # values: + # - linux + securityContext: + runAsNonRoot: true + # TODO(user): For common cases that do not require escalating privileges + # it is recommended to ensure that all your Pods/Containers are restrictive. + # More info: https://kubernetes.io/docs/concepts/security/pod-security-standards/#restricted + # Please uncomment the following code if your project does NOT have to work on old Kubernetes + # versions < 1.19 or on vendors versions which do NOT support this field by default (i.e. Openshift < 4.11 ). 
+      # seccompProfile:
+      #   type: RuntimeDefault
+      containers:
+      - command:
+        - /manager
+        args:
+        - --leader-elect
+        - --health-probe-bind-address=:8081
+        image: controller:latest
+        name: manager
+        securityContext:
+          allowPrivilegeEscalation: false
+          capabilities:
+            drop:
+            - "ALL"
+        livenessProbe:
+          httpGet:
+            path: /healthz
+            port: 8081
+          initialDelaySeconds: 15
+          periodSeconds: 20
+        readinessProbe:
+          httpGet:
+            path: /readyz
+            port: 8081
+          initialDelaySeconds: 5
+          periodSeconds: 10
+        # TODO(user): Configure the resources accordingly based on the project requirements.
+        # More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+        resources:
+          limits:
+            cpu: 500m
+            memory: 128Mi
+          requests:
+            cpu: 10m
+            memory: 64Mi
+      serviceAccountName: controller-manager
+      terminationGracePeriodSeconds: 10
diff --git a/controller/config/network-policy/allow-metrics-traffic.yaml b/controller/config/network-policy/allow-metrics-traffic.yaml
new file mode 100644
index 0000000..f6ddf95
--- /dev/null
+++ b/controller/config/network-policy/allow-metrics-traffic.yaml
@@ -0,0 +1,26 @@
+# This NetworkPolicy allows ingress traffic from Pods running in namespaces
+# labeled with 'metrics: enabled'. Only Pods in those namespaces are able to
+# gather data from the metrics endpoint.
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+  labels:
+    app.kubernetes.io/name: controller
+    app.kubernetes.io/managed-by: kustomize
+  name: allow-metrics-traffic
+  namespace: system
+spec:
+  podSelector:
+    matchLabels:
+      control-plane: controller-manager
+  policyTypes:
+    - Ingress
+  ingress:
+    # This allows ingress traffic from any namespace with the label metrics: enabled
+    - from:
+      - namespaceSelector:
+          matchLabels:
+            metrics: enabled # Only from namespaces with this label
+      ports:
+        - port: 8443
+          protocol: TCP
diff --git a/controller/config/network-policy/kustomization.yaml b/controller/config/network-policy/kustomization.yaml
new file mode 100644
index 0000000..ec0fb5e
--- /dev/null
+++ b/controller/config/network-policy/kustomization.yaml
@@ -0,0 +1,2 @@
+resources:
+- allow-metrics-traffic.yaml
diff --git a/controller/config/prometheus/kustomization.yaml b/controller/config/prometheus/kustomization.yaml
new file mode 100644
index 0000000..ed13716
--- /dev/null
+++ b/controller/config/prometheus/kustomization.yaml
@@ -0,0 +1,2 @@
+resources:
+- monitor.yaml
diff --git a/controller/config/prometheus/monitor.yaml b/controller/config/prometheus/monitor.yaml
new file mode 100644
index 0000000..a9731e4
--- /dev/null
+++ b/controller/config/prometheus/monitor.yaml
@@ -0,0 +1,30 @@
+# Prometheus Monitor Service (Metrics)
+apiVersion: monitoring.coreos.com/v1
+kind: ServiceMonitor
+metadata:
+  labels:
+    control-plane: controller-manager
+    app.kubernetes.io/name: controller
+    app.kubernetes.io/managed-by: kustomize
+  name: controller-manager-metrics-monitor
+  namespace: system
+spec:
+  endpoints:
+    - path: /metrics
+      port: https # Ensure this is the name of the port that exposes HTTPS metrics
+      scheme: https
+      bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
+      tlsConfig:
+        # TODO(user): The option insecureSkipVerify: true is not recommended for production since it disables
+        # certificate verification. This poses a significant security risk by making the system vulnerable to
+        # man-in-the-middle attacks, where an attacker could intercept and manipulate the communication between
+        # Prometheus and the monitored services.
+        # This could lead to unauthorized access to sensitive metrics data,
+        # compromising the integrity and confidentiality of the information.
+        # Please use the following options for secure configurations:
+        # caFile: /etc/metrics-certs/ca.crt
+        # certFile: /etc/metrics-certs/tls.crt
+        # keyFile: /etc/metrics-certs/tls.key
+        insecureSkipVerify: true
+  selector:
+    matchLabels:
+      control-plane: controller-manager
diff --git a/controller/config/rbac/kustomization.yaml b/controller/config/rbac/kustomization.yaml
new file mode 100644
index 0000000..0ffacf4
--- /dev/null
+++ b/controller/config/rbac/kustomization.yaml
@@ -0,0 +1,27 @@
+resources:
+# All RBAC will be applied under this service account in
+# the deployment namespace. You may comment out this resource
+# if your manager will use a service account that exists at
+# runtime. Be sure to update RoleBinding and ClusterRoleBinding
+# subjects if changing service account names.
+- service_account.yaml
+- role.yaml
+- role_binding.yaml
+- leader_election_role.yaml
+- leader_election_role_binding.yaml
+# The following RBAC configurations are used to protect
+# the metrics endpoint with authn/authz. These configurations
+# ensure that only authorized users and service accounts
+# can access the metrics endpoint. Comment out the following
+# permissions if you want to disable this protection.
+# More info: https://book.kubebuilder.io/reference/metrics.html
+- metrics_auth_role.yaml
+- metrics_auth_role_binding.yaml
+- metrics_reader_role.yaml
+# For each CRD, "Editor" and "Viewer" roles are scaffolded by
+# default, aiding admins in cluster management. Those roles are
+# not used by the Project itself. You can comment out the following lines
+# if you do not want those helpers to be installed with your Project.
+- task_editor_role.yaml
+- task_viewer_role.yaml
+
diff --git a/controller/config/rbac/leader_election_role.yaml b/controller/config/rbac/leader_election_role.yaml
new file mode 100644
index 0000000..445f027
--- /dev/null
+++ b/controller/config/rbac/leader_election_role.yaml
@@ -0,0 +1,40 @@
+# permissions to do leader election.
+apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + labels: + app.kubernetes.io/name: controller + app.kubernetes.io/managed-by: kustomize + name: leader-election-role +rules: +- apiGroups: + - "" + resources: + - configmaps + verbs: + - get + - list + - watch + - create + - update + - patch + - delete +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - get + - list + - watch + - create + - update + - patch + - delete +- apiGroups: + - "" + resources: + - events + verbs: + - create + - patch diff --git a/controller/config/rbac/leader_election_role_binding.yaml b/controller/config/rbac/leader_election_role_binding.yaml new file mode 100644 index 0000000..aed609d --- /dev/null +++ b/controller/config/rbac/leader_election_role_binding.yaml @@ -0,0 +1,15 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + app.kubernetes.io/name: controller + app.kubernetes.io/managed-by: kustomize + name: leader-election-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: leader-election-role +subjects: +- kind: ServiceAccount + name: controller-manager + namespace: system diff --git a/controller/config/rbac/metrics_auth_role.yaml b/controller/config/rbac/metrics_auth_role.yaml new file mode 100644 index 0000000..32d2e4e --- /dev/null +++ b/controller/config/rbac/metrics_auth_role.yaml @@ -0,0 +1,17 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: metrics-auth-role +rules: +- apiGroups: + - authentication.k8s.io + resources: + - tokenreviews + verbs: + - create +- apiGroups: + - authorization.k8s.io + resources: + - subjectaccessreviews + verbs: + - create diff --git a/controller/config/rbac/metrics_auth_role_binding.yaml b/controller/config/rbac/metrics_auth_role_binding.yaml new file mode 100644 index 0000000..e775d67 --- /dev/null +++ b/controller/config/rbac/metrics_auth_role_binding.yaml @@ -0,0 +1,12 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: metrics-auth-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: metrics-auth-role +subjects: +- kind: ServiceAccount + name: controller-manager + namespace: system diff --git a/controller/config/rbac/metrics_reader_role.yaml b/controller/config/rbac/metrics_reader_role.yaml new file mode 100644 index 0000000..51a75db --- /dev/null +++ b/controller/config/rbac/metrics_reader_role.yaml @@ -0,0 +1,9 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: metrics-reader +rules: +- nonResourceURLs: + - "/metrics" + verbs: + - get diff --git a/controller/config/rbac/role.yaml b/controller/config/rbac/role.yaml new file mode 100644 index 0000000..57102d0 --- /dev/null +++ b/controller/config/rbac/role.yaml @@ -0,0 +1,32 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: manager-role +rules: +- apiGroups: + - task.io + resources: + - tasks + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - task.io + resources: + - tasks/finalizers + verbs: + - update +- apiGroups: + - task.io + resources: + - tasks/status + verbs: + - get + - patch + - update diff --git a/controller/config/rbac/role_binding.yaml b/controller/config/rbac/role_binding.yaml new file mode 100644 index 0000000..0953223 --- /dev/null +++ b/controller/config/rbac/role_binding.yaml @@ -0,0 +1,15 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + 
app.kubernetes.io/name: controller + app.kubernetes.io/managed-by: kustomize + name: manager-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: manager-role +subjects: +- kind: ServiceAccount + name: controller-manager + namespace: system diff --git a/controller/config/rbac/service_account.yaml b/controller/config/rbac/service_account.yaml new file mode 100644 index 0000000..834b343 --- /dev/null +++ b/controller/config/rbac/service_account.yaml @@ -0,0 +1,8 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + app.kubernetes.io/name: controller + app.kubernetes.io/managed-by: kustomize + name: controller-manager + namespace: system diff --git a/controller/config/rbac/task_editor_role.yaml b/controller/config/rbac/task_editor_role.yaml new file mode 100644 index 0000000..61dc334 --- /dev/null +++ b/controller/config/rbac/task_editor_role.yaml @@ -0,0 +1,27 @@ +# permissions for end users to edit tasks. +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: controller + app.kubernetes.io/managed-by: kustomize + name: task-editor-role +rules: +- apiGroups: + - task.io + resources: + - tasks + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - task.io + resources: + - tasks/status + verbs: + - get diff --git a/controller/config/rbac/task_viewer_role.yaml b/controller/config/rbac/task_viewer_role.yaml new file mode 100644 index 0000000..8b03e19 --- /dev/null +++ b/controller/config/rbac/task_viewer_role.yaml @@ -0,0 +1,23 @@ +# permissions for end users to view tasks. +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: controller + app.kubernetes.io/managed-by: kustomize + name: task-viewer-role +rules: +- apiGroups: + - task.io + resources: + - tasks + verbs: + - get + - list + - watch +- apiGroups: + - task.io + resources: + - tasks/status + verbs: + - get diff --git a/controller/config/samples/kustomization.yaml b/controller/config/samples/kustomization.yaml new file mode 100644 index 0000000..eee95f0 --- /dev/null +++ b/controller/config/samples/kustomization.yaml @@ -0,0 +1,4 @@ +## Append samples of your project ## +resources: +- v1_task.yaml +# +kubebuilder:scaffold:manifestskustomizesamples diff --git a/controller/config/samples/v1_task.yaml b/controller/config/samples/v1_task.yaml new file mode 100644 index 0000000..20c6613 --- /dev/null +++ b/controller/config/samples/v1_task.yaml @@ -0,0 +1,9 @@ +apiVersion: task.io/v1 +kind: Task +metadata: + labels: + app.kubernetes.io/name: controller + app.kubernetes.io/managed-by: kustomize + name: task-sample +spec: + # TODO(user): Add fields here diff --git a/controller/hack/boilerplate.go.txt b/controller/hack/boilerplate.go.txt new file mode 100644 index 0000000..ff72ff2 --- /dev/null +++ b/controller/hack/boilerplate.go.txt @@ -0,0 +1,15 @@ +/* +Copyright 2024. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
\ No newline at end of file
diff --git a/controller/internal/controller/suite_test.go b/controller/internal/controller/suite_test.go
new file mode 100644
index 0000000..bd908e3
--- /dev/null
+++ b/controller/internal/controller/suite_test.go
@@ -0,0 +1,96 @@
+/*
+Copyright 2024.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package controller
+
+import (
+	"context"
+	"fmt"
+	"path/filepath"
+	"runtime"
+	"testing"
+
+	. "github.com/onsi/ginkgo/v2"
+	. "github.com/onsi/gomega"
+
+	"k8s.io/client-go/kubernetes/scheme"
+	"k8s.io/client-go/rest"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/envtest"
+	logf "sigs.k8s.io/controller-runtime/pkg/log"
+	"sigs.k8s.io/controller-runtime/pkg/log/zap"
+
+	taskiov1 "task/api/v1"
+	// +kubebuilder:scaffold:imports
+)
+
+// These tests use Ginkgo (a BDD-style Go testing framework). Refer to
+// http://onsi.github.io/ginkgo/ to learn more about Ginkgo.
+
+var cfg *rest.Config
+var k8sClient client.Client
+var testEnv *envtest.Environment
+var ctx context.Context
+var cancel context.CancelFunc
+
+func TestControllers(t *testing.T) {
+	RegisterFailHandler(Fail)
+
+	RunSpecs(t, "Controller Suite")
+}
+
+var _ = BeforeSuite(func() {
+	logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true)))
+
+	ctx, cancel = context.WithCancel(context.TODO())
+
+	By("bootstrapping test environment")
+	testEnv = &envtest.Environment{
+		CRDDirectoryPaths:     []string{filepath.Join("..", "..", "config", "crd", "bases")},
+		ErrorIfCRDPathMissing: true,
+
+		// The BinaryAssetsDirectory is only required if you want to run the tests directly
+		// without calling the makefile target test. If not set, it will look for the
+		// default path defined in controller-runtime, which is /usr/local/kubebuilder/.
+		// Note that you must have the required binaries set up under the bin directory to perform
+		// the tests directly. When we run make test it will be set up and used automatically.
+		BinaryAssetsDirectory: filepath.Join("..", "..", "bin", "k8s",
+			fmt.Sprintf("1.31.0-%s-%s", runtime.GOOS, runtime.GOARCH)),
+	}
+
+	var err error
+	// cfg is defined in this file globally.
+	cfg, err = testEnv.Start()
+	Expect(err).NotTo(HaveOccurred())
+	Expect(cfg).NotTo(BeNil())
+
+	err = taskiov1.AddToScheme(scheme.Scheme)
+	Expect(err).NotTo(HaveOccurred())
+
+	// +kubebuilder:scaffold:scheme
+
+	k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme})
+	Expect(err).NotTo(HaveOccurred())
+	Expect(k8sClient).NotTo(BeNil())
+
+})
+
+var _ = AfterSuite(func() {
+	By("tearing down the test environment")
+	cancel()
+	err := testEnv.Stop()
+	Expect(err).NotTo(HaveOccurred())
+})
diff --git a/controller/internal/controller/task_controller.go b/controller/internal/controller/task_controller.go
new file mode 100644
index 0000000..fee1377
--- /dev/null
+++ b/controller/internal/controller/task_controller.go
@@ -0,0 +1,166 @@
+/*
+Copyright 2024.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package controller
+
+import (
+	"context"
+	"fmt"
+	"time"
+
+	v1 "task/controller/api/v1"
+	cloudv1 "task/pkg/gen/cloud/v1"
+	cloudv1connect "task/pkg/gen/cloud/v1/cloudv1connect"
+	"task/pkg/plugins"
+
+	"connectrpc.com/connect"
+	"k8s.io/apimachinery/pkg/runtime"
+	ctrl "sigs.k8s.io/controller-runtime"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/log"
+)
+
+// TaskReconciler reconciles a Task object
+type TaskReconciler struct {
+	client.Client
+	Scheme      *runtime.Scheme
+	CloudClient cloudv1connect.TaskManagementServiceClient
+}
+
+// +kubebuilder:rbac:groups=task.io,resources=tasks,verbs=get;list;watch;create;update;patch;delete
+// +kubebuilder:rbac:groups=task.io,resources=tasks/status,verbs=get;update;patch
+// +kubebuilder:rbac:groups=task.io,resources=tasks/finalizers,verbs=update
+
+// Reconcile is part of the main Kubernetes reconciliation loop which aims to
+// move the current state of the cluster closer to the desired state.
+// This function compares the state specified by the Task object against the
+// actual cluster state, and then performs operations to make the cluster
+// state reflect the state specified by the user.
+func (r *TaskReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
+	_ = log.FromContext(ctx)
+
+	task := &v1.Task{}
+	err := r.Get(ctx, req.NamespacedName, task)
+	if err != nil {
+		log.FromContext(ctx).Error(err, "Failed to get task")
+		return ctrl.Result{}, err
+	}
+
+	maxAttempts := 3
+	initialBackoff := 1 * time.Second
+
+	var finalStatus cloudv1.TaskStatusEnum
+	var finalMessage string
+
+	for attempt := 1; attempt <= maxAttempts; attempt++ {
+		// Update status to Running for each attempt
+		runningMessage := fmt.Sprintf("Running attempt %d of %d", attempt, maxAttempts)
+		if err := r.updateTaskStatus(ctx, int64(task.Spec.ID), cloudv1.TaskStatusEnum_RUNNING, runningMessage); err != nil {
+			log.FromContext(ctx).Error(err, "Failed to update task status to Running")
+			return ctrl.Result{}, err
+		}
+
+		_, message, err := processWorkflowUpdate(ctx, task)
+
+		if err != nil {
+			failedMessage := fmt.Sprintf("Attempt %d failed: %v", attempt, err)
+			if err := r.updateTaskStatus(ctx, int64(task.Spec.ID), cloudv1.TaskStatusEnum_FAILED, failedMessage); err != nil {
+				log.FromContext(ctx).Error(err, "Failed to update task status to Failed")
+				return ctrl.Result{}, err
+			}
+
+			if attempt == maxAttempts {
+				finalStatus = cloudv1.TaskStatusEnum_FAILED
+				finalMessage = fmt.Sprintf("All %d attempts failed.
Last error: %v", maxAttempts, err) + log.FromContext(ctx).Error(fmt.Errorf(finalMessage), "Final failure after max attempts") + } else { + // Wait before the next attempt + select { + case <-ctx.Done(): + return ctrl.Result{}, ctx.Err() + case <-time.After(initialBackoff * time.Duration(1< status_counts = 1; } @@ -216,4 +263,4 @@ message TaskListRequest { optional string type = 4 [(validate.rules).string = { in: ["send_email", "run_query"] }]; -} \ No newline at end of file +} diff --git a/pkg/config/config.go b/pkg/config/config.go index d2ca66b..bc69c99 100644 --- a/pkg/config/config.go +++ b/pkg/config/config.go @@ -7,6 +7,7 @@ type Config struct { ServerPort string `envconfig:"SERVER_PORT" default:"8080"` WorkerCount int `envconfig:"WORKER_COUNT" default:"100"` Database DatabaseConfig + OAuth2 OAuth2Config } // DatabaseConfig holds the database connection configuration @@ -20,6 +21,14 @@ type DatabaseConfig struct { PoolMaxConns int `envconfig:"DB_POOL_MAX_CONNS" default:"1"` } +// OAuth2Config holds the OAuth2 configuration +type OAuth2Config struct { + Provider string `envconfig:"OAUTH2_PROVIDER"` + Issuer string `envconfig:"OAUTH2_ISSUER"` + ClientID string `envconfig:"OAUTH2_CLIENT_ID"` + ClientSecret string `envconfig:"OAUTH2_CLIENT_SECRET"` +} + // ToMigrationUri returns a string for the migration package with the correct prefix func (d DatabaseConfig) ToMigrationUri() string { return fmt.Sprintf("pgx5://%s:%s@%s:%s/%s?sslmode=%s", diff --git a/pkg/gen/cloud/v1/cloud.pb.go b/pkg/gen/cloud/v1/cloud.pb.go index db2d24e..fe53e14 100644 --- a/pkg/gen/cloud/v1/cloud.pb.go +++ b/pkg/gen/cloud/v1/cloud.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.34.2 +// protoc-gen-go v1.35.1 // protoc (unknown) // source: cloud/v1/cloud.proto @@ -31,7 +31,7 @@ const ( TaskStatusEnum_FAILED TaskStatusEnum = 2 // Task encountered an error and failed to complete TaskStatusEnum_SUCCEEDED TaskStatusEnum = 3 // Task completed successfully TaskStatusEnum_UNKNOWN TaskStatusEnum = 4 // Task status cannot be determined - TaskStatusEnum_ALL TaskStatusEnum = 5 // Task status cannot be determined + TaskStatusEnum_ALL TaskStatusEnum = 5 // Represents all task statuses ) // Enum value maps for TaskStatusEnum. 
@@ -95,11 +95,9 @@ type Payload struct { func (x *Payload) Reset() { *x = Payload{} - if protoimpl.UnsafeEnabled { - mi := &file_cloud_v1_cloud_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_cloud_v1_cloud_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Payload) String() string { @@ -110,7 +108,7 @@ func (*Payload) ProtoMessage() {} func (x *Payload) ProtoReflect() protoreflect.Message { mi := &file_cloud_v1_cloud_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -155,11 +153,9 @@ type CreateTaskRequest struct { func (x *CreateTaskRequest) Reset() { *x = CreateTaskRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_cloud_v1_cloud_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_cloud_v1_cloud_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *CreateTaskRequest) String() string { @@ -170,7 +166,7 @@ func (*CreateTaskRequest) ProtoMessage() {} func (x *CreateTaskRequest) ProtoReflect() protoreflect.Message { mi := &file_cloud_v1_cloud_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -225,11 +221,9 @@ type CreateTaskResponse struct { func (x *CreateTaskResponse) Reset() { *x = CreateTaskResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_cloud_v1_cloud_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_cloud_v1_cloud_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *CreateTaskResponse) String() string { @@ -240,7 +234,7 @@ func (*CreateTaskResponse) ProtoMessage() {} func (x *CreateTaskResponse) ProtoReflect() protoreflect.Message { mi := &file_cloud_v1_cloud_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -294,11 +288,9 @@ type Task struct { func (x *Task) Reset() { *x = Task{} - if protoimpl.UnsafeEnabled { - mi := &file_cloud_v1_cloud_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_cloud_v1_cloud_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Task) String() string { @@ -309,7 +301,7 @@ func (*Task) ProtoMessage() {} func (x *Task) ProtoReflect() protoreflect.Message { mi := &file_cloud_v1_cloud_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -406,11 +398,9 @@ type TaskHistory struct { func (x *TaskHistory) Reset() { *x = TaskHistory{} - if protoimpl.UnsafeEnabled { - mi := &file_cloud_v1_cloud_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_cloud_v1_cloud_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *TaskHistory) String() string { @@ -421,7 +411,7 @@ func 
(*TaskHistory) ProtoMessage() {} func (x *TaskHistory) ProtoReflect() protoreflect.Message { mi := &file_cloud_v1_cloud_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -476,11 +466,9 @@ type GetTaskRequest struct { func (x *GetTaskRequest) Reset() { *x = GetTaskRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_cloud_v1_cloud_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_cloud_v1_cloud_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *GetTaskRequest) String() string { @@ -491,7 +479,7 @@ func (*GetTaskRequest) ProtoMessage() {} func (x *GetTaskRequest) ProtoReflect() protoreflect.Message { mi := &file_cloud_v1_cloud_proto_msgTypes[5] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -525,11 +513,9 @@ type GetTaskHistoryRequest struct { func (x *GetTaskHistoryRequest) Reset() { *x = GetTaskHistoryRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_cloud_v1_cloud_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_cloud_v1_cloud_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *GetTaskHistoryRequest) String() string { @@ -540,7 +526,7 @@ func (*GetTaskHistoryRequest) ProtoMessage() {} func (x *GetTaskHistoryRequest) ProtoReflect() protoreflect.Message { mi := &file_cloud_v1_cloud_proto_msgTypes[6] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -575,11 +561,9 @@ type GetTaskHistoryResponse struct { func (x *GetTaskHistoryResponse) Reset() { *x = GetTaskHistoryResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_cloud_v1_cloud_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_cloud_v1_cloud_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *GetTaskHistoryResponse) String() string { @@ -590,7 +574,7 @@ func (*GetTaskHistoryResponse) ProtoMessage() {} func (x *GetTaskHistoryResponse) ProtoReflect() protoreflect.Message { mi := &file_cloud_v1_cloud_proto_msgTypes[7] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -628,11 +612,9 @@ type UpdateTaskStatusRequest struct { func (x *UpdateTaskStatusRequest) Reset() { *x = UpdateTaskStatusRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_cloud_v1_cloud_proto_msgTypes[8] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_cloud_v1_cloud_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *UpdateTaskStatusRequest) String() string { @@ -643,7 +625,7 @@ func (*UpdateTaskStatusRequest) ProtoMessage() {} func (x *UpdateTaskStatusRequest) ProtoReflect() protoreflect.Message { mi := &file_cloud_v1_cloud_proto_msgTypes[8] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if 
ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -679,6 +661,239 @@ func (x *UpdateTaskStatusRequest) GetMessage() string { return "" } +type HeartbeatRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Timestamp of the heartbeat, in ISO 8601 format (UTC). + // This timestamp indicates when the heartbeat was sent. + Timestamp string `protobuf:"bytes,1,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + // Unique identifier for the heartbeat request. This UUID helps in tracking and correlating requests. + // It should be a valid UUID format. + Uuid string `protobuf:"bytes,2,opt,name=uuid,proto3" json:"uuid,omitempty"` +} + +func (x *HeartbeatRequest) Reset() { + *x = HeartbeatRequest{} + mi := &file_cloud_v1_cloud_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *HeartbeatRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HeartbeatRequest) ProtoMessage() {} + +func (x *HeartbeatRequest) ProtoReflect() protoreflect.Message { + mi := &file_cloud_v1_cloud_proto_msgTypes[9] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HeartbeatRequest.ProtoReflect.Descriptor instead. +func (*HeartbeatRequest) Descriptor() ([]byte, []int) { + return file_cloud_v1_cloud_proto_rawDescGZIP(), []int{9} +} + +func (x *HeartbeatRequest) GetTimestamp() string { + if x != nil { + return x.Timestamp + } + return "" +} + +func (x *HeartbeatRequest) GetUuid() string { + if x != nil { + return x.Uuid + } + return "" +} + +type HeartbeatResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *HeartbeatResponse) Reset() { + *x = HeartbeatResponse{} + mi := &file_cloud_v1_cloud_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *HeartbeatResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HeartbeatResponse) ProtoMessage() {} + +func (x *HeartbeatResponse) ProtoReflect() protoreflect.Message { + mi := &file_cloud_v1_cloud_proto_msgTypes[10] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HeartbeatResponse.ProtoReflect.Descriptor instead. 
+func (*HeartbeatResponse) Descriptor() ([]byte, []int) { + return file_cloud_v1_cloud_proto_rawDescGZIP(), []int{10} +} + +// Message for stream requests +type PullEventsRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *PullEventsRequest) Reset() { + *x = PullEventsRequest{} + mi := &file_cloud_v1_cloud_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *PullEventsRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PullEventsRequest) ProtoMessage() {} + +func (x *PullEventsRequest) ProtoReflect() protoreflect.Message { + mi := &file_cloud_v1_cloud_proto_msgTypes[11] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PullEventsRequest.ProtoReflect.Descriptor instead. +func (*PullEventsRequest) Descriptor() ([]byte, []int) { + return file_cloud_v1_cloud_proto_rawDescGZIP(), []int{11} +} + +// Message for stream responses +type PullEventsResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Work assignment to be executed. + Work *WorkAssignment `protobuf:"bytes,1,opt,name=work,proto3" json:"work,omitempty"` // The task to be executed. +} + +func (x *PullEventsResponse) Reset() { + *x = PullEventsResponse{} + mi := &file_cloud_v1_cloud_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *PullEventsResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PullEventsResponse) ProtoMessage() {} + +func (x *PullEventsResponse) ProtoReflect() protoreflect.Message { + mi := &file_cloud_v1_cloud_proto_msgTypes[12] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PullEventsResponse.ProtoReflect.Descriptor instead. +func (*PullEventsResponse) Descriptor() ([]byte, []int) { + return file_cloud_v1_cloud_proto_rawDescGZIP(), []int{12} +} + +func (x *PullEventsResponse) GetWork() *WorkAssignment { + if x != nil { + return x.Work + } + return nil +} + +// Message for work assignments +type WorkAssignment struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Unique identifier for the assignment + AssignmentId int64 `protobuf:"varint,1,opt,name=assignment_id,json=assignmentId,proto3" json:"assignment_id,omitempty"` + // The task to be executed + Task *Task `protobuf:"bytes,2,opt,name=task,proto3" json:"task,omitempty"` +} + +func (x *WorkAssignment) Reset() { + *x = WorkAssignment{} + mi := &file_cloud_v1_cloud_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *WorkAssignment) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*WorkAssignment) ProtoMessage() {} + +func (x *WorkAssignment) ProtoReflect() protoreflect.Message { + mi := &file_cloud_v1_cloud_proto_msgTypes[13] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use WorkAssignment.ProtoReflect.Descriptor instead. 
+func (*WorkAssignment) Descriptor() ([]byte, []int) { + return file_cloud_v1_cloud_proto_rawDescGZIP(), []int{13} +} + +func (x *WorkAssignment) GetAssignmentId() int64 { + if x != nil { + return x.AssignmentId + } + return 0 +} + +func (x *WorkAssignment) GetTask() *Task { + if x != nil { + return x.Task + } + return nil +} + // Message for GetStatus request (empty) type GetStatusRequest struct { state protoimpl.MessageState @@ -688,11 +903,9 @@ type GetStatusRequest struct { func (x *GetStatusRequest) Reset() { *x = GetStatusRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_cloud_v1_cloud_proto_msgTypes[9] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_cloud_v1_cloud_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *GetStatusRequest) String() string { @@ -702,8 +915,8 @@ func (x *GetStatusRequest) String() string { func (*GetStatusRequest) ProtoMessage() {} func (x *GetStatusRequest) ProtoReflect() protoreflect.Message { - mi := &file_cloud_v1_cloud_proto_msgTypes[9] - if protoimpl.UnsafeEnabled && x != nil { + mi := &file_cloud_v1_cloud_proto_msgTypes[14] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -715,7 +928,7 @@ func (x *GetStatusRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use GetStatusRequest.ProtoReflect.Descriptor instead. func (*GetStatusRequest) Descriptor() ([]byte, []int) { - return file_cloud_v1_cloud_proto_rawDescGZIP(), []int{9} + return file_cloud_v1_cloud_proto_rawDescGZIP(), []int{14} } // Message for GetStatus response @@ -724,17 +937,15 @@ type GetStatusResponse struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // Map of task statuses and their counts + // Map of task statuses and their counts. StatusCounts map[int32]int64 `protobuf:"bytes,1,rep,name=status_counts,json=statusCounts,proto3" json:"status_counts,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"` } func (x *GetStatusResponse) Reset() { *x = GetStatusResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_cloud_v1_cloud_proto_msgTypes[10] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_cloud_v1_cloud_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *GetStatusResponse) String() string { @@ -744,8 +955,8 @@ func (x *GetStatusResponse) String() string { func (*GetStatusResponse) ProtoMessage() {} func (x *GetStatusResponse) ProtoReflect() protoreflect.Message { - mi := &file_cloud_v1_cloud_proto_msgTypes[10] - if protoimpl.UnsafeEnabled && x != nil { + mi := &file_cloud_v1_cloud_proto_msgTypes[15] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -757,7 +968,7 @@ func (x *GetStatusResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use GetStatusResponse.ProtoReflect.Descriptor instead. 
func (*GetStatusResponse) Descriptor() ([]byte, []int) { - return file_cloud_v1_cloud_proto_rawDescGZIP(), []int{10} + return file_cloud_v1_cloud_proto_rawDescGZIP(), []int{15} } func (x *GetStatusResponse) GetStatusCounts() map[int32]int64 { @@ -778,11 +989,9 @@ type TaskList struct { func (x *TaskList) Reset() { *x = TaskList{} - if protoimpl.UnsafeEnabled { - mi := &file_cloud_v1_cloud_proto_msgTypes[11] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_cloud_v1_cloud_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *TaskList) String() string { @@ -792,8 +1001,8 @@ func (x *TaskList) String() string { func (*TaskList) ProtoMessage() {} func (x *TaskList) ProtoReflect() protoreflect.Message { - mi := &file_cloud_v1_cloud_proto_msgTypes[11] - if protoimpl.UnsafeEnabled && x != nil { + mi := &file_cloud_v1_cloud_proto_msgTypes[16] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -805,7 +1014,7 @@ func (x *TaskList) ProtoReflect() protoreflect.Message { // Deprecated: Use TaskList.ProtoReflect.Descriptor instead. func (*TaskList) Descriptor() ([]byte, []int) { - return file_cloud_v1_cloud_proto_rawDescGZIP(), []int{11} + return file_cloud_v1_cloud_proto_rawDescGZIP(), []int{16} } func (x *TaskList) GetTasks() []*Task { @@ -836,11 +1045,9 @@ type TaskListRequest struct { func (x *TaskListRequest) Reset() { *x = TaskListRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_cloud_v1_cloud_proto_msgTypes[12] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_cloud_v1_cloud_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *TaskListRequest) String() string { @@ -850,8 +1057,8 @@ func (x *TaskListRequest) String() string { func (*TaskListRequest) ProtoMessage() {} func (x *TaskListRequest) ProtoReflect() protoreflect.Message { - mi := &file_cloud_v1_cloud_proto_msgTypes[12] - if protoimpl.UnsafeEnabled && x != nil { + mi := &file_cloud_v1_cloud_proto_msgTypes[17] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -863,7 +1070,7 @@ func (x *TaskListRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use TaskListRequest.ProtoReflect.Descriptor instead. 
func (*TaskListRequest) Descriptor() ([]byte, []int) { - return file_cloud_v1_cloud_proto_rawDescGZIP(), []int{12} + return file_cloud_v1_cloud_proto_rawDescGZIP(), []int{17} } func (x *TaskListRequest) GetLimit() int32 { @@ -989,78 +1196,114 @@ var file_cloud_v1_cloud_proto_rawDesc = []byte{ 0x6d, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x22, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x72, 0x03, 0x18, 0xd0, 0x0f, 0x52, 0x07, - 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x12, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x53, 0x74, - 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0xa8, 0x01, 0x0a, 0x11, + 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0xd6, 0x01, 0x0a, 0x10, 0x48, 0x65, 0x61, 0x72, + 0x74, 0x62, 0x65, 0x61, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4b, 0x0a, 0x09, + 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, + 0x2d, 0xfa, 0x42, 0x2a, 0x72, 0x28, 0x32, 0x26, 0x5e, 0x5c, 0x64, 0x7b, 0x34, 0x7d, 0x2d, 0x5c, + 0x64, 0x7b, 0x32, 0x7d, 0x2d, 0x5c, 0x64, 0x7b, 0x32, 0x7d, 0x54, 0x5c, 0x64, 0x7b, 0x32, 0x7d, + 0x3a, 0x5c, 0x64, 0x7b, 0x32, 0x7d, 0x3a, 0x5c, 0x64, 0x7b, 0x32, 0x7d, 0x5a, 0x24, 0x52, 0x09, + 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x75, 0x0a, 0x04, 0x75, 0x75, 0x69, + 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x61, 0xfa, 0x42, 0x5e, 0x72, 0x5c, 0x32, 0x5a, + 0x5e, 0x5b, 0x30, 0x2d, 0x39, 0x61, 0x2d, 0x66, 0x41, 0x2d, 0x46, 0x5d, 0x7b, 0x38, 0x7d, 0x2d, + 0x5b, 0x30, 0x2d, 0x39, 0x61, 0x2d, 0x66, 0x41, 0x2d, 0x46, 0x5d, 0x7b, 0x34, 0x7d, 0x2d, 0x5b, + 0x31, 0x2d, 0x35, 0x5d, 0x5b, 0x30, 0x2d, 0x39, 0x61, 0x2d, 0x66, 0x41, 0x2d, 0x46, 0x5d, 0x7b, + 0x33, 0x7d, 0x2d, 0x5b, 0x38, 0x39, 0x61, 0x62, 0x41, 0x42, 0x5d, 0x5b, 0x30, 0x2d, 0x39, 0x61, + 0x2d, 0x66, 0x41, 0x2d, 0x46, 0x5d, 0x7b, 0x33, 0x7d, 0x2d, 0x5b, 0x30, 0x2d, 0x39, 0x61, 0x2d, + 0x66, 0x41, 0x2d, 0x46, 0x5d, 0x7b, 0x31, 0x32, 0x7d, 0x24, 0x52, 0x04, 0x75, 0x75, 0x69, 0x64, + 0x22, 0x13, 0x0a, 0x11, 0x48, 0x65, 0x61, 0x72, 0x74, 0x62, 0x65, 0x61, 0x74, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x13, 0x0a, 0x11, 0x50, 0x75, 0x6c, 0x6c, 0x45, 0x76, 0x65, + 0x6e, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x42, 0x0a, 0x12, 0x50, 0x75, + 0x6c, 0x6c, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x2c, 0x0a, 0x04, 0x77, 0x6f, 0x72, 0x6b, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, + 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x41, 0x73, + 0x73, 0x69, 0x67, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x52, 0x04, 0x77, 0x6f, 0x72, 0x6b, 0x22, 0x63, + 0x0a, 0x0e, 0x57, 0x6f, 0x72, 0x6b, 0x41, 0x73, 0x73, 0x69, 0x67, 0x6e, 0x6d, 0x65, 0x6e, 0x74, + 0x12, 0x23, 0x0a, 0x0d, 0x61, 0x73, 0x73, 0x69, 0x67, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x5f, 0x69, + 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0c, 0x61, 0x73, 0x73, 0x69, 0x67, 0x6e, 0x6d, + 0x65, 0x6e, 0x74, 0x49, 0x64, 0x12, 0x2c, 0x0a, 0x04, 0x74, 0x61, 0x73, 0x6b, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x54, + 0x61, 0x73, 0x6b, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x04, 0x74, + 0x61, 0x73, 0x6b, 0x22, 0x12, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x52, 0x65, 0x71, 0x75, 0x65, 
0x73, 0x74, 0x22, 0xa8, 0x01, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x53, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x52, 0x0a, + 0x0d, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x18, 0x01, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x12, 0x52, 0x0a, 0x0d, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, - 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, - 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, 0x6f, 0x75, 0x6e, - 0x74, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0c, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, - 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x1a, 0x3f, 0x0a, 0x11, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, - 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, - 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x30, 0x0a, 0x08, 0x54, 0x61, 0x73, 0x6b, 0x4c, 0x69, - 0x73, 0x74, 0x12, 0x24, 0x0a, 0x05, 0x74, 0x61, 0x73, 0x6b, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x61, 0x73, - 0x6b, 0x52, 0x05, 0x74, 0x61, 0x73, 0x6b, 0x73, 0x22, 0xd5, 0x01, 0x0a, 0x0f, 0x54, 0x61, 0x73, - 0x6b, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1f, 0x0a, 0x05, - 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x42, 0x09, 0xfa, 0x42, 0x06, - 0x1a, 0x04, 0x18, 0x64, 0x28, 0x01, 0x52, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x12, 0x1f, 0x0a, - 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x42, 0x07, 0xfa, - 0x42, 0x04, 0x1a, 0x02, 0x28, 0x00, 0x52, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x12, 0x35, - 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, - 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x53, 0x74, - 0x61, 0x74, 0x75, 0x73, 0x45, 0x6e, 0x75, 0x6d, 0x48, 0x00, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, - 0x75, 0x73, 0x88, 0x01, 0x01, 0x12, 0x35, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x09, 0x42, 0x1c, 0xfa, 0x42, 0x19, 0x72, 0x17, 0x52, 0x0a, 0x73, 0x65, 0x6e, 0x64, - 0x5f, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x52, 0x09, 0x72, 0x75, 0x6e, 0x5f, 0x71, 0x75, 0x65, 0x72, - 0x79, 0x48, 0x01, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x88, 0x01, 0x01, 0x42, 0x09, 0x0a, 0x07, - 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x42, 0x07, 0x0a, 0x05, 0x5f, 0x74, 0x79, 0x70, 0x65, - 0x2a, 0x5a, 0x0a, 0x0e, 0x54, 0x61, 0x73, 0x6b, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x45, 0x6e, - 0x75, 0x6d, 0x12, 0x0a, 0x0a, 0x06, 0x51, 0x55, 0x45, 0x55, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0b, - 0x0a, 0x07, 0x52, 0x55, 0x4e, 0x4e, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x46, - 0x41, 0x49, 0x4c, 0x45, 0x44, 0x10, 0x02, 0x12, 0x0d, 0x0a, 0x09, 0x53, 0x55, 0x43, 0x43, 0x45, - 0x45, 0x44, 0x45, 0x44, 0x10, 0x03, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, - 0x4e, 0x10, 0x04, 0x12, 0x07, 0x0a, 0x03, 0x41, 0x4c, 
0x4c, 0x10, 0x05, 0x32, 0xc7, 0x03, 0x0a, - 0x15, 0x54, 0x61, 0x73, 0x6b, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x53, - 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x49, 0x0a, 0x0a, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, - 0x54, 0x61, 0x73, 0x6b, 0x12, 0x1b, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x76, 0x31, 0x2e, - 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x1a, 0x1c, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x65, - 0x61, 0x74, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, - 0x00, 0x12, 0x35, 0x0a, 0x07, 0x47, 0x65, 0x74, 0x54, 0x61, 0x73, 0x6b, 0x12, 0x18, 0x2e, 0x63, - 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x54, 0x61, 0x73, 0x6b, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x0e, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x76, - 0x31, 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x22, 0x00, 0x12, 0x3c, 0x0a, 0x09, 0x4c, 0x69, 0x73, 0x74, - 0x54, 0x61, 0x73, 0x6b, 0x73, 0x12, 0x19, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x76, 0x31, - 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x1a, 0x12, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x61, 0x73, 0x6b, - 0x4c, 0x69, 0x73, 0x74, 0x22, 0x00, 0x12, 0x55, 0x0a, 0x0e, 0x47, 0x65, 0x74, 0x54, 0x61, 0x73, - 0x6b, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x12, 0x1f, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, - 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x54, 0x61, 0x73, 0x6b, 0x48, 0x69, 0x73, 0x74, 0x6f, - 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x63, 0x6c, 0x6f, 0x75, - 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x54, 0x61, 0x73, 0x6b, 0x48, 0x69, 0x73, 0x74, - 0x6f, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4f, 0x0a, - 0x10, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x53, 0x74, 0x61, 0x74, 0x75, - 0x73, 0x12, 0x21, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x55, 0x70, 0x64, - 0x61, 0x74, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x00, 0x12, 0x46, - 0x0a, 0x09, 0x47, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x1a, 0x2e, 0x63, 0x6c, - 0x6f, 0x75, 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, - 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, 0x7a, 0x0a, 0x0c, 0x63, 0x6f, 0x6d, 0x2e, 0x63, 0x6c, - 0x6f, 0x75, 0x64, 0x2e, 0x76, 0x31, 0x42, 0x0a, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x50, 0x72, 0x6f, - 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x1d, 0x74, 0x61, 0x73, 0x6b, 0x2f, 0x70, 0x6b, 0x67, 0x2f, 0x67, - 0x65, 0x6e, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2f, 0x76, 0x31, 0x3b, 0x63, 0x6c, 0x6f, 0x75, - 0x64, 0x76, 0x31, 0xa2, 0x02, 0x03, 0x43, 0x58, 0x58, 0xaa, 0x02, 0x08, 0x43, 0x6c, 0x6f, 0x75, - 0x64, 0x2e, 0x56, 0x31, 0xca, 0x02, 0x08, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x56, 0x31, 0xe2, - 0x02, 0x14, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x56, 0x31, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, - 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x09, 0x43, 0x6c, 0x6f, 0x75, 
0x64, 0x3a, 0x3a, - 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x52, 0x0c, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, + 0x73, 0x1a, 0x3f, 0x0a, 0x11, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, + 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x05, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, + 0x38, 0x01, 0x22, 0x30, 0x0a, 0x08, 0x54, 0x61, 0x73, 0x6b, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x24, + 0x0a, 0x05, 0x74, 0x61, 0x73, 0x6b, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, + 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x05, 0x74, + 0x61, 0x73, 0x6b, 0x73, 0x22, 0xd5, 0x01, 0x0a, 0x0f, 0x54, 0x61, 0x73, 0x6b, 0x4c, 0x69, 0x73, + 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1f, 0x0a, 0x05, 0x6c, 0x69, 0x6d, 0x69, + 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x42, 0x09, 0xfa, 0x42, 0x06, 0x1a, 0x04, 0x18, 0x64, + 0x28, 0x01, 0x52, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x12, 0x1f, 0x0a, 0x06, 0x6f, 0x66, 0x66, + 0x73, 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x1a, 0x02, + 0x28, 0x00, 0x52, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x12, 0x35, 0x0a, 0x06, 0x73, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x63, 0x6c, 0x6f, + 0x75, 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x45, 0x6e, 0x75, 0x6d, 0x48, 0x00, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x88, 0x01, + 0x01, 0x12, 0x35, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x42, + 0x1c, 0xfa, 0x42, 0x19, 0x72, 0x17, 0x52, 0x0a, 0x73, 0x65, 0x6e, 0x64, 0x5f, 0x65, 0x6d, 0x61, + 0x69, 0x6c, 0x52, 0x09, 0x72, 0x75, 0x6e, 0x5f, 0x71, 0x75, 0x65, 0x72, 0x79, 0x48, 0x01, 0x52, + 0x04, 0x74, 0x79, 0x70, 0x65, 0x88, 0x01, 0x01, 0x42, 0x09, 0x0a, 0x07, 0x5f, 0x73, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x42, 0x07, 0x0a, 0x05, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x2a, 0x5a, 0x0a, 0x0e, + 0x54, 0x61, 0x73, 0x6b, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x45, 0x6e, 0x75, 0x6d, 0x12, 0x0a, + 0x0a, 0x06, 0x51, 0x55, 0x45, 0x55, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0b, 0x0a, 0x07, 0x52, 0x55, + 0x4e, 0x4e, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x46, 0x41, 0x49, 0x4c, 0x45, + 0x44, 0x10, 0x02, 0x12, 0x0d, 0x0a, 0x09, 0x53, 0x55, 0x43, 0x43, 0x45, 0x45, 0x44, 0x45, 0x44, + 0x10, 0x03, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x04, 0x12, + 0x07, 0x0a, 0x03, 0x41, 0x4c, 0x4c, 0x10, 0x05, 0x32, 0xdc, 0x04, 0x0a, 0x15, 0x54, 0x61, 0x73, + 0x6b, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x12, 0x49, 0x0a, 0x0a, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x61, 0x73, 0x6b, + 0x12, 0x1b, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, + 0x74, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1c, 0x2e, + 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, + 0x61, 0x73, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x35, 0x0a, + 0x07, 0x47, 0x65, 0x74, 0x54, 0x61, 0x73, 
0x6b, 0x12, 0x18, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, + 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x0e, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x61, + 0x73, 0x6b, 0x22, 0x00, 0x12, 0x3c, 0x0a, 0x09, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x61, 0x73, 0x6b, + 0x73, 0x12, 0x19, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x61, 0x73, + 0x6b, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x12, 0x2e, 0x63, + 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x4c, 0x69, 0x73, 0x74, + 0x22, 0x00, 0x12, 0x55, 0x0a, 0x0e, 0x47, 0x65, 0x74, 0x54, 0x61, 0x73, 0x6b, 0x48, 0x69, 0x73, + 0x74, 0x6f, 0x72, 0x79, 0x12, 0x1f, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x76, 0x31, 0x2e, + 0x47, 0x65, 0x74, 0x54, 0x61, 0x73, 0x6b, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x76, 0x31, + 0x2e, 0x47, 0x65, 0x74, 0x54, 0x61, 0x73, 0x6b, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4f, 0x0a, 0x10, 0x55, 0x70, 0x64, + 0x61, 0x74, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x21, 0x2e, + 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, + 0x61, 0x73, 0x6b, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x00, 0x12, 0x46, 0x0a, 0x09, 0x47, 0x65, + 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x1a, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, + 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x47, + 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x22, 0x00, 0x12, 0x46, 0x0a, 0x09, 0x48, 0x65, 0x61, 0x72, 0x74, 0x62, 0x65, 0x61, 0x74, 0x12, + 0x1a, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x65, 0x61, 0x72, 0x74, + 0x62, 0x65, 0x61, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x63, 0x6c, + 0x6f, 0x75, 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x65, 0x61, 0x72, 0x74, 0x62, 0x65, 0x61, 0x74, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4b, 0x0a, 0x0a, 0x50, 0x75, + 0x6c, 0x6c, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x1b, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, + 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x75, 0x6c, 0x6c, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x76, 0x31, + 0x2e, 0x50, 0x75, 0x6c, 0x6c, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x22, 0x00, 0x30, 0x01, 0x42, 0x7a, 0x0a, 0x0c, 0x63, 0x6f, 0x6d, 0x2e, 0x63, + 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x76, 0x31, 0x42, 0x0a, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x50, 0x72, + 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x1d, 0x74, 0x61, 0x73, 0x6b, 0x2f, 0x70, 0x6b, 0x67, 0x2f, + 0x67, 0x65, 0x6e, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2f, 0x76, 0x31, 0x3b, 0x63, 0x6c, 0x6f, + 0x75, 0x64, 0x76, 0x31, 0xa2, 0x02, 0x03, 0x43, 0x58, 0x58, 0xaa, 0x02, 0x08, 0x43, 0x6c, 0x6f, + 0x75, 0x64, 0x2e, 0x56, 0x31, 0xca, 0x02, 0x08, 0x43, 0x6c, 0x6f, 
0x75, 0x64, 0x5c, 0x56, 0x31, + 0xe2, 0x02, 0x14, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x56, 0x31, 0x5c, 0x47, 0x50, 0x42, 0x4d, + 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x09, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x3a, + 0x3a, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -1076,7 +1319,7 @@ func file_cloud_v1_cloud_proto_rawDescGZIP() []byte { } var file_cloud_v1_cloud_proto_enumTypes = make([]protoimpl.EnumInfo, 1) -var file_cloud_v1_cloud_proto_msgTypes = make([]protoimpl.MessageInfo, 15) +var file_cloud_v1_cloud_proto_msgTypes = make([]protoimpl.MessageInfo, 20) var file_cloud_v1_cloud_proto_goTypes = []any{ (TaskStatusEnum)(0), // 0: cloud.v1.TaskStatusEnum (*Payload)(nil), // 1: cloud.v1.Payload @@ -1088,42 +1331,53 @@ var file_cloud_v1_cloud_proto_goTypes = []any{ (*GetTaskHistoryRequest)(nil), // 7: cloud.v1.GetTaskHistoryRequest (*GetTaskHistoryResponse)(nil), // 8: cloud.v1.GetTaskHistoryResponse (*UpdateTaskStatusRequest)(nil), // 9: cloud.v1.UpdateTaskStatusRequest - (*GetStatusRequest)(nil), // 10: cloud.v1.GetStatusRequest - (*GetStatusResponse)(nil), // 11: cloud.v1.GetStatusResponse - (*TaskList)(nil), // 12: cloud.v1.TaskList - (*TaskListRequest)(nil), // 13: cloud.v1.TaskListRequest - nil, // 14: cloud.v1.Payload.ParametersEntry - nil, // 15: cloud.v1.GetStatusResponse.StatusCountsEntry - (*emptypb.Empty)(nil), // 16: google.protobuf.Empty + (*HeartbeatRequest)(nil), // 10: cloud.v1.HeartbeatRequest + (*HeartbeatResponse)(nil), // 11: cloud.v1.HeartbeatResponse + (*PullEventsRequest)(nil), // 12: cloud.v1.PullEventsRequest + (*PullEventsResponse)(nil), // 13: cloud.v1.PullEventsResponse + (*WorkAssignment)(nil), // 14: cloud.v1.WorkAssignment + (*GetStatusRequest)(nil), // 15: cloud.v1.GetStatusRequest + (*GetStatusResponse)(nil), // 16: cloud.v1.GetStatusResponse + (*TaskList)(nil), // 17: cloud.v1.TaskList + (*TaskListRequest)(nil), // 18: cloud.v1.TaskListRequest + nil, // 19: cloud.v1.Payload.ParametersEntry + nil, // 20: cloud.v1.GetStatusResponse.StatusCountsEntry + (*emptypb.Empty)(nil), // 21: google.protobuf.Empty } var file_cloud_v1_cloud_proto_depIdxs = []int32{ - 14, // 0: cloud.v1.Payload.parameters:type_name -> cloud.v1.Payload.ParametersEntry + 19, // 0: cloud.v1.Payload.parameters:type_name -> cloud.v1.Payload.ParametersEntry 1, // 1: cloud.v1.CreateTaskRequest.payload:type_name -> cloud.v1.Payload 0, // 2: cloud.v1.Task.status:type_name -> cloud.v1.TaskStatusEnum 1, // 3: cloud.v1.Task.payload:type_name -> cloud.v1.Payload 0, // 4: cloud.v1.TaskHistory.status:type_name -> cloud.v1.TaskStatusEnum 5, // 5: cloud.v1.GetTaskHistoryResponse.history:type_name -> cloud.v1.TaskHistory 0, // 6: cloud.v1.UpdateTaskStatusRequest.status:type_name -> cloud.v1.TaskStatusEnum - 15, // 7: cloud.v1.GetStatusResponse.status_counts:type_name -> cloud.v1.GetStatusResponse.StatusCountsEntry - 4, // 8: cloud.v1.TaskList.tasks:type_name -> cloud.v1.Task - 0, // 9: cloud.v1.TaskListRequest.status:type_name -> cloud.v1.TaskStatusEnum - 2, // 10: cloud.v1.TaskManagementService.CreateTask:input_type -> cloud.v1.CreateTaskRequest - 6, // 11: cloud.v1.TaskManagementService.GetTask:input_type -> cloud.v1.GetTaskRequest - 13, // 12: cloud.v1.TaskManagementService.ListTasks:input_type -> cloud.v1.TaskListRequest - 7, // 13: cloud.v1.TaskManagementService.GetTaskHistory:input_type -> cloud.v1.GetTaskHistoryRequest - 9, // 14: cloud.v1.TaskManagementService.UpdateTaskStatus:input_type -> cloud.v1.UpdateTaskStatusRequest - 10, // 15: 
cloud.v1.TaskManagementService.GetStatus:input_type -> cloud.v1.GetStatusRequest - 3, // 16: cloud.v1.TaskManagementService.CreateTask:output_type -> cloud.v1.CreateTaskResponse - 4, // 17: cloud.v1.TaskManagementService.GetTask:output_type -> cloud.v1.Task - 12, // 18: cloud.v1.TaskManagementService.ListTasks:output_type -> cloud.v1.TaskList - 8, // 19: cloud.v1.TaskManagementService.GetTaskHistory:output_type -> cloud.v1.GetTaskHistoryResponse - 16, // 20: cloud.v1.TaskManagementService.UpdateTaskStatus:output_type -> google.protobuf.Empty - 11, // 21: cloud.v1.TaskManagementService.GetStatus:output_type -> cloud.v1.GetStatusResponse - 16, // [16:22] is the sub-list for method output_type - 10, // [10:16] is the sub-list for method input_type - 10, // [10:10] is the sub-list for extension type_name - 10, // [10:10] is the sub-list for extension extendee - 0, // [0:10] is the sub-list for field type_name + 14, // 7: cloud.v1.PullEventsResponse.work:type_name -> cloud.v1.WorkAssignment + 4, // 8: cloud.v1.WorkAssignment.task:type_name -> cloud.v1.Task + 20, // 9: cloud.v1.GetStatusResponse.status_counts:type_name -> cloud.v1.GetStatusResponse.StatusCountsEntry + 4, // 10: cloud.v1.TaskList.tasks:type_name -> cloud.v1.Task + 0, // 11: cloud.v1.TaskListRequest.status:type_name -> cloud.v1.TaskStatusEnum + 2, // 12: cloud.v1.TaskManagementService.CreateTask:input_type -> cloud.v1.CreateTaskRequest + 6, // 13: cloud.v1.TaskManagementService.GetTask:input_type -> cloud.v1.GetTaskRequest + 18, // 14: cloud.v1.TaskManagementService.ListTasks:input_type -> cloud.v1.TaskListRequest + 7, // 15: cloud.v1.TaskManagementService.GetTaskHistory:input_type -> cloud.v1.GetTaskHistoryRequest + 9, // 16: cloud.v1.TaskManagementService.UpdateTaskStatus:input_type -> cloud.v1.UpdateTaskStatusRequest + 15, // 17: cloud.v1.TaskManagementService.GetStatus:input_type -> cloud.v1.GetStatusRequest + 10, // 18: cloud.v1.TaskManagementService.Heartbeat:input_type -> cloud.v1.HeartbeatRequest + 12, // 19: cloud.v1.TaskManagementService.PullEvents:input_type -> cloud.v1.PullEventsRequest + 3, // 20: cloud.v1.TaskManagementService.CreateTask:output_type -> cloud.v1.CreateTaskResponse + 4, // 21: cloud.v1.TaskManagementService.GetTask:output_type -> cloud.v1.Task + 17, // 22: cloud.v1.TaskManagementService.ListTasks:output_type -> cloud.v1.TaskList + 8, // 23: cloud.v1.TaskManagementService.GetTaskHistory:output_type -> cloud.v1.GetTaskHistoryResponse + 21, // 24: cloud.v1.TaskManagementService.UpdateTaskStatus:output_type -> google.protobuf.Empty + 16, // 25: cloud.v1.TaskManagementService.GetStatus:output_type -> cloud.v1.GetStatusResponse + 11, // 26: cloud.v1.TaskManagementService.Heartbeat:output_type -> cloud.v1.HeartbeatResponse + 13, // 27: cloud.v1.TaskManagementService.PullEvents:output_type -> cloud.v1.PullEventsResponse + 20, // [20:28] is the sub-list for method output_type + 12, // [12:20] is the sub-list for method input_type + 12, // [12:12] is the sub-list for extension type_name + 12, // [12:12] is the sub-list for extension extendee + 0, // [0:12] is the sub-list for field type_name } func init() { file_cloud_v1_cloud_proto_init() } @@ -1131,172 +1385,14 @@ func file_cloud_v1_cloud_proto_init() { if File_cloud_v1_cloud_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_cloud_v1_cloud_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*Payload); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - 
} - file_cloud_v1_cloud_proto_msgTypes[1].Exporter = func(v any, i int) any { - switch v := v.(*CreateTaskRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_cloud_v1_cloud_proto_msgTypes[2].Exporter = func(v any, i int) any { - switch v := v.(*CreateTaskResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_cloud_v1_cloud_proto_msgTypes[3].Exporter = func(v any, i int) any { - switch v := v.(*Task); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_cloud_v1_cloud_proto_msgTypes[4].Exporter = func(v any, i int) any { - switch v := v.(*TaskHistory); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_cloud_v1_cloud_proto_msgTypes[5].Exporter = func(v any, i int) any { - switch v := v.(*GetTaskRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_cloud_v1_cloud_proto_msgTypes[6].Exporter = func(v any, i int) any { - switch v := v.(*GetTaskHistoryRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_cloud_v1_cloud_proto_msgTypes[7].Exporter = func(v any, i int) any { - switch v := v.(*GetTaskHistoryResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_cloud_v1_cloud_proto_msgTypes[8].Exporter = func(v any, i int) any { - switch v := v.(*UpdateTaskStatusRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_cloud_v1_cloud_proto_msgTypes[9].Exporter = func(v any, i int) any { - switch v := v.(*GetStatusRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_cloud_v1_cloud_proto_msgTypes[10].Exporter = func(v any, i int) any { - switch v := v.(*GetStatusResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_cloud_v1_cloud_proto_msgTypes[11].Exporter = func(v any, i int) any { - switch v := v.(*TaskList); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_cloud_v1_cloud_proto_msgTypes[12].Exporter = func(v any, i int) any { - switch v := v.(*TaskListRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - file_cloud_v1_cloud_proto_msgTypes[12].OneofWrappers = []any{} + file_cloud_v1_cloud_proto_msgTypes[17].OneofWrappers = []any{} type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_cloud_v1_cloud_proto_rawDesc, NumEnums: 1, - NumMessages: 15, + NumMessages: 20, NumExtensions: 0, NumServices: 1, }, diff --git a/pkg/gen/cloud/v1/cloud.swagger.json b/pkg/gen/cloud/v1/cloud.swagger.json index 6654d08..e32c9a6 100644 --- a/pkg/gen/cloud/v1/cloud.swagger.json +++ b/pkg/gen/cloud/v1/cloud.swagger.json @@ -65,7 +65,7 @@ 
"type": "string", "format": "int64" }, - "title": "Map of task statuses and their counts" + "description": "Map of task statuses and their counts." } }, "title": "Message for GetStatus response" @@ -84,6 +84,10 @@ }, "title": "Message for Task history response" }, + "v1HeartbeatResponse": { + "type": "object", + "description": "Response message for the heartbeat request.\n Currently, this message is empty, indicating successful receipt of the heartbeat." + }, "v1Payload": { "type": "object", "properties": { @@ -97,6 +101,16 @@ }, "title": "Message for Task Payload" }, + "v1PullEventsResponse": { + "type": "object", + "properties": { + "work": { + "$ref": "#/definitions/v1WorkAssignment", + "description": "Work assignment to be executed.\n\nThe task to be executed." + } + }, + "title": "Message for stream responses" + }, "v1Task": { "type": "object", "properties": { @@ -190,8 +204,23 @@ "ALL" ], "default": "QUEUED", - "description": "- QUEUED: Task is in the queue, waiting to be processed\n - RUNNING: Task is currently being executed\n - FAILED: Task encountered an error and failed to complete\n - SUCCEEDED: Task completed successfully\n - UNKNOWN: Task status cannot be determined\n - ALL: Task status cannot be determined", + "description": "- QUEUED: Task is in the queue, waiting to be processed\n - RUNNING: Task is currently being executed\n - FAILED: Task encountered an error and failed to complete\n - SUCCEEDED: Task completed successfully\n - UNKNOWN: Task status cannot be determined\n - ALL: Represents all task statuses", "title": "Enum for Task statuses" + }, + "v1WorkAssignment": { + "type": "object", + "properties": { + "assignmentId": { + "type": "string", + "format": "int64", + "title": "Unique identifier for the assignment" + }, + "task": { + "$ref": "#/definitions/v1Task", + "title": "The task to be executed" + } + }, + "title": "Message for work assignments" } } } diff --git a/pkg/gen/cloud/v1/cloud_grpc.pb.go b/pkg/gen/cloud/v1/cloud_grpc.pb.go new file mode 100644 index 0000000..784a833 --- /dev/null +++ b/pkg/gen/cloud/v1/cloud_grpc.pb.go @@ -0,0 +1,420 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.5.1 +// - protoc (unknown) +// source: cloud/v1/cloud.proto + +package cloudv1 + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + emptypb "google.golang.org/protobuf/types/known/emptypb" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.64.0 or later. 
+const _ = grpc.SupportPackageIsVersion9 + +const ( + TaskManagementService_CreateTask_FullMethodName = "/cloud.v1.TaskManagementService/CreateTask" + TaskManagementService_GetTask_FullMethodName = "/cloud.v1.TaskManagementService/GetTask" + TaskManagementService_ListTasks_FullMethodName = "/cloud.v1.TaskManagementService/ListTasks" + TaskManagementService_GetTaskHistory_FullMethodName = "/cloud.v1.TaskManagementService/GetTaskHistory" + TaskManagementService_UpdateTaskStatus_FullMethodName = "/cloud.v1.TaskManagementService/UpdateTaskStatus" + TaskManagementService_GetStatus_FullMethodName = "/cloud.v1.TaskManagementService/GetStatus" + TaskManagementService_Heartbeat_FullMethodName = "/cloud.v1.TaskManagementService/Heartbeat" + TaskManagementService_PullEvents_FullMethodName = "/cloud.v1.TaskManagementService/PullEvents" +) + +// TaskManagementServiceClient is the client API for TaskManagementService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +// +// Task Management service definition +type TaskManagementServiceClient interface { + // Creates a new task based on the provided request. + // Returns a CreateTaskResponse containing the unique identifier of the created task. + CreateTask(ctx context.Context, in *CreateTaskRequest, opts ...grpc.CallOption) (*CreateTaskResponse, error) + // Retrieves the current status and details of the specified task. + // Returns a Task message containing all information about the requested task. + GetTask(ctx context.Context, in *GetTaskRequest, opts ...grpc.CallOption) (*Task, error) + // Lists tasks currently available in the system, with pagination support. + // Returns a TaskList containing the requested subset of tasks. + ListTasks(ctx context.Context, in *TaskListRequest, opts ...grpc.CallOption) (*TaskList, error) + // Retrieves the execution history of the specified task. + // Returns a GetTaskHistoryResponse containing a list of historical status updates. + GetTaskHistory(ctx context.Context, in *GetTaskHistoryRequest, opts ...grpc.CallOption) (*GetTaskHistoryResponse, error) + // Updates the status of the specified task. + // Returns an empty response to confirm the update was processed. + UpdateTaskStatus(ctx context.Context, in *UpdateTaskStatusRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) + // Retrieves the count of tasks for each status. + // Returns a GetStatusResponse containing a map of status counts. + GetStatus(ctx context.Context, in *GetStatusRequest, opts ...grpc.CallOption) (*GetStatusResponse, error) + Heartbeat(ctx context.Context, in *HeartbeatRequest, opts ...grpc.CallOption) (*HeartbeatResponse, error) + PullEvents(ctx context.Context, in *PullEventsRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[PullEventsResponse], error) +} + +type taskManagementServiceClient struct { + cc grpc.ClientConnInterface +} + +func NewTaskManagementServiceClient(cc grpc.ClientConnInterface) TaskManagementServiceClient { + return &taskManagementServiceClient{cc} +} + +func (c *taskManagementServiceClient) CreateTask(ctx context.Context, in *CreateTaskRequest, opts ...grpc.CallOption) (*CreateTaskResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(CreateTaskResponse) + err := c.cc.Invoke(ctx, TaskManagementService_CreateTask_FullMethodName, in, out, cOpts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *taskManagementServiceClient) GetTask(ctx context.Context, in *GetTaskRequest, opts ...grpc.CallOption) (*Task, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(Task) + err := c.cc.Invoke(ctx, TaskManagementService_GetTask_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *taskManagementServiceClient) ListTasks(ctx context.Context, in *TaskListRequest, opts ...grpc.CallOption) (*TaskList, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(TaskList) + err := c.cc.Invoke(ctx, TaskManagementService_ListTasks_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *taskManagementServiceClient) GetTaskHistory(ctx context.Context, in *GetTaskHistoryRequest, opts ...grpc.CallOption) (*GetTaskHistoryResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(GetTaskHistoryResponse) + err := c.cc.Invoke(ctx, TaskManagementService_GetTaskHistory_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *taskManagementServiceClient) UpdateTaskStatus(ctx context.Context, in *UpdateTaskStatusRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(emptypb.Empty) + err := c.cc.Invoke(ctx, TaskManagementService_UpdateTaskStatus_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *taskManagementServiceClient) GetStatus(ctx context.Context, in *GetStatusRequest, opts ...grpc.CallOption) (*GetStatusResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(GetStatusResponse) + err := c.cc.Invoke(ctx, TaskManagementService_GetStatus_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *taskManagementServiceClient) Heartbeat(ctx context.Context, in *HeartbeatRequest, opts ...grpc.CallOption) (*HeartbeatResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(HeartbeatResponse) + err := c.cc.Invoke(ctx, TaskManagementService_Heartbeat_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *taskManagementServiceClient) PullEvents(ctx context.Context, in *PullEventsRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[PullEventsResponse], error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + stream, err := c.cc.NewStream(ctx, &TaskManagementService_ServiceDesc.Streams[0], TaskManagementService_PullEvents_FullMethodName, cOpts...) + if err != nil { + return nil, err + } + x := &grpc.GenericClientStream[PullEventsRequest, PullEventsResponse]{ClientStream: stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. +type TaskManagementService_PullEventsClient = grpc.ServerStreamingClient[PullEventsResponse] + +// TaskManagementServiceServer is the server API for TaskManagementService service. 
+// All implementations must embed UnimplementedTaskManagementServiceServer +// for forward compatibility. +// +// Task Management service definition +type TaskManagementServiceServer interface { + // Creates a new task based on the provided request. + // Returns a CreateTaskResponse containing the unique identifier of the created task. + CreateTask(context.Context, *CreateTaskRequest) (*CreateTaskResponse, error) + // Retrieves the current status and details of the specified task. + // Returns a Task message containing all information about the requested task. + GetTask(context.Context, *GetTaskRequest) (*Task, error) + // Lists tasks currently available in the system, with pagination support. + // Returns a TaskList containing the requested subset of tasks. + ListTasks(context.Context, *TaskListRequest) (*TaskList, error) + // Retrieves the execution history of the specified task. + // Returns a GetTaskHistoryResponse containing a list of historical status updates. + GetTaskHistory(context.Context, *GetTaskHistoryRequest) (*GetTaskHistoryResponse, error) + // Updates the status of the specified task. + // Returns an empty response to confirm the update was processed. + UpdateTaskStatus(context.Context, *UpdateTaskStatusRequest) (*emptypb.Empty, error) + // Retrieves the count of tasks for each status. + // Returns a GetStatusResponse containing a map of status counts. + GetStatus(context.Context, *GetStatusRequest) (*GetStatusResponse, error) + Heartbeat(context.Context, *HeartbeatRequest) (*HeartbeatResponse, error) + PullEvents(*PullEventsRequest, grpc.ServerStreamingServer[PullEventsResponse]) error + mustEmbedUnimplementedTaskManagementServiceServer() +} + +// UnimplementedTaskManagementServiceServer must be embedded to have +// forward compatible implementations. +// +// NOTE: this should be embedded by value instead of pointer to avoid a nil +// pointer dereference when methods are called. 
+type UnimplementedTaskManagementServiceServer struct{} + +func (UnimplementedTaskManagementServiceServer) CreateTask(context.Context, *CreateTaskRequest) (*CreateTaskResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method CreateTask not implemented") +} +func (UnimplementedTaskManagementServiceServer) GetTask(context.Context, *GetTaskRequest) (*Task, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetTask not implemented") +} +func (UnimplementedTaskManagementServiceServer) ListTasks(context.Context, *TaskListRequest) (*TaskList, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListTasks not implemented") +} +func (UnimplementedTaskManagementServiceServer) GetTaskHistory(context.Context, *GetTaskHistoryRequest) (*GetTaskHistoryResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetTaskHistory not implemented") +} +func (UnimplementedTaskManagementServiceServer) UpdateTaskStatus(context.Context, *UpdateTaskStatusRequest) (*emptypb.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method UpdateTaskStatus not implemented") +} +func (UnimplementedTaskManagementServiceServer) GetStatus(context.Context, *GetStatusRequest) (*GetStatusResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetStatus not implemented") +} +func (UnimplementedTaskManagementServiceServer) Heartbeat(context.Context, *HeartbeatRequest) (*HeartbeatResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Heartbeat not implemented") +} +func (UnimplementedTaskManagementServiceServer) PullEvents(*PullEventsRequest, grpc.ServerStreamingServer[PullEventsResponse]) error { + return status.Errorf(codes.Unimplemented, "method PullEvents not implemented") +} +func (UnimplementedTaskManagementServiceServer) mustEmbedUnimplementedTaskManagementServiceServer() {} +func (UnimplementedTaskManagementServiceServer) testEmbeddedByValue() {} + +// UnsafeTaskManagementServiceServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to TaskManagementServiceServer will +// result in compilation errors. +type UnsafeTaskManagementServiceServer interface { + mustEmbedUnimplementedTaskManagementServiceServer() +} + +func RegisterTaskManagementServiceServer(s grpc.ServiceRegistrar, srv TaskManagementServiceServer) { + // If the following call pancis, it indicates UnimplementedTaskManagementServiceServer was + // embedded by pointer and is nil. This will cause panics if an + // unimplemented method is ever invoked, so we test this at initialization + // time to prevent it from happening at runtime later due to I/O. 
+ if t, ok := srv.(interface{ testEmbeddedByValue() }); ok { + t.testEmbeddedByValue() + } + s.RegisterService(&TaskManagementService_ServiceDesc, srv) +} + +func _TaskManagementService_CreateTask_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateTaskRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TaskManagementServiceServer).CreateTask(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: TaskManagementService_CreateTask_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TaskManagementServiceServer).CreateTask(ctx, req.(*CreateTaskRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _TaskManagementService_GetTask_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetTaskRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TaskManagementServiceServer).GetTask(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: TaskManagementService_GetTask_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TaskManagementServiceServer).GetTask(ctx, req.(*GetTaskRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _TaskManagementService_ListTasks_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(TaskListRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TaskManagementServiceServer).ListTasks(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: TaskManagementService_ListTasks_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TaskManagementServiceServer).ListTasks(ctx, req.(*TaskListRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _TaskManagementService_GetTaskHistory_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetTaskHistoryRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TaskManagementServiceServer).GetTaskHistory(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: TaskManagementService_GetTaskHistory_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TaskManagementServiceServer).GetTaskHistory(ctx, req.(*GetTaskHistoryRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _TaskManagementService_UpdateTaskStatus_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateTaskStatusRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TaskManagementServiceServer).UpdateTaskStatus(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: TaskManagementService_UpdateTaskStatus_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return 
srv.(TaskManagementServiceServer).UpdateTaskStatus(ctx, req.(*UpdateTaskStatusRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _TaskManagementService_GetStatus_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetStatusRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TaskManagementServiceServer).GetStatus(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: TaskManagementService_GetStatus_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TaskManagementServiceServer).GetStatus(ctx, req.(*GetStatusRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _TaskManagementService_Heartbeat_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(HeartbeatRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TaskManagementServiceServer).Heartbeat(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: TaskManagementService_Heartbeat_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TaskManagementServiceServer).Heartbeat(ctx, req.(*HeartbeatRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _TaskManagementService_PullEvents_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(PullEventsRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(TaskManagementServiceServer).PullEvents(m, &grpc.GenericServerStream[PullEventsRequest, PullEventsResponse]{ServerStream: stream}) +} + +// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. +type TaskManagementService_PullEventsServer = grpc.ServerStreamingServer[PullEventsResponse] + +// TaskManagementService_ServiceDesc is the grpc.ServiceDesc for TaskManagementService service. 
+// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var TaskManagementService_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "cloud.v1.TaskManagementService", + HandlerType: (*TaskManagementServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "CreateTask", + Handler: _TaskManagementService_CreateTask_Handler, + }, + { + MethodName: "GetTask", + Handler: _TaskManagementService_GetTask_Handler, + }, + { + MethodName: "ListTasks", + Handler: _TaskManagementService_ListTasks_Handler, + }, + { + MethodName: "GetTaskHistory", + Handler: _TaskManagementService_GetTaskHistory_Handler, + }, + { + MethodName: "UpdateTaskStatus", + Handler: _TaskManagementService_UpdateTaskStatus_Handler, + }, + { + MethodName: "GetStatus", + Handler: _TaskManagementService_GetStatus_Handler, + }, + { + MethodName: "Heartbeat", + Handler: _TaskManagementService_Heartbeat_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "PullEvents", + Handler: _TaskManagementService_PullEvents_Handler, + ServerStreams: true, + }, + }, + Metadata: "cloud/v1/cloud.proto", +} diff --git a/pkg/gen/cloud/v1/cloudv1connect/cloud.connect.go b/pkg/gen/cloud/v1/cloudv1connect/cloud.connect.go index 4466f59..6d97232 100644 --- a/pkg/gen/cloud/v1/cloudv1connect/cloud.connect.go +++ b/pkg/gen/cloud/v1/cloudv1connect/cloud.connect.go @@ -52,6 +52,12 @@ const ( // TaskManagementServiceGetStatusProcedure is the fully-qualified name of the // TaskManagementService's GetStatus RPC. TaskManagementServiceGetStatusProcedure = "/cloud.v1.TaskManagementService/GetStatus" + // TaskManagementServiceHeartbeatProcedure is the fully-qualified name of the + // TaskManagementService's Heartbeat RPC. + TaskManagementServiceHeartbeatProcedure = "/cloud.v1.TaskManagementService/Heartbeat" + // TaskManagementServicePullEventsProcedure is the fully-qualified name of the + // TaskManagementService's PullEvents RPC. + TaskManagementServicePullEventsProcedure = "/cloud.v1.TaskManagementService/PullEvents" ) // TaskManagementServiceClient is a client for the cloud.v1.TaskManagementService service. @@ -74,6 +80,8 @@ type TaskManagementServiceClient interface { // Retrieves the count of tasks for each status. // Returns a GetStatusResponse containing a map of status counts. 
GetStatus(context.Context, *connect.Request[v1.GetStatusRequest]) (*connect.Response[v1.GetStatusResponse], error) + Heartbeat(context.Context, *connect.Request[v1.HeartbeatRequest]) (*connect.Response[v1.HeartbeatResponse], error) + PullEvents(context.Context, *connect.Request[v1.PullEventsRequest]) (*connect.ServerStreamForClient[v1.PullEventsResponse], error) } // NewTaskManagementServiceClient constructs a client for the cloud.v1.TaskManagementService @@ -116,6 +124,16 @@ func NewTaskManagementServiceClient(httpClient connect.HTTPClient, baseURL strin baseURL+TaskManagementServiceGetStatusProcedure, opts..., ), + heartbeat: connect.NewClient[v1.HeartbeatRequest, v1.HeartbeatResponse]( + httpClient, + baseURL+TaskManagementServiceHeartbeatProcedure, + opts..., + ), + pullEvents: connect.NewClient[v1.PullEventsRequest, v1.PullEventsResponse]( + httpClient, + baseURL+TaskManagementServicePullEventsProcedure, + opts..., + ), } } @@ -127,6 +145,8 @@ type taskManagementServiceClient struct { getTaskHistory *connect.Client[v1.GetTaskHistoryRequest, v1.GetTaskHistoryResponse] updateTaskStatus *connect.Client[v1.UpdateTaskStatusRequest, emptypb.Empty] getStatus *connect.Client[v1.GetStatusRequest, v1.GetStatusResponse] + heartbeat *connect.Client[v1.HeartbeatRequest, v1.HeartbeatResponse] + pullEvents *connect.Client[v1.PullEventsRequest, v1.PullEventsResponse] } // CreateTask calls cloud.v1.TaskManagementService.CreateTask. @@ -159,6 +179,16 @@ func (c *taskManagementServiceClient) GetStatus(ctx context.Context, req *connec return c.getStatus.CallUnary(ctx, req) } +// Heartbeat calls cloud.v1.TaskManagementService.Heartbeat. +func (c *taskManagementServiceClient) Heartbeat(ctx context.Context, req *connect.Request[v1.HeartbeatRequest]) (*connect.Response[v1.HeartbeatResponse], error) { + return c.heartbeat.CallUnary(ctx, req) +} + +// PullEvents calls cloud.v1.TaskManagementService.PullEvents. +func (c *taskManagementServiceClient) PullEvents(ctx context.Context, req *connect.Request[v1.PullEventsRequest]) (*connect.ServerStreamForClient[v1.PullEventsResponse], error) { + return c.pullEvents.CallServerStream(ctx, req) +} + // TaskManagementServiceHandler is an implementation of the cloud.v1.TaskManagementService service. type TaskManagementServiceHandler interface { // Creates a new task based on the provided request. @@ -179,6 +209,8 @@ type TaskManagementServiceHandler interface { // Retrieves the count of tasks for each status. // Returns a GetStatusResponse containing a map of status counts. GetStatus(context.Context, *connect.Request[v1.GetStatusRequest]) (*connect.Response[v1.GetStatusResponse], error) + Heartbeat(context.Context, *connect.Request[v1.HeartbeatRequest]) (*connect.Response[v1.HeartbeatResponse], error) + PullEvents(context.Context, *connect.Request[v1.PullEventsRequest], *connect.ServerStream[v1.PullEventsResponse]) error } // NewTaskManagementServiceHandler builds an HTTP handler from the service implementation. 
It @@ -217,6 +249,16 @@ func NewTaskManagementServiceHandler(svc TaskManagementServiceHandler, opts ...c svc.GetStatus, opts..., ) + taskManagementServiceHeartbeatHandler := connect.NewUnaryHandler( + TaskManagementServiceHeartbeatProcedure, + svc.Heartbeat, + opts..., + ) + taskManagementServicePullEventsHandler := connect.NewServerStreamHandler( + TaskManagementServicePullEventsProcedure, + svc.PullEvents, + opts..., + ) return "/cloud.v1.TaskManagementService/", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { switch r.URL.Path { case TaskManagementServiceCreateTaskProcedure: @@ -231,6 +273,10 @@ func NewTaskManagementServiceHandler(svc TaskManagementServiceHandler, opts ...c taskManagementServiceUpdateTaskStatusHandler.ServeHTTP(w, r) case TaskManagementServiceGetStatusProcedure: taskManagementServiceGetStatusHandler.ServeHTTP(w, r) + case TaskManagementServiceHeartbeatProcedure: + taskManagementServiceHeartbeatHandler.ServeHTTP(w, r) + case TaskManagementServicePullEventsProcedure: + taskManagementServicePullEventsHandler.ServeHTTP(w, r) default: http.NotFound(w, r) } @@ -263,3 +309,11 @@ func (UnimplementedTaskManagementServiceHandler) UpdateTaskStatus(context.Contex func (UnimplementedTaskManagementServiceHandler) GetStatus(context.Context, *connect.Request[v1.GetStatusRequest]) (*connect.Response[v1.GetStatusResponse], error) { return nil, connect.NewError(connect.CodeUnimplemented, errors.New("cloud.v1.TaskManagementService.GetStatus is not implemented")) } + +func (UnimplementedTaskManagementServiceHandler) Heartbeat(context.Context, *connect.Request[v1.HeartbeatRequest]) (*connect.Response[v1.HeartbeatResponse], error) { + return nil, connect.NewError(connect.CodeUnimplemented, errors.New("cloud.v1.TaskManagementService.Heartbeat is not implemented")) +} + +func (UnimplementedTaskManagementServiceHandler) PullEvents(context.Context, *connect.Request[v1.PullEventsRequest], *connect.ServerStream[v1.PullEventsResponse]) error { + return connect.NewError(connect.CodeUnimplemented, errors.New("cloud.v1.TaskManagementService.PullEvents is not implemented")) +} diff --git a/pkg/gen/index.html b/pkg/gen/index.html index 4160ae0..d933e10 100644 --- a/pkg/gen/index.html +++ b/pkg/gen/index.html @@ -333,6 +333,14 @@
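The cloudv1connect changes above add a Heartbeat procedure and a server-streaming PullEvents procedure to the handler returned by NewTaskManagementServiceHandler. A minimal sketch of mounting that handler follows. The module import path, listen address, and the Server type are assumptions for illustration only; the (path, http.Handler) return shape and the UnimplementedTaskManagementServiceHandler type come from the generated code in this diff.

package main

import (
	"log"
	"net/http"

	"golang.org/x/net/http2"
	"golang.org/x/net/http2/h2c"

	"task/pkg/gen/cloud/v1/cloudv1connect" // assumed import path, derived from go_package "task/pkg/gen/cloud/v1"
)

// Server is a hypothetical service implementation. Embedding
// UnimplementedTaskManagementServiceHandler satisfies the full
// TaskManagementServiceHandler interface; a real worker backend would
// override CreateTask, Heartbeat, PullEvents, and the other methods.
type Server struct {
	cloudv1connect.UnimplementedTaskManagementServiceHandler
}

func main() {
	mux := http.NewServeMux()

	// NewTaskManagementServiceHandler returns the mount path
	// ("/cloud.v1.TaskManagementService/") and an http.Handler that routes
	// every procedure, including the new Heartbeat and PullEvents cases.
	path, handler := cloudv1connect.NewTaskManagementServiceHandler(&Server{})
	mux.Handle(path, handler)

	// h2c serves HTTP/2 without TLS so gRPC and streaming Connect clients can
	// reach the PullEvents stream; plaintext is assumed acceptable for local use.
	log.Fatal(http.ListenAndServe(":8080", h2c.NewHandler(mux, &http2.Server{})))
}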

(Regenerated protobuf documentation; the HTML markup is not reproduced here. The recoverable content of the index.html hunks is:)

Table of Contents: new entries for HeartbeatRequest, HeartbeatResponse, PullEventsRequest, PullEventsResponse, and WorkAssignment.

GetStatusResponse: the status_counts description now reads "Map of task statuses and their counts." (trailing period added).

HeartbeatRequest (new)
    timestamp  string  Timestamp of the heartbeat, in ISO 8601 format (UTC); indicates when the heartbeat was sent.
                       Validated: string.pattern ^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}Z$
    uuid       string  Unique identifier for the heartbeat request, used to track and correlate requests; must be a valid UUID.
                       Validated: string.pattern ^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[1-5][0-9a-fA-F]{3}-[89abAB][0-9a-fA-F]{3}-[0-9a-fA-F]{12}$

HeartbeatResponse (new): response message for the heartbeat request; currently empty, indicating successful receipt of the heartbeat.

PullEventsRequest (new): message for stream requests; currently empty, indicating that no specific parameters are required.

PullEventsResponse (new): message for stream responses.
    work  WorkAssignment  Work assignment to be executed (the task to be executed).

WorkAssignment (new): message for work assignments.
    assignment_id  int64  Unique identifier for the assignment.
    task           Task   The task to be executed. Validated: message.required true

TaskStatusEnum: the ALL value's description changes from "Task status cannot be determined" to "Represents all task statuses".

TaskManagementService: two methods added.
    Heartbeat   HeartbeatRequest   -> HeartbeatResponse
    PullEvents  PullEventsRequest  -> PullEventsResponse (server stream)
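Given the documentation above, here is a worker-side sketch against the regenerated gRPC client from cloud_grpc.pb.go: it sends a Heartbeat whose fields satisfy the documented validation patterns and then consumes the PullEvents server stream. The address, module import path, and the Go field and getter names (Timestamp, Uuid, GetWork, GetAssignmentId, GetTask) are assumptions based on protoc-gen-go's standard mapping of the fields listed above; the RPC signatures themselves come from the generated client in this diff.

package main

import (
	"context"
	"io"
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"

	cloudv1 "task/pkg/gen/cloud/v1" // assumed import path, from go_package "task/pkg/gen/cloud/v1"
)

func main() {
	// Dial the task server; grpc.NewClient is available in gRPC-Go >= 1.64,
	// which this generated code already requires.
	conn, err := grpc.NewClient("localhost:8080", grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
	client := cloudv1.NewTaskManagementServiceClient(conn)
	ctx := context.Background()

	// Heartbeat: timestamp must match ^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}Z$ and
	// uuid must be a well-formed UUID, per the validation rules documented above.
	_, err = client.Heartbeat(ctx, &cloudv1.HeartbeatRequest{
		Timestamp: time.Now().UTC().Format("2006-01-02T15:04:05Z"),
		Uuid:      "3f2c8e4a-9d1b-4c6f-8a2e-5b7d9c0e1f23", // in practice, generate a fresh v4 UUID per request
	})
	if err != nil {
		log.Fatal(err)
	}

	// PullEvents: server-streaming RPC; Recv blocks until the server pushes the
	// next WorkAssignment or closes the stream with io.EOF.
	stream, err := client.PullEvents(ctx, &cloudv1.PullEventsRequest{})
	if err != nil {
		log.Fatal(err)
	}
	for {
		resp, err := stream.Recv()
		if err == io.EOF {
			break
		}
		if err != nil {
			log.Fatal(err)
		}
		log.Printf("assignment %d: task %v", resp.GetWork().GetAssignmentId(), resp.GetWork().GetTask())
	}
}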

    + + diff --git a/pkg/gen/validate/validate.pb.go b/pkg/gen/validate/validate.pb.go index 31f91c2..9b28a1f 100644 --- a/pkg/gen/validate/validate.pb.go +++ b/pkg/gen/validate/validate.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.34.2 +// protoc-gen-go v1.35.1 // protoc (unknown) // source: validate/validate.proto @@ -121,11 +121,9 @@ type FieldRules struct { func (x *FieldRules) Reset() { *x = FieldRules{} - if protoimpl.UnsafeEnabled { - mi := &file_validate_validate_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_validate_validate_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *FieldRules) String() string { @@ -136,7 +134,7 @@ func (*FieldRules) ProtoMessage() {} func (x *FieldRules) ProtoReflect() protoreflect.Message { mi := &file_validate_validate_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -480,11 +478,9 @@ type FloatRules struct { func (x *FloatRules) Reset() { *x = FloatRules{} - if protoimpl.UnsafeEnabled { - mi := &file_validate_validate_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_validate_validate_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *FloatRules) String() string { @@ -495,7 +491,7 @@ func (*FloatRules) ProtoMessage() {} func (x *FloatRules) ProtoReflect() protoreflect.Message { mi := &file_validate_validate_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -601,11 +597,9 @@ type DoubleRules struct { func (x *DoubleRules) Reset() { *x = DoubleRules{} - if protoimpl.UnsafeEnabled { - mi := &file_validate_validate_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_validate_validate_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *DoubleRules) String() string { @@ -616,7 +610,7 @@ func (*DoubleRules) ProtoMessage() {} func (x *DoubleRules) ProtoReflect() protoreflect.Message { mi := &file_validate_validate_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -722,11 +716,9 @@ type Int32Rules struct { func (x *Int32Rules) Reset() { *x = Int32Rules{} - if protoimpl.UnsafeEnabled { - mi := &file_validate_validate_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_validate_validate_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Int32Rules) String() string { @@ -737,7 +729,7 @@ func (*Int32Rules) ProtoMessage() {} func (x *Int32Rules) ProtoReflect() protoreflect.Message { mi := &file_validate_validate_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -843,11 +835,9 @@ type Int64Rules struct { func (x *Int64Rules) Reset() { *x = 
Int64Rules{} - if protoimpl.UnsafeEnabled { - mi := &file_validate_validate_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_validate_validate_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Int64Rules) String() string { @@ -858,7 +848,7 @@ func (*Int64Rules) ProtoMessage() {} func (x *Int64Rules) ProtoReflect() protoreflect.Message { mi := &file_validate_validate_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -964,11 +954,9 @@ type UInt32Rules struct { func (x *UInt32Rules) Reset() { *x = UInt32Rules{} - if protoimpl.UnsafeEnabled { - mi := &file_validate_validate_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_validate_validate_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *UInt32Rules) String() string { @@ -979,7 +967,7 @@ func (*UInt32Rules) ProtoMessage() {} func (x *UInt32Rules) ProtoReflect() protoreflect.Message { mi := &file_validate_validate_proto_msgTypes[5] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1085,11 +1073,9 @@ type UInt64Rules struct { func (x *UInt64Rules) Reset() { *x = UInt64Rules{} - if protoimpl.UnsafeEnabled { - mi := &file_validate_validate_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_validate_validate_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *UInt64Rules) String() string { @@ -1100,7 +1086,7 @@ func (*UInt64Rules) ProtoMessage() {} func (x *UInt64Rules) ProtoReflect() protoreflect.Message { mi := &file_validate_validate_proto_msgTypes[6] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1206,11 +1192,9 @@ type SInt32Rules struct { func (x *SInt32Rules) Reset() { *x = SInt32Rules{} - if protoimpl.UnsafeEnabled { - mi := &file_validate_validate_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_validate_validate_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *SInt32Rules) String() string { @@ -1221,7 +1205,7 @@ func (*SInt32Rules) ProtoMessage() {} func (x *SInt32Rules) ProtoReflect() protoreflect.Message { mi := &file_validate_validate_proto_msgTypes[7] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1327,11 +1311,9 @@ type SInt64Rules struct { func (x *SInt64Rules) Reset() { *x = SInt64Rules{} - if protoimpl.UnsafeEnabled { - mi := &file_validate_validate_proto_msgTypes[8] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_validate_validate_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *SInt64Rules) String() string { @@ -1342,7 +1324,7 @@ func (*SInt64Rules) ProtoMessage() {} func (x 
*SInt64Rules) ProtoReflect() protoreflect.Message { mi := &file_validate_validate_proto_msgTypes[8] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1448,11 +1430,9 @@ type Fixed32Rules struct { func (x *Fixed32Rules) Reset() { *x = Fixed32Rules{} - if protoimpl.UnsafeEnabled { - mi := &file_validate_validate_proto_msgTypes[9] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_validate_validate_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Fixed32Rules) String() string { @@ -1463,7 +1443,7 @@ func (*Fixed32Rules) ProtoMessage() {} func (x *Fixed32Rules) ProtoReflect() protoreflect.Message { mi := &file_validate_validate_proto_msgTypes[9] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1569,11 +1549,9 @@ type Fixed64Rules struct { func (x *Fixed64Rules) Reset() { *x = Fixed64Rules{} - if protoimpl.UnsafeEnabled { - mi := &file_validate_validate_proto_msgTypes[10] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_validate_validate_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Fixed64Rules) String() string { @@ -1584,7 +1562,7 @@ func (*Fixed64Rules) ProtoMessage() {} func (x *Fixed64Rules) ProtoReflect() protoreflect.Message { mi := &file_validate_validate_proto_msgTypes[10] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1690,11 +1668,9 @@ type SFixed32Rules struct { func (x *SFixed32Rules) Reset() { *x = SFixed32Rules{} - if protoimpl.UnsafeEnabled { - mi := &file_validate_validate_proto_msgTypes[11] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_validate_validate_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *SFixed32Rules) String() string { @@ -1705,7 +1681,7 @@ func (*SFixed32Rules) ProtoMessage() {} func (x *SFixed32Rules) ProtoReflect() protoreflect.Message { mi := &file_validate_validate_proto_msgTypes[11] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1811,11 +1787,9 @@ type SFixed64Rules struct { func (x *SFixed64Rules) Reset() { *x = SFixed64Rules{} - if protoimpl.UnsafeEnabled { - mi := &file_validate_validate_proto_msgTypes[12] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_validate_validate_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *SFixed64Rules) String() string { @@ -1826,7 +1800,7 @@ func (*SFixed64Rules) ProtoMessage() {} func (x *SFixed64Rules) ProtoReflect() protoreflect.Message { mi := &file_validate_validate_proto_msgTypes[12] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1909,11 +1883,9 @@ type BoolRules struct { func (x *BoolRules) Reset() { *x = BoolRules{} - if 
protoimpl.UnsafeEnabled { - mi := &file_validate_validate_proto_msgTypes[13] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_validate_validate_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *BoolRules) String() string { @@ -1924,7 +1896,7 @@ func (*BoolRules) ProtoMessage() {} func (x *BoolRules) ProtoReflect() protoreflect.Message { mi := &file_validate_validate_proto_msgTypes[13] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2030,11 +2002,9 @@ const ( func (x *StringRules) Reset() { *x = StringRules{} - if protoimpl.UnsafeEnabled { - mi := &file_validate_validate_proto_msgTypes[14] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_validate_validate_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *StringRules) String() string { @@ -2045,7 +2015,7 @@ func (*StringRules) ProtoMessage() {} func (x *StringRules) ProtoReflect() protoreflect.Message { mi := &file_validate_validate_proto_msgTypes[14] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2384,11 +2354,9 @@ type BytesRules struct { func (x *BytesRules) Reset() { *x = BytesRules{} - if protoimpl.UnsafeEnabled { - mi := &file_validate_validate_proto_msgTypes[15] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_validate_validate_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *BytesRules) String() string { @@ -2399,7 +2367,7 @@ func (*BytesRules) ProtoMessage() {} func (x *BytesRules) ProtoReflect() protoreflect.Message { mi := &file_validate_validate_proto_msgTypes[15] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2568,11 +2536,9 @@ type EnumRules struct { func (x *EnumRules) Reset() { *x = EnumRules{} - if protoimpl.UnsafeEnabled { - mi := &file_validate_validate_proto_msgTypes[16] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_validate_validate_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *EnumRules) String() string { @@ -2583,7 +2549,7 @@ func (*EnumRules) ProtoMessage() {} func (x *EnumRules) ProtoReflect() protoreflect.Message { mi := &file_validate_validate_proto_msgTypes[16] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2642,11 +2608,9 @@ type MessageRules struct { func (x *MessageRules) Reset() { *x = MessageRules{} - if protoimpl.UnsafeEnabled { - mi := &file_validate_validate_proto_msgTypes[17] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_validate_validate_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *MessageRules) String() string { @@ -2657,7 +2621,7 @@ func (*MessageRules) ProtoMessage() {} func (x *MessageRules) ProtoReflect() 
protoreflect.Message { mi := &file_validate_validate_proto_msgTypes[17] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2713,11 +2677,9 @@ type RepeatedRules struct { func (x *RepeatedRules) Reset() { *x = RepeatedRules{} - if protoimpl.UnsafeEnabled { - mi := &file_validate_validate_proto_msgTypes[18] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_validate_validate_proto_msgTypes[18] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *RepeatedRules) String() string { @@ -2728,7 +2690,7 @@ func (*RepeatedRules) ProtoMessage() {} func (x *RepeatedRules) ProtoReflect() protoreflect.Message { mi := &file_validate_validate_proto_msgTypes[18] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2806,11 +2768,9 @@ type MapRules struct { func (x *MapRules) Reset() { *x = MapRules{} - if protoimpl.UnsafeEnabled { - mi := &file_validate_validate_proto_msgTypes[19] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_validate_validate_proto_msgTypes[19] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *MapRules) String() string { @@ -2821,7 +2781,7 @@ func (*MapRules) ProtoMessage() {} func (x *MapRules) ProtoReflect() protoreflect.Message { mi := &file_validate_validate_proto_msgTypes[19] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2897,11 +2857,9 @@ type AnyRules struct { func (x *AnyRules) Reset() { *x = AnyRules{} - if protoimpl.UnsafeEnabled { - mi := &file_validate_validate_proto_msgTypes[20] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_validate_validate_proto_msgTypes[20] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *AnyRules) String() string { @@ -2912,7 +2870,7 @@ func (*AnyRules) ProtoMessage() {} func (x *AnyRules) ProtoReflect() protoreflect.Message { mi := &file_validate_validate_proto_msgTypes[20] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2981,11 +2939,9 @@ type DurationRules struct { func (x *DurationRules) Reset() { *x = DurationRules{} - if protoimpl.UnsafeEnabled { - mi := &file_validate_validate_proto_msgTypes[21] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_validate_validate_proto_msgTypes[21] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *DurationRules) String() string { @@ -2996,7 +2952,7 @@ func (*DurationRules) ProtoMessage() {} func (x *DurationRules) ProtoReflect() protoreflect.Message { mi := &file_validate_validate_proto_msgTypes[21] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3104,11 +3060,9 @@ type TimestampRules struct { func (x *TimestampRules) Reset() { *x = TimestampRules{} - if protoimpl.UnsafeEnabled { - mi := 
&file_validate_validate_proto_msgTypes[22] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_validate_validate_proto_msgTypes[22] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *TimestampRules) String() string { @@ -3119,7 +3073,7 @@ func (*TimestampRules) ProtoMessage() {} func (x *TimestampRules) ProtoReflect() protoreflect.Message { mi := &file_validate_validate_proto_msgTypes[22] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3770,284 +3724,6 @@ func file_validate_validate_proto_init() { if File_validate_validate_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_validate_validate_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*FieldRules); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_validate_validate_proto_msgTypes[1].Exporter = func(v any, i int) any { - switch v := v.(*FloatRules); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_validate_validate_proto_msgTypes[2].Exporter = func(v any, i int) any { - switch v := v.(*DoubleRules); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_validate_validate_proto_msgTypes[3].Exporter = func(v any, i int) any { - switch v := v.(*Int32Rules); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_validate_validate_proto_msgTypes[4].Exporter = func(v any, i int) any { - switch v := v.(*Int64Rules); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_validate_validate_proto_msgTypes[5].Exporter = func(v any, i int) any { - switch v := v.(*UInt32Rules); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_validate_validate_proto_msgTypes[6].Exporter = func(v any, i int) any { - switch v := v.(*UInt64Rules); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_validate_validate_proto_msgTypes[7].Exporter = func(v any, i int) any { - switch v := v.(*SInt32Rules); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_validate_validate_proto_msgTypes[8].Exporter = func(v any, i int) any { - switch v := v.(*SInt64Rules); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_validate_validate_proto_msgTypes[9].Exporter = func(v any, i int) any { - switch v := v.(*Fixed32Rules); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_validate_validate_proto_msgTypes[10].Exporter = func(v any, i int) any { - switch v := v.(*Fixed64Rules); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_validate_validate_proto_msgTypes[11].Exporter = func(v any, i int) any 
{ - switch v := v.(*SFixed32Rules); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_validate_validate_proto_msgTypes[12].Exporter = func(v any, i int) any { - switch v := v.(*SFixed64Rules); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_validate_validate_proto_msgTypes[13].Exporter = func(v any, i int) any { - switch v := v.(*BoolRules); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_validate_validate_proto_msgTypes[14].Exporter = func(v any, i int) any { - switch v := v.(*StringRules); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_validate_validate_proto_msgTypes[15].Exporter = func(v any, i int) any { - switch v := v.(*BytesRules); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_validate_validate_proto_msgTypes[16].Exporter = func(v any, i int) any { - switch v := v.(*EnumRules); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_validate_validate_proto_msgTypes[17].Exporter = func(v any, i int) any { - switch v := v.(*MessageRules); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_validate_validate_proto_msgTypes[18].Exporter = func(v any, i int) any { - switch v := v.(*RepeatedRules); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_validate_validate_proto_msgTypes[19].Exporter = func(v any, i int) any { - switch v := v.(*MapRules); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_validate_validate_proto_msgTypes[20].Exporter = func(v any, i int) any { - switch v := v.(*AnyRules); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_validate_validate_proto_msgTypes[21].Exporter = func(v any, i int) any { - switch v := v.(*DurationRules); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_validate_validate_proto_msgTypes[22].Exporter = func(v any, i int) any { - switch v := v.(*TimestampRules); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } file_validate_validate_proto_msgTypes[0].OneofWrappers = []any{ (*FieldRules_Float)(nil), (*FieldRules_Double)(nil), diff --git a/pkg/k8s/k8s.go b/pkg/k8s/k8s.go new file mode 100644 index 0000000..1d94844 --- /dev/null +++ b/pkg/k8s/k8s.go @@ -0,0 +1,108 @@ +package K8s + +import ( + "context" + "os" + v1 "task/controller/api/v1" + + "k8s.io/apimachinery/pkg/runtime" // Import runtime for scheme + // Import for GroupVersionKind + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/clientcmd" + // Import for error handling +) + +type K8s struct { + kubeconfigPath string + client *kubernetes.Clientset + scheme *runtime.Scheme // Add scheme to K8s struct +} + +// 
Add the following function to create a Kubernetes client for local and in-cluster setup +func NewK8sClient(kubeconfigPath string) (*K8s, error) { + var config *rest.Config + var err error + + // In-cluster config + if os.Getenv("KUBERNETES_SERVICE_HOST") != "" { + config, err = rest.InClusterConfig() + if err != nil { + return nil, err + } + } else { // Local config + config, err = clientcmd.BuildConfigFromFlags("", kubeconfigPath) + if err != nil { + return nil, err + } + } + + clientset, err := kubernetes.NewForConfig(config) + if err != nil { + return nil, err + } + + k := &K8s{ + kubeconfigPath: kubeconfigPath, + client: clientset, + scheme: runtime.NewScheme(), // Initialize the scheme + } + + // Register your Task type with the scheme + if err := v1.AddToScheme(k.scheme); err != nil { + return nil, err + } + + return k, nil +} + +// CreateTask creates a new Task resource in the Kubernetes cluster +func (k *K8s) CreateTask(task *v1.Task) (*v1.Task, error) { + tasksClient := k.client.RESTClient(). + Post(). + Resource("tasks"). + Namespace(task.Namespace). + Body(task) + + result := &v1.Task{} + err := tasksClient.Do(context.TODO()).Into(result) + return result, err +} + +// GetTask retrieves a Task resource from the Kubernetes cluster +func (k *K8s) GetTask(namespace, name string) (*v1.Task, error) { + result := &v1.Task{} + err := k.client.RESTClient(). + Get(). + Resource("tasks"). + Namespace(namespace). + Name(name). + Do(context.TODO()). + Into(result) + return result, err +} + +// UpdateTask updates an existing Task resource in the Kubernetes cluster +func (k *K8s) UpdateTask(task *v1.Task) (*v1.Task, error) { + result := &v1.Task{} + err := k.client.RESTClient(). + Put(). + Resource("tasks"). + Namespace(task.Namespace). + Name(task.Name). + Body(task). + Do(context.TODO()). + Into(result) + return result, err +} + +// DeleteTask deletes a Task resource from the Kubernetes cluster +func (k *K8s) DeleteTask(namespace, name string) error { + return k.client.RESTClient(). + Delete(). + Resource("tasks"). + Namespace(namespace). + Name(name). + Do(context.TODO()). + Error() +} diff --git a/pkg/worker/cron.go b/pkg/worker/cron.go deleted file mode 100644 index c145f51..0000000 --- a/pkg/worker/cron.go +++ /dev/null @@ -1,124 +0,0 @@ -package worker - -import ( - "context" - "database/sql" - "fmt" - "log/slog" - "os" - cloudv1 "task/pkg/gen/cloud/v1" - - "task/pkg/x" - "task/server/repository/model/task" - "time" - - "connectrpc.com/connect" - "github.com/riverqueue/river" - "gorm.io/driver/postgres" - "gorm.io/gorm" -) - -// ReconcileTaskWorkerArgs contains the arguments for the ReconcileTaskWorker. -type ReconcileTaskWorkerArgs struct { - Status int `json:"status"` - URL string -} - -// Kind returns the kind of the task argument. -func (ReconcileTaskWorkerArgs) Kind() string { return "reconcile_tasks" } - -// InsertOpts returns the insertion options for the task. -func (ReconcileTaskWorkerArgs) InsertOpts() river.InsertOpts { - return river.InsertOpts{MaxAttempts: 1} -} - -// ReconcileTaskWorker is the worker implementation for processing and reconciling tasks. -type ReconcileTaskWorker struct { - river.WorkerDefaults[ReconcileTaskWorkerArgs] - Logger *slog.Logger - db *gorm.DB -} - -// Work processes a single reconciliation job for tasks. -// It finds tasks that have been in a specific status for too long and updates them to QUEUED status. 
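For reference, the new pkg/k8s helper introduced above can be driven like this. A minimal sketch, assuming the controller's v1.Task type embeds the usual metav1.ObjectMeta and that a kubeconfig exists at the default path; the task name and namespace are made up for illustration:

package main

import (
	"fmt"
	"path/filepath"

	v1 "task/controller/api/v1"
	k8s "task/pkg/k8s"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/util/homedir"
)

func main() {
	// Outside a cluster this falls back to the local kubeconfig; in-cluster,
	// NewK8sClient detects KUBERNETES_SERVICE_HOST and uses the in-cluster config.
	client, err := k8s.NewK8sClient(filepath.Join(homedir.HomeDir(), ".kube", "config"))
	if err != nil {
		panic(err)
	}

	// Hypothetical Task object; spec fields depend on api/v1 and are omitted here.
	t := &v1.Task{
		ObjectMeta: metav1.ObjectMeta{Name: "send-email", Namespace: "default"},
	}

	created, err := client.CreateTask(t)
	if err != nil {
		panic(err)
	}
	fmt.Println("created task:", created.Name)
}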
-func (w *ReconcileTaskWorker) Work(ctx context.Context, job *river.Job[ReconcileTaskWorkerArgs]) error { - w.Logger = slog.Default().With("worker", "ReconcileTaskWorker") - w.Logger.Info("Starting task reconciliation") - - sqlDB, err := sql.Open("pgx", job.Args.URL) - if err != nil { - return fmt.Errorf("failed to open database connection: %w", err) - } - defer sqlDB.Close() - - db, err := gorm.Open(postgres.New(postgres.Config{ - Conn: sqlDB, - }), &gorm.Config{}) - if err != nil { - return fmt.Errorf("failed to initialize GORM: %w", err) - } - - w.db = db - - runningTasks, err := w.fetchRunningTasks(ctx, job.Args.Status) - if err != nil { - return fmt.Errorf("failed to fetch running tasks: %w", err) - } - - w.Logger.Info("Found running tasks", "count", len(runningTasks)) - - updatedCount, err := w.updateAndQueueTasks(ctx, runningTasks) - if err != nil { - return fmt.Errorf("failed to update and queue tasks: %w", err) - } - - w.Logger.Info("Finished processing tasks", "updated_count", updatedCount) - return nil -} - -func (w *ReconcileTaskWorker) fetchRunningTasks(ctx context.Context, status int) ([]task.Task, error) { - var runningTasks []task.Task - twentyMinutesAgo := time.Now().Add(-time.Duration(x.CRON_TIME) * time.Minute) - - err := w.db.WithContext(ctx). - Where("status = ? AND created_at <= ?", status, twentyMinutesAgo). - Find(&runningTasks).Error - if err != nil { - return nil, fmt.Errorf("failed to query running tasks: %w", err) - } - - return runningTasks, nil -} - -func (w *ReconcileTaskWorker) updateAndQueueTasks(ctx context.Context, tasks []task.Task) (int, error) { - updatedCount := 0 - for _, t := range tasks { - if err := w.updateTaskStatus(ctx, t); err != nil { - w.Logger.Error("Failed to update task status", "task_id", t.ID, "error", err) - continue - } - updatedCount++ - } - return updatedCount, nil -} - -func (w *ReconcileTaskWorker) updateTaskStatus(ctx context.Context, t task.Task) error { - cloud, err := x.CreateClient(os.Getenv("SERVER_ENDPOINT")) - if err != nil { - return fmt.Errorf("failed to create client: %w", err) - } - - req := &cloudv1.UpdateTaskStatusRequest{ - Id: int32(t.ID), - Status: cloudv1.TaskStatusEnum_QUEUED, - Message: "Task has been queued again", - } - - _, err = cloud.UpdateTaskStatus(ctx, connect.NewRequest(req)) - if err != nil { - return fmt.Errorf("failed to update task status: %w", err) - } - - w.Logger.Info("Updated task status to QUEUED", "task_id", t.ID) - return nil -} diff --git a/pkg/worker/worker.go b/pkg/worker/worker.go deleted file mode 100644 index 2e89dd5..0000000 --- a/pkg/worker/worker.go +++ /dev/null @@ -1,133 +0,0 @@ -package worker - -import ( - "context" - "encoding/json" - "fmt" - "log/slog" - "os" - "strconv" - v1 "task/pkg/gen/cloud/v1" - "task/pkg/gen/cloud/v1/cloudv1connect" - "task/pkg/plugins" - "task/pkg/x" - "task/server/repository/model/task" - "time" - - "connectrpc.com/connect" - "github.com/riverqueue/river" -) - -var cloudClient cloudv1connect.TaskManagementServiceClient - -// TaskArgument represents the argument structure for a task job. -type TaskArgument struct { - Task task.Task `json:"task"` -} - -// Kind returns the kind of the task argument. -func (TaskArgument) Kind() string { return "email_send" } - -// InsertOpts returns the insertion options for the task. -func (TaskArgument) InsertOpts() river.InsertOpts { - return river.InsertOpts{MaxAttempts: 5} -} - -// TaskWorker is the worker implementation for processing tasks. 
-type TaskWorker struct { - river.WorkerDefaults[TaskArgument] -} - -// Work processes a single task job. -func (w *TaskWorker) Work(ctx context.Context, job *river.Job[TaskArgument]) error { - logger := slog.With("task_id", job.Args.Task.ID, "attempt", job.Attempt) - logger.Info("Starting task processing") - - startTime := time.Now() - defer func() { - duration := time.Since(startTime) - logger.Info("Task processing completed", "duration", duration, "task_type", job.Args.Task.Type) - }() - - if err := updateTaskStatus(ctx, int64(job.Args.Task.ID), v1.TaskStatusEnum_RUNNING, fmt.Sprintf("Task started (Attempt %d)", job.Attempt)); err != nil { - logger.Error("Failed to update task status to RUNNING", "error", err) - return fmt.Errorf("failed to update task status to RUNNING: %w", err) - } - - defer func() { - if r := recover(); r != nil { - logger.Error("Task panicked", "panic", r) - if err := updateTaskStatus(ctx, int64(job.Args.Task.ID), v1.TaskStatusEnum_FAILED, fmt.Sprintf("Task panicked (Attempt %d): %v", job.Attempt, r)); err != nil { - logger.Error("Failed to update task status after panic", "error", err) - } - } - }() - - plugin, err := plugins.NewPlugin(job.Args.Task.Type) - if err != nil { - return w.handleError(ctx, job, logger, "Failed to create plugin", err) - } - - var payloadMap map[string]string - if err := json.Unmarshal([]byte(job.Args.Task.Payload), &payloadMap); err != nil { - return w.handleError(ctx, job, logger, "Failed to unmarshal payload", err) - } - - if err := plugin.Run(payloadMap); err != nil { - return w.handleError(ctx, job, logger, "Error running task", err) - } - - logger.Info("Task completed successfully") - if err := updateTaskStatus(ctx, int64(job.Args.Task.ID), v1.TaskStatusEnum_SUCCEEDED, fmt.Sprintf("Task completed successfully (Attempt %d)", job.Attempt)); err != nil { - logger.Error("Failed to update task status to SUCCEEDED", "error", err) - return fmt.Errorf("failed to update task status to SUCCEEDED: %w", err) - } - - return nil -} - -// handleError is a helper function to handle errors during task processing. -func (w *TaskWorker) handleError(ctx context.Context, job *river.Job[TaskArgument], logger *slog.Logger, message string, err error) error { - logger.Error(message, "error", err) - errorMsg := fmt.Sprintf("%s (Attempt %d): %v", message, job.Attempt, err) - if updateErr := updateTaskStatus(ctx, int64(job.Args.Task.ID), v1.TaskStatusEnum_FAILED, errorMsg); updateErr != nil { - logger.Error("Failed to update task status to FAILED", "error", updateErr) - } - return fmt.Errorf("%s for task %d (Attempt %d): %w", message, job.Args.Task.ID, job.Attempt, err) -} - -// NextRetry determines the time for the next retry attempt. -func (w *TaskWorker) NextRetry(job *river.Job[TaskArgument]) time.Time { - return time.Now().Add(2 * time.Second) -} - -// Timeout sets the maximum duration for a task to complete. -func (w *TaskWorker) Timeout(job *river.Job[TaskArgument]) time.Duration { - timeout := 10 - if timeoutStr := os.Getenv("TASK_TIME_OUT"); timeoutStr != "" { - if parsedTimeout, err := strconv.Atoi(timeoutStr); err == nil { - timeout = parsedTimeout - } - } - - return (time.Duration(timeout) + 5) * time.Second -} - -// updateTaskStatus updates the status of a task using the Task Management Service. 
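With the push-based River worker above removed, the worker side presumably switches to pulling work over the new PullEvents stream and reporting results through UpdateTaskStatus. A minimal sketch of that loop, assuming x.CreateClient returns the generated cloudv1connect.TaskManagementServiceClient (as the removed cron.go suggests), that PullEventsRequest needs no fields, and that the proto Task exposes GetId, GetType, and GetPayload accessors:

package main

import (
	"context"
	"encoding/json"
	"log/slog"
	"os"

	"connectrpc.com/connect"

	v1 "task/pkg/gen/cloud/v1"
	"task/pkg/gen/cloud/v1/cloudv1connect"
	"task/pkg/plugins"
	"task/pkg/x"
)

func main() {
	ctx := context.Background()

	client, err := x.CreateClient(os.Getenv("SERVER_ENDPOINT"))
	if err != nil {
		slog.Error("failed to create client", "error", err)
		os.Exit(1)
	}

	// Server-streaming call: the server pushes a WorkAssignment whenever it finds a stalled task.
	stream, err := client.PullEvents(ctx, connect.NewRequest(&v1.PullEventsRequest{}))
	if err != nil {
		slog.Error("failed to open PullEvents stream", "error", err)
		os.Exit(1)
	}
	defer stream.Close()

	for stream.Receive() {
		t := stream.Msg().GetWork().GetTask()

		plugin, err := plugins.NewPlugin(t.GetType())
		if err != nil {
			report(ctx, client, t.GetId(), v1.TaskStatusEnum_FAILED, err.Error())
			continue
		}

		var payload map[string]string
		if err := json.Unmarshal([]byte(t.GetPayload()), &payload); err != nil {
			report(ctx, client, t.GetId(), v1.TaskStatusEnum_FAILED, err.Error())
			continue
		}

		if err := plugin.Run(payload); err != nil {
			report(ctx, client, t.GetId(), v1.TaskStatusEnum_FAILED, err.Error())
			continue
		}
		report(ctx, client, t.GetId(), v1.TaskStatusEnum_SUCCEEDED, "task completed")
	}
	if err := stream.Err(); err != nil {
		slog.Error("stream closed", "error", err)
	}
}

// report mirrors the removed updateTaskStatus helper: it records the outcome via the API
// instead of touching the database directly.
func report(ctx context.Context, client cloudv1connect.TaskManagementServiceClient, id int32, status v1.TaskStatusEnum, msg string) {
	if _, err := client.UpdateTaskStatus(ctx, connect.NewRequest(&v1.UpdateTaskStatusRequest{
		Id:      id,
		Status:  status,
		Message: msg,
	})); err != nil {
		slog.Error("failed to update task status", "task_id", id, "error", err)
	}
}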
-func updateTaskStatus(ctx context.Context, taskID int64, status v1.TaskStatusEnum, message string) error { - client, err := x.CreateClient(os.Getenv("SERVER_ENDPOINT")) - if err != nil { - return fmt.Errorf("failed to create client: %w", err) - } - - _, err = client.UpdateTaskStatus(ctx, connect.NewRequest(&v1.UpdateTaskStatusRequest{ - Id: int32(taskID), - Status: status, - Message: message, - })) - if err != nil { - return fmt.Errorf("failed to update task %d status: %w", taskID, err) - } - - return nil -} diff --git a/server/repository/factory.go b/server/repository/factory.go index ecb1aae..5ba10df 100644 --- a/server/repository/factory.go +++ b/server/repository/factory.go @@ -1,22 +1,14 @@ package repositories import ( - "context" "database/sql" "fmt" "log/slog" - "os" "time" - cloudv1 "task/pkg/gen/cloud/v1" - "task/pkg/worker" - "task/pkg/x" interfaces "task/server/repository/interface" tasks "task/server/repository/model/task" - "github.com/riverqueue/river" - "github.com/riverqueue/river/riverdriver/riverdatabasesql" - "github.com/riverqueue/river/rivermigrate" "gorm.io/driver/postgres" "gorm.io/gorm" ) @@ -47,77 +39,6 @@ func GetRepository(url string, workerCount int, maxConns int) (interfaces.TaskMa return nil, err } - migrator, err := rivermigrate.New(riverdatabasesql.New(sqlDB), nil) - if err != nil { - return nil, fmt.Errorf("failed to create river migrator: %w", err) - } - - _, err = migrator.Migrate(context.Background(), rivermigrate.DirectionUp, &rivermigrate.MigrateOpts{}) - if err != nil { - panic(err) - } - - // Set up River workers and client - workers := river.NewWorkers() - if err := river.AddWorkerSafely(workers, &worker.TaskWorker{}); err != nil { - return nil, fmt.Errorf("failed to add TaskWorker: %w", err) - } - - if err := river.AddWorkerSafely(workers, &worker.ReconcileTaskWorker{}); err != nil { - return nil, fmt.Errorf("failed to add TaskWorker: %w", err) - } - - // TODO: Add comprehensive documentation - // We added periodic reconciliation jobs to handle stuck tasks. These jobs will: - // 1. Get a list of all stuck tasks - // 2. Change their status to "queued" - // 3. Enqueue them for processing - // Currently, we're making direct DB changes, but ideally, the server should - // implement an API to handle this logic. In the future, these scheduled jobs - // should just call the API instead of modifying the database directly. 
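The periodic River jobs deleted here are replaced further down in this diff by GetStalledTasks, which claims stalled rows with FOR UPDATE SKIP LOCKED so that concurrent server replicas never hand out the same task twice. A minimal sketch of that claim step using GORM's locking clause; the status codes 4 (stalled) and 5 (being handed out) simply mirror the values used in the new code and remain assumptions:

package gormimpl

import (
	"context"
	"time"

	"gorm.io/gorm"
	"gorm.io/gorm/clause"

	models "task/server/repository/model/task"
)

// claimStalled selects tasks that have sat in status 4 longer than olderThan and
// flips them to status 5 inside one transaction, so the row locks are held until
// the status change commits. SKIP LOCKED lets other replicas pass over rows that
// are already claimed instead of blocking on them.
func claimStalled(ctx context.Context, db *gorm.DB, olderThan time.Duration) ([]models.Task, error) {
	var stalled []models.Task
	cutoff := time.Now().Add(-olderThan)

	err := db.WithContext(ctx).Transaction(func(tx *gorm.DB) error {
		if err := tx.
			Clauses(clause.Locking{Strength: "UPDATE", Options: "SKIP LOCKED"}).
			Where("status = ? AND updated_at < ?", 4, cutoff).
			Find(&stalled).Error; err != nil {
			return err
		}
		if len(stalled) == 0 {
			return nil
		}
		ids := make([]uint, len(stalled))
		for i, t := range stalled {
			ids[i] = t.ID
		}
		return tx.Model(&models.Task{}).Where("id IN ?", ids).Update("status", 5).Error
	})
	return stalled, err
}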
- - var reconcileTasks = []*river.PeriodicJob{ - river.NewPeriodicJob( - river.PeriodicInterval(time.Duration(x.CRON_TIME)*time.Second), - func() (river.JobArgs, *river.InsertOpts) { - return worker.ReconcileTaskWorkerArgs{ - - Status: int(cloudv1.TaskStatusEnum_RUNNING), - URL: url, - }, nil - }, - &river.PeriodicJobOpts{RunOnStart: true}, - ), - river.NewPeriodicJob( - river.PeriodicInterval(time.Duration(x.CRON_TIME)*time.Second), - func() (river.JobArgs, *river.InsertOpts) { - return worker.ReconcileTaskWorkerArgs{ - Status: int(cloudv1.TaskStatusEnum_QUEUED), - URL: url, - }, nil - }, - &river.PeriodicJobOpts{RunOnStart: true}, - ), - } - - riverClient, err := river.NewClient(riverdatabasesql.New(sqlDB), &river.Config{ - Queues: map[string]river.QueueConfig{ - river.QueueDefault: {MaxWorkers: workerCount}, - }, - Workers: workers, - PeriodicJobs: reconcileTasks, - ErrorHandler: &worker.CustomErrorHandler{}, - Logger: slog.New(slog.NewJSONHandler(os.Stdout, &slog.HandlerOptions{Level: slog.LevelInfo})), - }) - if err != nil { - return nil, fmt.Errorf("failed to create River client: %w", err) - } - - // Start River client - if err := riverClient.Start(context.Background()); err != nil { - return nil, fmt.Errorf("failed to start River client: %w", err) - } - // Perform database migrations if err = db.AutoMigrate(&tasks.Task{}, &tasks.TaskHistory{}); err != nil { return nil, fmt.Errorf("failed to run auto migrations: %w", err) @@ -141,5 +62,5 @@ func GetRepository(url string, workerCount int, maxConns int) (interfaces.TaskMa slog.Info("Created index", "name", idx.name) } - return NewPostgresRepo(db, riverClient), nil + return NewPostgresRepo(db), nil } diff --git a/server/repository/gormimpl/history.go b/server/repository/gormimpl/history.go index ad38510..7f015ac 100644 --- a/server/repository/gormimpl/history.go +++ b/server/repository/gormimpl/history.go @@ -2,12 +2,10 @@ package gormimpl import ( "context" - "database/sql" "fmt" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" - "github.com/riverqueue/river" "gorm.io/gorm" interfaces "task/server/repository/interface" @@ -26,8 +24,7 @@ var ( // TaskHistoryRepo handles database operations for task history entries. type TaskHistoryRepo struct { - db *gorm.DB - riverClient *river.Client[*sql.Tx] + db *gorm.DB } // CreateTaskHistory creates a new history entry for a task. @@ -70,9 +67,8 @@ func (s *TaskHistoryRepo) ListTaskHistories(ctx context.Context, taskID uint) ([ // NewTaskHistoryRepo creates and returns a new instance of TaskHistoryRepo. // It takes a GORM database connection and a River client as parameters. 
-func NewTaskHistoryRepo(db *gorm.DB, riverClient *river.Client[*sql.Tx]) interfaces.TaskHistoryRepo { +func NewTaskHistoryRepo(db *gorm.DB) interfaces.TaskHistoryRepo { return &TaskHistoryRepo{ - db: db, - riverClient: riverClient, + db: db, } } diff --git a/server/repository/gormimpl/task.go b/server/repository/gormimpl/task.go index c5164f2..c4c729f 100644 --- a/server/repository/gormimpl/task.go +++ b/server/repository/gormimpl/task.go @@ -2,16 +2,13 @@ package gormimpl import ( "context" - "database/sql" "fmt" + "time" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" - "github.com/riverqueue/river" "gorm.io/gorm" - cloudv1 "task/pkg/gen/cloud/v1" - "task/pkg/worker" interfaces "task/server/repository/interface" models "task/server/repository/model/task" ) @@ -37,8 +34,7 @@ var ( // TaskRepo implements the TaskRepo interface using GORM for database operations // and River for task queue management. type TaskRepo struct { - db *gorm.DB - riverClient *river.Client[*sql.Tx] + db *gorm.DB } // CreateTask creates a new task in the database and enqueues it for processing. @@ -57,15 +53,6 @@ func (s *TaskRepo) CreateTask(ctx context.Context, task models.Task) (models.Tas taskOperations.WithLabelValues("create", "error").Inc() return models.Task{}, fmt.Errorf("failed to get task ID after creation") } - _, err := s.riverClient.Insert(context.Background(), worker.TaskArgument{ - Task: task, - }, &river.InsertOpts{ - MaxAttempts: 5, - }) - if err != nil { - taskOperations.WithLabelValues("create", "error").Inc() - return models.Task{}, fmt.Errorf("failed to enqueue task: %w", err) - } taskOperations.WithLabelValues("create", "success").Inc() return task, nil @@ -96,21 +83,6 @@ func (s *TaskRepo) UpdateTaskStatus(ctx context.Context, taskID uint, status int taskOperations.WithLabelValues("update_status", "error").Inc() return fmt.Errorf("failed to update task status: %w", err) } - if status == int(cloudv1.TaskStatusEnum_QUEUED) { - task, err := s.GetTaskByID(ctx, taskID) - if err != nil { - return fmt.Errorf("failed to get task by ID: %w", err) - } - _, err = s.riverClient.Insert(ctx, worker.TaskArgument{ - Task: *task, - }, &river.InsertOpts{ - MaxAttempts: 5, - }) - if err != nil { - taskOperations.WithLabelValues("update_status", "error").Inc() - return fmt.Errorf("failed to enqueue task: %w", err) - } - } taskOperations.WithLabelValues("update_status", "success").Inc() return nil @@ -175,11 +147,53 @@ func (s *TaskRepo) GetTaskStatusCounts(ctx context.Context) (map[int]int64, erro return counts, nil } +// GetStalledTasks retrieves tasks with status "unknown" or "queue" that have been in that state for more than 10 seconds. +// It returns a slice of tasks and an error if the operation fails. +func (s *TaskRepo) GetStalledTasks(ctx context.Context) ([]models.Task, error) { + timer := prometheus.NewTimer(taskLatency.WithLabelValues("get_stalled")) + defer timer.ObserveDuration() + + var tasks []models.Task + thirtySecondsAgo := time.Now().Add(-30 * time.Second) + + // Start a transaction + err := s.db.Transaction(func(tx *gorm.DB) error { + // Find stalled tasks and lock them for update + if err := tx.Set("gorm:query_option", "FOR UPDATE SKIP LOCKED"). + Where("(status = ?) AND updated_at < ?", 4, thirtySecondsAgo). 
+ Find(&tasks).Error; err != nil { + return err + } + + // Update the status of found tasks to a temporary "processing" state + if len(tasks) > 0 { + taskIDs := make([]uint, len(tasks)) + for i, task := range tasks { + taskIDs[i] = task.ID + } + if err := tx.Model(&models.Task{}). + Where("id IN ?", taskIDs). + Update("status", 5).Error; err != nil { // Assuming 5 is a temporary "processing" status + return err + } + } + + return nil + }) + + if err != nil { + taskOperations.WithLabelValues("get_stalled", "error").Inc() + return nil, fmt.Errorf("failed to retrieve and lock stalled tasks: %w", err) + } + + taskOperations.WithLabelValues("get_stalled", "success").Inc() + return tasks, nil +} + // NewTaskRepo creates and returns a new instance of TaskRepo. // It requires a GORM database connection and a River client for task queue management. -func NewTaskRepo(db *gorm.DB, riverClient *river.Client[*sql.Tx]) interfaces.TaskRepo { +func NewTaskRepo(db *gorm.DB) interfaces.TaskRepo { return &TaskRepo{ - db: db, - riverClient: riverClient, + db: db, } } diff --git a/server/repository/interface/task.go b/server/repository/interface/task.go index 0d614b4..39cc956 100644 --- a/server/repository/interface/task.go +++ b/server/repository/interface/task.go @@ -36,4 +36,6 @@ type TaskRepo interface { // It returns a map where the key is the status code and the value is the count of tasks with that status. // An error is returned if any occurs during the operation. GetTaskStatusCounts(ctx context.Context) (map[int]int64, error) + + GetStalledTasks(ctx context.Context) ([]model.Task, error) } diff --git a/server/repository/model/task/task.go b/server/repository/model/task/task.go index 5c2c0a3..beba8de 100644 --- a/server/repository/model/task/task.go +++ b/server/repository/model/task/task.go @@ -37,7 +37,7 @@ func (t *Task) BeforeCreate(tx *gorm.DB) (err error) { } // Ensure task status is valid - if t.Status > 3 { + if t.Status > 4 { return errors.New("invalid task status") } diff --git a/server/repository/postgres.go b/server/repository/postgres.go index 7c3653d..63c8158 100644 --- a/server/repository/postgres.go +++ b/server/repository/postgres.go @@ -1,12 +1,10 @@ package repositories import ( - "database/sql" "fmt" gormimpl "task/server/repository/gormimpl" interfaces "task/server/repository/interface" - "github.com/riverqueue/river" "gorm.io/gorm" ) @@ -24,9 +22,9 @@ func (r Postgres) TaskHistoryRepo() interfaces.TaskHistoryRepo { return r.history } -func NewPostgresRepo(db *gorm.DB, riverClient *river.Client[*sql.Tx]) interfaces.TaskManagmentInterface { +func NewPostgresRepo(db *gorm.DB) interfaces.TaskManagmentInterface { return &Postgres{ - task: gormimpl.NewTaskRepo(db, riverClient), - history: gormimpl.NewTaskHistoryRepo(db, riverClient), + task: gormimpl.NewTaskRepo(db), + history: gormimpl.NewTaskHistoryRepo(db), } } diff --git a/server/root/main.go b/server/root/main.go index d26de24..e181b95 100644 --- a/server/root/main.go +++ b/server/root/main.go @@ -5,26 +5,29 @@ import ( "errors" "fmt" "log/slog" + "math" "net/http" "os" "os/signal" "syscall" "time" + cloudv1connect "task/pkg/gen/cloud/v1/cloudv1connect" + "task/pkg/x" // Import the x package for env and config + repository "task/server/repository" // Import repository package + interfaces "task/server/repository/interface" // Import repository package + "task/server/route" // Import route package + oauth2 "task/server/route/oauth2" + + "golang.org/x/net/http2" + "golang.org/x/net/http2/h2c" + "connectrpc.com/connect" 
"connectrpc.com/grpchealth" "connectrpc.com/grpcreflect" "connectrpc.com/otelconnect" "github.com/rs/cors" "go.akshayshah.org/connectauth" - "golang.org/x/net/http2" - "golang.org/x/net/http2/h2c" - - cloudv1connect "task/pkg/gen/cloud/v1/cloudv1connect" - "task/pkg/x" // Import the x package for env and config - repository "task/server/repository" // Import repository package - interfaces "task/server/repository/interface" // Import repository package - "task/server/route" // Import route package "github.com/prometheus/client_golang/prometheus/promhttp" ) @@ -38,6 +41,14 @@ type AuthCtx struct { Username string } +type Config struct { + OAuth2 struct { + Issuer string + ClientID string + ClientSecret string + } +} + // newCORS initializes CORS settings for the server // It allows all origins and methods, and exposes necessary headers for gRPC-Web func newCORS() *cors.Cors { @@ -103,6 +114,18 @@ func run() error { exitChan := make(chan os.Signal, 1) signal.Notify(exitChan, syscall.SIGINT, syscall.SIGTERM) + auth, err := oauth2.NewAuthServer(oauth2.Config{ + Provider: env.OAuth2.Provider, + Issuer: env.OAuth2.Issuer, + ClientID: env.OAuth2.ClientID, + ClientSecret: env.OAuth2.ClientSecret, + RedirectURL: "http://localhost:8080/authorization-code/callback", + SessionKey: "session", + }) + if err != nil { + return fmt.Errorf("failed to initialize authorization server: %w", err) + } + // Create the repository with DB configuration repo, err := repository.GetRepository(env.Database.ToDbConnectionUri(), env.WorkerCount, env.Database.PoolMaxConns) if err != nil { @@ -112,7 +135,12 @@ func run() error { slog.Info("Database repository initialized", "workerCount", env.WorkerCount) // Set up gRPC middleware - middleware := connectauth.NewMiddleware(GrpcMiddleware) + middleware := connectauth.NewMiddleware(func(ctx context.Context, req *connectauth.Request) (any, error) { + if auth.IsAuthenticated(req) { + return AuthCtx{}, nil + } + return nil, errors.New("user is not authenticated") // Updated to return an error for unauthenticated users + }) // Set up HTTP server mux := http.NewServeMux() @@ -120,6 +148,10 @@ func run() error { return fmt.Errorf("failed to set up handlers: %w", err) } + mux.HandleFunc("/login", auth.LoginHandler) + mux.HandleFunc("/authorization-code/callback", auth.AuthCodeCallbackHandler) + mux.HandleFunc("/logout", auth.LogoutHandler) + // Add Prometheus metrics endpoint mux.Handle("/metrics", promhttp.Handler()) @@ -130,10 +162,10 @@ func run() error { newCORS().Handler(mux), &http2.Server{}, ), - ReadHeaderTimeout: time.Second, - ReadTimeout: 5 * time.Minute, - WriteTimeout: 5 * time.Minute, - MaxHeaderBytes: 8 * 1024, // 8KiB + MaxHeaderBytes: 1 << 20, // 1 MB + ReadHeaderTimeout: 60 * time.Minute, + ReadTimeout: 60 * time.Minute, + WriteTimeout: 60 * time.Minute, } // Start the server in a goroutine @@ -173,8 +205,10 @@ func setupHandlers(mux *http.ServeMux, repo interfaces.TaskManagmentInterface, m route.NewTaskServer(repo), connect.WithInterceptors(otelInterceptor), connect.WithCompressMinBytes(CompressMinByte), + connect.WithSendMaxBytes(math.MaxInt32), + connect.WithReadMaxBytes(math.MaxInt32), ) - mux.Handle(pattern, middleware.Wrap(handler)) + mux.Handle(pattern, handler) // Health check and reflection handlers mux.Handle(grpchealth.NewHandler( @@ -203,11 +237,3 @@ func shutdownServer(srv *http.Server) error { slog.Info("Server shutdown completed") return nil } - -// GrpcMiddleware is the gRPC middleware used for authentication. 
-// Currently, it uses a placeholder authentication mechanism. -func GrpcMiddleware(ctx context.Context, req *connectauth.Request) (any, error) { - // TODO: Implement proper authentication logic - slog.Warn("Using placeholder authentication", "username", "tqindia") - return AuthCtx{Username: "tqindia"}, nil -} diff --git a/server/route/oauth2/oauth2.go b/server/route/oauth2/oauth2.go new file mode 100644 index 0000000..87a99ab --- /dev/null +++ b/server/route/oauth2/oauth2.go @@ -0,0 +1,227 @@ +package oauth2 + +import ( + "context" + "crypto/rand" + "encoding/hex" + "encoding/json" + "fmt" + "net/http" + "os" + "strings" + "sync" + + "github.com/coreos/go-oidc/v3/oidc" + "github.com/gorilla/sessions" + "go.akshayshah.org/connectauth" + "golang.org/x/oauth2" + "golang.org/x/oauth2/github" + "golang.org/x/oauth2/google" +) + +const ( + sessionName = "custom-auth-session-store" +) + +// Config represents the configuration for the AuthServer +type Config struct { + Provider string // "google", "github", "facebook", or "okta" + Issuer string + ClientID string + ClientSecret string + RedirectURL string + SessionKey string +} + +// AuthServer represents the authorization server +type AuthServer struct { + config Config + sessionStore *sessions.CookieStore + state string + oauth2Config oauth2.Config + verifier *oidc.IDTokenVerifier + mu sync.Mutex // Add a mutex for thread-safe operations +} + +// NewAuthServer creates and initializes a new AuthServer +func NewAuthServer(config Config) (*AuthServer, error) { + ctx := context.Background() + + var oauth2Config oauth2.Config + var verifier *oidc.IDTokenVerifier + fmt.Println("config.Provider", config.Provider) + switch config.Provider { + case "google": + oauth2Config = oauth2.Config{ + ClientID: config.ClientID, + ClientSecret: config.ClientSecret, + RedirectURL: config.RedirectURL, + Endpoint: google.Endpoint, + Scopes: []string{oidc.ScopeOpenID, "profile", "email"}, + } + provider, err := oidc.NewProvider(ctx, "https://accounts.google.com") + if err != nil { + return nil, fmt.Errorf("failed to get Google provider: %v", err) + } + verifier = provider.Verifier(&oidc.Config{ClientID: config.ClientID}) + + case "github": + oauth2Config = oauth2.Config{ + ClientID: config.ClientID, + ClientSecret: config.ClientSecret, + RedirectURL: config.RedirectURL, + Endpoint: github.Endpoint, + Scopes: []string{"user:email"}, + } + // GitHub doesn't support OIDC, so we'll need to handle token verification differently + + case "okta": + provider, err := oidc.NewProvider(ctx, config.Issuer) + if err != nil { + return nil, fmt.Errorf("failed to get Okta provider: %v", err) + } + oauth2Config = oauth2.Config{ + ClientID: config.ClientID, + ClientSecret: config.ClientSecret, + RedirectURL: config.RedirectURL, + Endpoint: provider.Endpoint(), + Scopes: []string{oidc.ScopeOpenID, "profile", "email"}, + } + verifier = provider.Verifier(&oidc.Config{ClientID: config.ClientID}) + + default: + return nil, fmt.Errorf("unsupported provider: %s", config.Provider) + } + + return &AuthServer{ + config: config, + sessionStore: sessions.NewCookieStore([]byte(config.SessionKey)), + state: generateState(), + oauth2Config: oauth2Config, + verifier: verifier, + }, nil +} + +func generateState() string { + b := make([]byte, 16) + rand.Read(b) + return hex.EncodeToString(b) +} + +// LoginHandler handles the login request +func (as *AuthServer) LoginHandler(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + w.Header().Set("Cache-Control", 
"no-cache") + + as.mu.Lock() + authURL := as.oauth2Config.AuthCodeURL(as.state, oidc.Nonce(generateState())) + as.mu.Unlock() + + http.Redirect(w, r, authURL, http.StatusFound) +} + +// AuthCodeCallbackHandler handles the authorization code callback +func (as *AuthServer) AuthCodeCallbackHandler(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + + as.mu.Lock() + if r.URL.Query().Get("state") != as.state { + as.mu.Unlock() + http.Error(w, `{"error": "Invalid state"}`, http.StatusBadRequest) + return + } + as.mu.Unlock() + + // Make sure the code was provided + if r.URL.Query().Get("code") == "" { + http.Error(w, `{"error": "The code was not returned or is not accessible"}`, http.StatusBadRequest) + return + } + + oauth2Token, err := as.oauth2Config.Exchange(r.Context(), r.URL.Query().Get("code")) + if err != nil { + http.Error(w, fmt.Sprintf(`{"error": "Failed to exchange token: %v"}`, err), http.StatusInternalServerError) + return + } + + rawIDToken, ok := oauth2Token.Extra("id_token").(string) + if !ok { + http.Error(w, "No id_token field in oauth2 token.", http.StatusInternalServerError) + return + } + + _, err = as.verifier.Verify(r.Context(), rawIDToken) + if err != nil { + http.Error(w, "Failed to verify ID Token: "+err.Error(), http.StatusInternalServerError) + return + } + + // http.Redirect(w, r, "/", http.StatusFound) + w.Write([]byte(fmt.Sprintf(`{"access_token": "%s"}`, oauth2Token.AccessToken))) +} + +// LogoutHandler handles the logout request +func (as *AuthServer) LogoutHandler(w http.ResponseWriter, r *http.Request) { + as.mu.Lock() + session, err := as.sessionStore.Get(r, sessionName) + as.mu.Unlock() + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + } + + as.mu.Lock() + delete(session.Values, "id_token") + delete(session.Values, "access_token") + err = session.Save(r, w) + as.mu.Unlock() + if err != nil { + http.Error(w, "Failed to save session: "+err.Error(), http.StatusInternalServerError) + return + } + + http.Redirect(w, r, "/", http.StatusFound) +} + +// IsAuthenticated checks if the user is authenticated +func (as *AuthServer) IsAuthenticated(r *connectauth.Request) bool { + // Check for bearer token in the Authorization header + authHeader := r.Header.Get("Authorization") + if strings.HasPrefix(authHeader, "Bearer ") { + token := strings.TrimPrefix(authHeader, "Bearer ") + // Verify the token + _, err := as.verifier.Verify(context.Background(), token) + if err == nil { + return false + } + return true + } + return false +} + +// getProfileData retrieves the user's profile data +func (as *AuthServer) getProfileData(r *http.Request) (map[string]interface{}, error) { + as.mu.Lock() + session, err := as.sessionStore.Get(r, "okta-hosted-login-session-store") + as.mu.Unlock() + if err != nil { + return nil, err + } + + accessToken, ok := session.Values["access_token"].(string) + if !ok { + return nil, fmt.Errorf("no access token found in session") + } + + userInfo, err := as.oauth2Config.Client(r.Context(), &oauth2.Token{AccessToken: accessToken}).Get(os.Getenv("ISSUER") + "/v1/userinfo") + if err != nil { + return nil, fmt.Errorf("failed to get userinfo: %v", err) + } + defer userInfo.Body.Close() + + var profile map[string]interface{} + if err := json.NewDecoder(userInfo.Body).Decode(&profile); err != nil { + return nil, fmt.Errorf("failed to decode userinfo: %v", err) + } + + return profile, nil +} diff --git a/server/route/task.go b/server/route/task.go index f4b0585..218ff3a 100644 --- 
a/server/route/task.go +++ b/server/route/task.go @@ -14,9 +14,11 @@ import ( "google.golang.org/protobuf/types/known/emptypb" + "sync" + connect "connectrpc.com/connect" - "github.com/avast/retry-go/v4" protovalidate "github.com/bufbuild/protovalidate-go" + "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" "google.golang.org/protobuf/reflect/protoreflect" @@ -31,11 +33,15 @@ const ( // TaskServer represents the server handling task-related requests. // It implements the cloudv1connect.TaskManagementServiceHandler interface. type TaskServer struct { - taskRepo interfaces.TaskRepo - historyRepo interfaces.TaskHistoryRepo - logger *log.Logger - validator *protovalidate.Validator - metrics *taskMetrics + taskRepo interfaces.TaskRepo + historyRepo interfaces.TaskHistoryRepo + logger *log.Logger + validator *protovalidate.Validator + metrics *taskMetrics + channel chan task.Task + maxWorkers int + clientHeartbeats sync.Map + heartbeatTimeout time.Duration } type taskMetrics struct { @@ -84,18 +90,23 @@ func newTaskMetrics() *taskMetrics { // NewTaskServer creates and returns a new instance of TaskServer. // It initializes the validator, sets up the logger, and configures metrics. +// The maxWorkers parameter can be configured to control the number of concurrent workers. func NewTaskServer(repo interfaces.TaskManagmentInterface) cloudv1connect.TaskManagementServiceHandler { + // Initialize the validator for request validation validator, err := protovalidate.New() if err != nil { log.Fatalf("Failed to initialize validator: %v", err) } + maxWorkers := 500 // Configurable maximum number of concurrent workers server := &TaskServer{ - taskRepo: repo.TaskRepo(), - historyRepo: repo.TaskHistoryRepo(), - logger: log.New(os.Stdout, logPrefix, log.LstdFlags|log.Lshortfile), - validator: validator, - metrics: newTaskMetrics(), + taskRepo: repo.TaskRepo(), + historyRepo: repo.TaskHistoryRepo(), + logger: log.New(os.Stdout, logPrefix, log.LstdFlags|log.Lshortfile), + validator: validator, + metrics: newTaskMetrics(), + maxWorkers: maxWorkers, + heartbeatTimeout: 30 * time.Second, // Configurable timeout for heartbeats } server.logger.Println("TaskServer initialized successfully") @@ -111,6 +122,7 @@ func (s *TaskServer) CreateTask(ctx context.Context, req *connect.Request[v1.Cre s.metrics.createTaskCounter.Inc() s.logger.Printf("Creating task: name=%s, type=%s", req.Msg.Name, req.Msg.GetType()) + // Validate the incoming request if err := s.validateRequest(req.Msg); err != nil { s.logger.Printf("CreateTask validation failed: %v", err) return nil, err @@ -118,35 +130,18 @@ func (s *TaskServer) CreateTask(ctx context.Context, req *connect.Request[v1.Cre newTask := s.prepareNewTask(req.Msg) + // Attempt to create the task in the repository createdTask, err := s.taskRepo.CreateTask(ctx, newTask) if err != nil { s.metrics.errorCounter.WithLabelValues("create_task").Inc() return nil, s.logError(err, "Failed to create task in repository") } - // Attempt to log task creation history with retries - err = retry.Do( - func() error { - return s.logTaskCreationHistory(ctx, createdTask.ID) - }, - retry.Attempts(3), - retry.Delay(100*time.Millisecond), - retry.DelayType(retry.BackOffDelay), - retry.OnRetry(func(n uint, err error) { - s.logger.Printf("Retry %d: Failed to create task status history: %v", n, err) - }), - ) - - if err != nil { - s.logger.Printf("WARNING: Failed to create task status history after retries: %v", err) - // Consider whether to return an 
error here or continue - } - s.logger.Printf("Task created successfully: id=%d", createdTask.ID) return connect.NewResponse(&v1.CreateTaskResponse{Id: int32(createdTask.ID)}), nil } -// GetTask retrieves the status of a task. +// GetTask retrieves the status of a task by its ID. func (s *TaskServer) GetTask(ctx context.Context, req *connect.Request[v1.GetTaskRequest]) (*connect.Response[v1.Task], error) { timer := prometheus.NewTimer(s.metrics.taskDuration.WithLabelValues("get_task")) defer timer.ObserveDuration() @@ -154,10 +149,12 @@ func (s *TaskServer) GetTask(ctx context.Context, req *connect.Request[v1.GetTas s.metrics.getTaskCounter.Inc() s.logger.Printf("Retrieving task: id=%d", req.Msg.Id) + // Validate the incoming request if err := s.validateRequest(req.Msg); err != nil { return nil, err } + // Fetch the task from the repository taskResponse, err := s.taskRepo.GetTaskByID(ctx, uint(req.Msg.Id)) if err != nil { s.metrics.errorCounter.WithLabelValues("get_task").Inc() @@ -168,7 +165,7 @@ func (s *TaskServer) GetTask(ctx context.Context, req *connect.Request[v1.GetTas return connect.NewResponse(s.convertTaskToProto(taskResponse)), nil } -// GetTaskHistory retrieves the history of a task. +// GetTaskHistory retrieves the history of a task by its ID. func (s *TaskServer) GetTaskHistory(ctx context.Context, req *connect.Request[v1.GetTaskHistoryRequest]) (*connect.Response[v1.GetTaskHistoryResponse], error) { timer := prometheus.NewTimer(s.metrics.taskDuration.WithLabelValues("get_task_history")) defer timer.ObserveDuration() @@ -176,10 +173,12 @@ func (s *TaskServer) GetTaskHistory(ctx context.Context, req *connect.Request[v1 s.metrics.getTaskHistoryCounter.Inc() s.logger.Printf("Retrieving task history: id=%d", req.Msg.Id) + // Validate the incoming request if err := s.validateRequest(req.Msg); err != nil { return nil, err } + // Fetch the task history from the repository history, err := s.historyRepo.ListTaskHistories(ctx, uint(req.Msg.Id)) if err != nil { s.metrics.errorCounter.WithLabelValues("get_task_history").Inc() @@ -192,7 +191,7 @@ func (s *TaskServer) GetTaskHistory(ctx context.Context, req *connect.Request[v1 return connect.NewResponse(&v1.GetTaskHistoryResponse{History: protoHistory}), nil } -// UpdateTaskStatus updates the status of a task. +// UpdateTaskStatus updates the status of a task and logs the operation. 
func (s *TaskServer) UpdateTaskStatus(ctx context.Context, req *connect.Request[v1.UpdateTaskStatusRequest]) (*connect.Response[emptypb.Empty], error) { timer := prometheus.NewTimer(s.metrics.taskDuration.WithLabelValues("update_task_status")) defer timer.ObserveDuration() @@ -200,25 +199,27 @@ func (s *TaskServer) UpdateTaskStatus(ctx context.Context, req *connect.Request[ s.metrics.updateTaskStatusCounter.Inc() s.logger.Printf("Updating task status: id=%d, status=%s", req.Msg.Id, req.Msg.Status) + // Validate the incoming request if err := s.validateRequest(req.Msg); err != nil { return nil, err } + // Update the task status in the repository if err := s.taskRepo.UpdateTaskStatus(ctx, uint(req.Msg.Id), int(req.Msg.Status)); err != nil { s.metrics.errorCounter.WithLabelValues("update_task_status").Inc() return nil, s.logError(err, "Failed to update task status: id=%d", req.Msg.Id) } + // Log the status update in the task history if err := s.createTaskStatusHistory(ctx, uint(req.Msg.Id), int(req.Msg.Status), req.Msg.Message); err != nil { s.logger.Printf("WARNING: Failed to create task status history: %v", err) - // Consider whether to return an error here or continue } s.logger.Printf("Task status updated: id=%d", req.Msg.Id) return connect.NewResponse(&emptypb.Empty{}), nil } -// ListTasks retrieves a list of tasks. +// ListTasks retrieves a list of tasks with pagination support. func (s *TaskServer) ListTasks(ctx context.Context, req *connect.Request[v1.TaskListRequest]) (*connect.Response[v1.TaskList], error) { timer := prometheus.NewTimer(s.metrics.taskDuration.WithLabelValues("list_tasks")) defer timer.ObserveDuration() @@ -226,6 +227,7 @@ func (s *TaskServer) ListTasks(ctx context.Context, req *connect.Request[v1.Task s.metrics.listTasksCounter.Inc() s.logger.Print("Retrieving list of tasks") + // Validate the incoming request if err := s.validateRequest(req.Msg); err != nil { return nil, err } @@ -242,6 +244,7 @@ func (s *TaskServer) ListTasks(ctx context.Context, req *connect.Request[v1.Task offset = 0 // Default offset } + // Fetch the list of tasks from the repository tasks, err := s.taskRepo.ListTasks(ctx, limit, offset, int(req.Msg.GetStatus()), req.Msg.GetType()) if err != nil { s.metrics.errorCounter.WithLabelValues("list_tasks").Inc() @@ -265,10 +268,12 @@ func (s *TaskServer) GetStatus(ctx context.Context, req *connect.Request[v1.GetS s.metrics.getTaskCounter.Inc() s.logger.Print("Retrieving task status counts") + // Validate the incoming request if err := s.validateRequest(req.Msg); err != nil { return nil, err } + // Fetch the task status counts from the repository statusCounts, err := s.taskRepo.GetTaskStatusCounts(ctx) if err != nil { s.metrics.errorCounter.WithLabelValues("get_status").Inc() @@ -287,6 +292,74 @@ func (s *TaskServer) GetStatus(ctx context.Context, req *connect.Request[v1.GetS return connect.NewResponse(response), nil } +// Heartbeat handles client heartbeats to maintain connection status. +func (s *TaskServer) Heartbeat(ctx context.Context, req *connect.Request[v1.HeartbeatRequest]) (*connect.Response[v1.HeartbeatResponse], error) { + s.clientHeartbeats.Store("clientID", req.Msg.Timestamp) + return connect.NewResponse(&v1.HeartbeatResponse{}), nil +} + +// PullEvents handles bidirectional streaming for task updates and assignments. 
+func (s *TaskServer) PullEvents(ctx context.Context, req *connect.Request[v1.PullEventsRequest], stream *connect.ServerStream[v1.PullEventsResponse]) error { + ticker := time.NewTicker(10 * time.Second) // Trigger every 10 seconds + defer ticker.Stop() + + for { + select { + case <-ticker.C: + tasks, err := s.taskRepo.GetStalledTasks(ctx) + if err != nil { + s.logger.Printf("Error checking stalled tasks: %v", err) + continue // Skip to the next tick on error + } + + for _, t := range tasks { + if err := stream.Send(&v1.PullEventsResponse{ + Work: &v1.WorkAssignment{ + AssignmentId: int64(t.ID), + Task: s.convertTaskToProto(&t), + }, + }); err != nil { + s.logger.Printf("Error sending task to client: %v", err) + return err + } + if err := s.updateTaskStatus(ctx, uint(t.ID), v1.TaskStatusEnum_QUEUED, "Task is Queued"); err != nil { + s.logger.Printf("Error updating task status: %v", err) + } + } + case <-ctx.Done(): + return ctx.Err() // Exit if the context is done + } + } +} + +// updateTaskStatus updates the task status and creates a history entry. +func (s *TaskServer) updateTaskStatus(ctx context.Context, taskID uint, status v1.TaskStatusEnum, message string) error { + if err := s.taskRepo.UpdateTaskStatus(ctx, taskID, int(status)); err != nil { + return fmt.Errorf("failed to update task status: %w", err) + } + + if err := s.createTaskStatusHistory(ctx, taskID, int(status), message); err != nil { + s.logger.Printf("WARNING: Failed to create task status history: %v", err) + } + + return nil +} + +// handleUpdateTaskStatus processes task status update requests. +func (s *TaskServer) handleUpdateTaskStatus(ctx context.Context, update *v1.UpdateTaskStatusRequest) error { + if err := s.taskRepo.UpdateTaskStatus(ctx, uint(update.Id), int(update.Status)); err != nil { + s.metrics.errorCounter.WithLabelValues("update_task_status").Inc() + return fmt.Errorf("failed to update task status: id=%d, error: %w", update.Id, err) + } + + if err := s.createTaskStatusHistory(ctx, uint(update.Id), int(update.Status), update.Message); err != nil { + s.logger.Printf("WARNING: Failed to create task status history: %v", err) + } + + s.logger.Printf("Task status updated: id=%d, new status=%d", update.Id, update.Status) + return nil +} + // createTaskStatusHistory creates a new task history entry for the status update. func (s *TaskServer) createTaskStatusHistory(ctx context.Context, taskID uint, status int, message string) error { _, err := s.historyRepo.CreateTaskHistory(ctx, task.TaskHistory{ @@ -324,7 +397,7 @@ func (s *TaskServer) prepareNewTask(req *v1.CreateTaskRequest) task.Task { newTask := task.Task{ Name: req.Name, - Status: int(v1.TaskStatusEnum_QUEUED), + Status: int(v1.TaskStatusEnum_UNKNOWN), Description: req.Description, Type: req.Type, Payload: payloadJSON,