-
Notifications
You must be signed in to change notification settings - Fork 367
Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
feat(backend): implement kafka connect grpc connect
Extend Kafka Connect services to expose them via gRPC Connect and gRPC gateway so they are also exposed through the REST API. Signed-off-by: Santiago Jimenez Giraldo <[email protected]>
Showing
19 changed files
with
1,016 additions
and
759 deletions.
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,176 @@ | ||
// Copyright 2023 Redpanda Data, Inc. | ||
// | ||
// Use of this software is governed by the Business Source License | ||
// included in the file licenses/BSL.md | ||
// | ||
// As of the Change Date specified in that file, in accordance with | ||
// the Business Source License, use of this software will be governed | ||
// by the Apache License, Version 2.0 | ||
|
||
// Package kafkaconnect implements the KafkaConnect interface for the Connect API. | ||
package kafkaconnect | ||
|
||
import ( | ||
"fmt" | ||
|
||
con "github.com/cloudhut/connect-client" | ||
"golang.org/x/exp/slices" | ||
|
||
kafkaconnect "github.com/redpanda-data/console/backend/pkg/connect" | ||
dataplanev1alpha1 "github.com/redpanda-data/console/backend/pkg/protogen/redpanda/api/dataplane/v1alpha1" | ||
) | ||
|
||
type mapper struct{} | ||
|
||
func (m mapper) connectorsHTTPResponseToProto(httpResponse kafkaconnect.ClusterConnectors, request *dataplanev1alpha1.ListConnectorsRequest) (*dataplanev1alpha1.ListConnectorsResponse, error) { | ||
connectors := make([]*dataplanev1alpha1.ListConnectorsResponse_ConnectorInfoStatus, len(httpResponse.Connectors)) | ||
|
||
for i, connector := range httpResponse.Connectors { | ||
errors, enumErr := m.connectorErrorsToProto(connector.Errors) | ||
if enumErr != nil { | ||
return nil, enumErr | ||
} | ||
|
||
connectors[i] = &dataplanev1alpha1.ListConnectorsResponse_ConnectorInfoStatus{ | ||
Name: connector.Name, | ||
HolisticState: m.holisticStateToProto(connector.Status), | ||
Errors: errors, | ||
} | ||
|
||
if slices.Contains(request.Expand, "info") { | ||
connectors[i].Info = &dataplanev1alpha1.ConnectorSpec{ | ||
Name: connector.Name, | ||
Type: connector.Type, | ||
Config: connector.Config, | ||
Tasks: m.taskInfoListToProtoInfo(connector.Name, connector.Tasks), | ||
} | ||
} | ||
|
||
if slices.Contains(request.Expand, "status") { | ||
connectors[i].Status = &dataplanev1alpha1.ConnectorStatus{ | ||
Name: connector.Name, | ||
Connector: &dataplanev1alpha1.ConnectorStatus_Connector{ | ||
State: connector.State, | ||
WorkerId: connector.WorkerID, | ||
}, | ||
Tasks: m.taskInfoListToProtoStatus(connector.Tasks), | ||
Type: connector.Type, | ||
Trace: connector.Trace, | ||
} | ||
} | ||
} | ||
|
||
return &dataplanev1alpha1.ListConnectorsResponse{ | ||
Connectors: connectors, | ||
}, nil | ||
} | ||
|
||
func (m mapper) taskInfoListToProtoInfo(connectorName string, taskInfoList []kafkaconnect.ClusterConnectorTaskInfo) []*dataplanev1alpha1.TaskInfo { | ||
tasks := make([]*dataplanev1alpha1.TaskInfo, len(taskInfoList)) | ||
for i, task := range taskInfoList { | ||
tasks[i] = m.taskToProto(connectorName, task.TaskID) | ||
} | ||
return tasks | ||
} | ||
|
||
func (m mapper) connectorTaskIDToProto(connectorName string, taskInfoList []con.ConnectorTaskID) []*dataplanev1alpha1.TaskInfo { | ||
tasks := make([]*dataplanev1alpha1.TaskInfo, len(taskInfoList)) | ||
for i, task := range taskInfoList { | ||
tasks[i] = m.taskToProto(connectorName, task.Task) | ||
} | ||
return tasks | ||
} | ||
|
||
func (mapper) taskToProto(name string, taskID int) *dataplanev1alpha1.TaskInfo { | ||
return &dataplanev1alpha1.TaskInfo{ | ||
Connector: name, | ||
Task: int32(taskID), | ||
} | ||
} | ||
|
||
func (mapper) taskInfoListToProtoStatus(taskInfoList []kafkaconnect.ClusterConnectorTaskInfo) []*dataplanev1alpha1.TaskStatus { | ||
tasks := make([]*dataplanev1alpha1.TaskStatus, len(taskInfoList)) | ||
for i, task := range taskInfoList { | ||
tasks[i] = &dataplanev1alpha1.TaskStatus{ | ||
Id: int32(task.TaskID), | ||
State: task.State, | ||
WorkerId: task.WorkerID, | ||
Trace: task.Trace, | ||
} | ||
} | ||
return tasks | ||
} | ||
|
||
func (mapper) holisticStateToProto(state string) dataplanev1alpha1.HolisticState { | ||
switch state { | ||
case kafkaconnect.ConnectorStatusPaused: | ||
return dataplanev1alpha1.HolisticState_HOLISTIC_STATE_PAUSED | ||
case kafkaconnect.ConnectorStatusStopped: | ||
return dataplanev1alpha1.HolisticState_HOLISTIC_STATE_STOPPED | ||
case kafkaconnect.ConnectorStatusRestarting: | ||
return dataplanev1alpha1.HolisticState_HOLISTIC_STATE_RESTARTING | ||
case kafkaconnect.ConnectorStatusDestroyed: | ||
return dataplanev1alpha1.HolisticState_HOLISTIC_STATE_DESTROYED | ||
case kafkaconnect.ConnectorStatusUnassigned: | ||
return dataplanev1alpha1.HolisticState_HOLISTIC_STATE_UNASSIGNED | ||
case kafkaconnect.ConnectorStatusHealthy: | ||
return dataplanev1alpha1.HolisticState_HOLISTIC_STATE_HEALTHY | ||
case kafkaconnect.ConnectorStatusUnhealthy: | ||
return dataplanev1alpha1.HolisticState_HOLISTIC_STATE_UNHEALTHY | ||
case kafkaconnect.ConnectorStatusDegraded: | ||
return dataplanev1alpha1.HolisticState_HOLISTIC_STATE_DEGRADED | ||
default: | ||
return dataplanev1alpha1.HolisticState_HOLISTIC_STATE_UNKNOWN | ||
} | ||
} | ||
|
||
func (m mapper) connectorErrorsToProto(errors []kafkaconnect.ClusterConnectorInfoError) ([]*dataplanev1alpha1.ConnectorError, error) { | ||
connectErrors := make([]*dataplanev1alpha1.ConnectorError, len(errors)) | ||
for i, err := range errors { | ||
errorType, enumErr := m.connectorErrorTypeToProto(err.Type) | ||
if enumErr != nil { | ||
return nil, enumErr | ||
} | ||
connectErrors[i] = &dataplanev1alpha1.ConnectorError{ | ||
Title: err.Title, | ||
Content: err.Content, | ||
Type: errorType, | ||
} | ||
} | ||
return connectErrors, nil | ||
} | ||
|
||
func (mapper) connectorErrorTypeToProto(errorType string) (dataplanev1alpha1.ConnectorError_Type, error) { | ||
switch errorType { | ||
case "ERROR": | ||
return dataplanev1alpha1.ConnectorError_TYPE_ERROR, nil | ||
case "WARNING": | ||
return dataplanev1alpha1.ConnectorError_TYPE_WARNING, nil | ||
default: | ||
return dataplanev1alpha1.ConnectorError_TYPE_UNSPECIFIED, fmt.Errorf("failed to map given error type %q to proto", errorType) | ||
} | ||
} | ||
|
||
func (mapper) createConnectorProtoToClientRequest(createConnector *dataplanev1alpha1.CreateConnectorRequest) (*con.CreateConnectorRequest, error) { | ||
if createConnector == nil || createConnector.Connector == nil { | ||
return nil, fmt.Errorf("create connector request is nil") | ||
} | ||
|
||
if len(createConnector.Connector.Config) == 0 { | ||
return nil, fmt.Errorf("create connector request config is empty") | ||
} | ||
|
||
return &con.CreateConnectorRequest{ | ||
Name: createConnector.Connector.Name, | ||
Config: ConvertStringMapToInterfaceMap(createConnector.Connector.Config), | ||
}, nil | ||
} | ||
|
||
// ConvertStringMapToInterfaceMap converts interface map to string map | ||
func ConvertStringMapToInterfaceMap(stringMap map[string]string) map[string]interface{} { | ||
interfaceMap := make(map[string]interface{}, len(stringMap)) | ||
for key, value := range stringMap { | ||
interfaceMap[key] = value | ||
} | ||
return interfaceMap | ||
} |
Oops, something went wrong.