Skip to content

Commit

Permalink
feat: generate subcommand and default provisioners
Browse files Browse the repository at this point in the history
  • Loading branch information
astromechza committed Mar 10, 2024
1 parent 0885fc9 commit 72146d8
Show file tree
Hide file tree
Showing 6 changed files with 604 additions and 1 deletion.
76 changes: 76 additions & 0 deletions internal/command/99-default.provisioners.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,76 @@
# The default volume provisioner provided by score-compose allows basic volume resources to be created in the resources
# system. The volume resource just creates an ephemeral Docker volume with a random string as the name, and source
# attribute that we can reference.
- uri: template://default-provisioners/volume
  # By default, match all classes and ids of volume. If you want to override this, create another provisioner definition
  # with a higher priority.
  type: volume
  # Init template: computes a candidate volume name from the resource id plus a random suffix.
  init: |
    randomVolumeName: {{ .Id | replace "." "-" }}-{{ randAlphaNum 6 }}
  # Store the random volume name if we haven't chosen one yet, otherwise use the one that exists already
  state: |
    name: {{ dig "name" .Init.randomVolumeName .State }}
  # Return a source value with the volume name. This can be used in volume resource references now.
  outputs: |
    source: {{ .State.name }}
  # Add a volume to the docker compose file. We assume our name is unique here. We also apply a label to help ensure
  # that we can track the volume back to the workload and resource that created it.
  volumes: |
    {{ .State.name }}:
      name: {{ .State.name }}
      driver: local
      labels:
        dev.score.compose.res.uid: {{ .Uid }}

# The default redis provisioner adds a redis service to the project which returns a host, port, username, and password.
- uri: template://default-provisioners/redis
  # By default, match all redis types regardless of class and id. If you want to override this, create another
  # provisioner definition with a higher priority.
  type: redis
  # Init template has the default port and a random service name and password if needed later
  init: |
    port: 6379
    randomServiceName: redis-{{ randAlphaNum 6 }}
    randomPassword: {{ randAlphaNum 16 | quote }}
  # The only state we need to persist is the chosen random service name and password
  state: |
    serviceName: {{ dig "serviceName" .Init.randomServiceName .State | quote }}
    password: {{ dig "password" .Init.randomPassword .State | quote }}
  # Return the outputs schema that consumers expect
  outputs: |
    host: {{ .State.serviceName }}
    port: {{ .Init.port }}
    username: default
    password: {{ .State.password | quote }}
  # write the config file to the mounts directory
  files: |
    {{ .State.serviceName }}/redis.conf: |
      requirepass {{ .State.password }}
      port {{ .Init.port }}
      save 60 1
      loglevel warning
  # add a volume for persistence of the redis data
  volumes: |
    {{ .State.serviceName }}-data:
      name: {{ .State.serviceName }}-data
      driver: local
      labels:
        dev.score.compose.res.uid: {{ .Uid }}
  # And the redis service itself with volumes bound in
  services: |
    {{ .State.serviceName }}:
      labels:
        dev.score.compose.res.uid: {{ .Uid }}
      image: redis:7
      entrypoint: ["redis-server"]
      command: ["/usr/local/etc/redis/redis.conf"]
      volumes:
        - type: bind
          source: {{ .MountsDirectory }}/{{ .State.serviceName }}/redis.conf
          target: /usr/local/etc/redis/redis.conf
          read_only: true
        - type: volume
          source: {{ .State.serviceName }}-data
          target: /data
          volume:
            nocopy: true
285 changes: 285 additions & 0 deletions internal/command/generate.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,285 @@
package command

import (
"context"
"fmt"
"log/slog"
"os"
"slices"
"strings"

"github.com/compose-spec/compose-go/v2/types"
"github.com/imdario/mergo"
"github.com/score-spec/score-go/loader"
"github.com/score-spec/score-go/schema"
score "github.com/score-spec/score-go/types"
"github.com/spf13/cobra"
"gopkg.in/yaml.v3"

"github.com/score-spec/score-compose/internal/compose"
"github.com/score-spec/score-compose/internal/project"
"github.com/score-spec/score-compose/internal/provisioners"
provloader "github.com/score-spec/score-compose/internal/provisioners/loader"
)

const (
	// generateCmdOverridesFileFlag names the flag that supplies a Score overrides file to merge over the spec.
	generateCmdOverridesFileFlag = "overrides-file"
	// generateCmdOverridePropertyFlag names the repeatable flag that sets or removes individual spec properties.
	generateCmdOverridePropertyFlag = "override-property"
)

// generateCommand converts the Score files of the current project into a single
// Docker compose manifest. It loads the state directory created by
// "score-compose init", applies any override flags, validates and registers the
// workloads, provisions their resources, and writes the combined compose file.
var generateCommand = &cobra.Command{
	Use:   "generate",
	Args:  cobra.ArbitraryArgs,
	Short: "Convert one or more Score files into a Docker compose manifest",
	Long: `The generate command will convert Score files in the current Score compose project into a combined Docker compose
manifest. All resources and links between Workloads will be resolved and provisioned as required.
By default this command looks for score.yaml in the current directory, but can take explicit file names as positional
arguments.
"score-compose init" MUST be run first. An error will be thrown if the project directory is not present.
`,
	Example: `
# Use default values
score-compose generate
# Specify Score files
score-compose generate score.yaml *.score.yaml
# Provide overrides when one score file is provided
score-compose generate score.yaml --override-file=./overrides.score.yaml --override-property=metadata.key=value
# Provide overrides when more than one score file is provided
score-compose generate score.yaml score-two.yaml --override-file=my-workload=./overrides.score.yaml --override-property=my-other-workload=metadata.key=value`,

	// don't print the errors - we print these ourselves in main()
	SilenceErrors: true,

	RunE: func(cmd *cobra.Command, args []string) error {
		cmd.SilenceUsage = true

		// find the input score files
		inputFiles := []string{scoreFileDefault}
		if len(args) > 0 {
			inputFiles = args
		}
		slices.Sort(inputFiles)
		slog.Debug("Input Score files", "files", inputFiles)

		// first load all the score files, parse them with a dummy yaml decoder to find the workload name, reject any
		// with invalid or duplicate names.
		workloadNames, workloadSpecs, err := loadRawScoreFiles(inputFiles)
		if err != nil {
			return err
		}
		slog.Debug("Input Workload names", "names", workloadNames)
		if len(workloadNames) == 0 {
			return fmt.Errorf("at least one Score file must be provided")
		}

		// Now read and apply any overrides files to the score files
		if v, _ := cmd.Flags().GetString(generateCmdOverridesFileFlag); v != "" {
			if len(workloadNames) > 1 {
				return fmt.Errorf("--%s cannot be used when multiple score files are provided", generateCmdOverridesFileFlag)
			}
			if err := parseAndApplyOverrideFile(v, generateCmdOverridesFileFlag, workloadSpecs[workloadNames[0]]); err != nil {
				return err
			}
		}

		// Now read, parse, and apply any override properties to the score files.
		// NOTE(fix): previously this passed generateCmdOverridesFileFlag, so error
		// messages blamed the wrong flag.
		if v, _ := cmd.Flags().GetStringArray(generateCmdOverridePropertyFlag); len(v) > 0 {
			for _, overridePropertyEntry := range v {
				if err := parseAndApplyOverrideProperty(overridePropertyEntry, generateCmdOverridePropertyFlag, workloadSpecs[workloadNames[0]]); err != nil {
					return err
				}
			}
		}

		// The state directory must already exist - "init" is responsible for creating it.
		sd, ok, err := project.LoadStateDirectory(".")
		if err != nil {
			return fmt.Errorf("failed to load existing state directory: %w", err)
		} else if !ok {
			return fmt.Errorf("state directory does not exist, please run \"score-compose init\" first")
		}
		slog.Info(fmt.Sprintf("Loaded state directory with docker compose project '%s'", sd.State.ComposeProjectName))
		currentState := &sd.State

		// Now validate with score spec
		for workloadName, spec := range workloadSpecs {
			// Ensure transforms are applied (be a good citizen)
			if changes, err := schema.ApplyCommonUpgradeTransforms(spec); err != nil {
				return fmt.Errorf("failed to upgrade spec: %w", err)
			} else if len(changes) > 0 {
				for _, change := range changes {
					slog.Info(fmt.Sprintf("Applying backwards compatible upgrade to '%s': %s", workloadName, change))
				}
			}
			if err := schema.Validate(spec); err != nil {
				return fmt.Errorf("validation errors in workload '%s': %w", workloadName, err)
			}
			slog.Info(fmt.Sprintf("Validated workload '%s'", workloadName))

			var out score.Workload
			if err := loader.MapSpec(&out, spec); err != nil {
				return fmt.Errorf("failed to convert '%s' to structure: %w", workloadName, err)
			}

			currentState, err = currentState.WithWorkload(&out, nil)
			if err != nil {
				return fmt.Errorf("failed to add workload '%s': %w", workloadName, err)
			}
		}

		loadedProvisioners, err := provloader.LoadProvisionersFromDirectory(sd.Path, provloader.DefaultSuffix)
		if err != nil {
			return fmt.Errorf("failed to load provisioners: %w", err)
		} else if len(loadedProvisioners) > 0 {
			slog.Info(fmt.Sprintf("Successfully loaded %d resource provisioners", len(loadedProvisioners)))
		}

		currentState, err = currentState.WithPrimedResources()
		if err != nil {
			return fmt.Errorf("failed to prime resources: %w", err)
		}

		// The super project is the single compose project into which all workloads and
		// provisioned resources are merged.
		superProject := &types.Project{
			Name:     sd.State.ComposeProjectName,
			Services: make(types.Services, 0),
			Volumes:  map[string]types.VolumeConfig{},
			Networks: map[string]types.NetworkConfig{},
		}

		currentState, err = provisioners.ProvisionResources(context.Background(), currentState, loadedProvisioners, superProject)
		if err != nil {
			return fmt.Errorf("failed to provision: %w", err)
		} else if len(currentState.Resources) > 0 {
			slog.Info(fmt.Sprintf("Provisioned %d resources", len(currentState.Resources)))
		}

		for _, workloadName := range workloadNames {
			outputFunctions, err := currentState.GetResourceOutputForWorkload(workloadName)
			if err != nil {
				return err
			}

			slog.Info(fmt.Sprintf("Converting workload '%s' to Docker compose", workloadName))
			spec := currentState.Workloads[workloadName].Spec
			converted, err := compose.ConvertSpec(&spec, outputFunctions)
			if err != nil {
				return fmt.Errorf("failed to convert workload '%s' to Docker compose: %w", workloadName, err)
			}

			// Merge the converted services/volumes/networks in, rejecting name collisions
			// rather than silently overwriting an earlier workload's entries.
			for serviceName, service := range converted.Services {
				if _, ok := superProject.Services[serviceName]; ok {
					return fmt.Errorf("failed to add converted workload '%s': duplicate service name '%s'", workloadName, serviceName)
				}
				superProject.Services[serviceName] = service
			}
			for volumeName, volume := range converted.Volumes {
				if _, ok := superProject.Volumes[volumeName]; ok {
					return fmt.Errorf("failed to add converted workload '%s': duplicate volume name '%s'", workloadName, volumeName)
				}
				superProject.Volumes[volumeName] = volume
			}
			for networkName, network := range converted.Networks {
				if _, ok := superProject.Networks[networkName]; ok {
					return fmt.Errorf("failed to add converted workload '%s': duplicated network name '%s'", workloadName, networkName)
				}
				superProject.Networks[networkName] = network
			}
		}

		sd.State = *currentState
		if err := sd.Persist(); err != nil {
			return fmt.Errorf("failed to persist updated state directory: %w", err)
		}

		// NOTE(fix): the marshal error was previously discarded with `_`.
		raw, err := yaml.Marshal(superProject)
		if err != nil {
			return fmt.Errorf("failed to marshal compose project: %w", err)
		}

		// Write via a temp file + rename so a crash mid-write never leaves a truncated
		// output file. NOTE(fix): the data file mode was 0755 (executable); 0644 is correct.
		v, _ := cmd.Flags().GetString("output")
		if v == "" {
			return fmt.Errorf("no output file specified")
		} else if v == "-" {
			_, _ = fmt.Fprint(cmd.OutOrStdout(), string(raw))
		} else if err := os.WriteFile(v+".temp", raw, 0644); err != nil {
			return fmt.Errorf("failed to write output file: %w", err)
		} else if err := os.Rename(v+".temp", v); err != nil {
			return fmt.Errorf("failed to complete writing output file: %w", err)
		}
		return nil
	},
}

// loadRawScoreFiles loads raw score specs as yaml from the given files and finds all the workload names. It throws
// errors if it failed to read, load, or if names are duplicated.
//
// The returned slice preserves the input file order; the map is keyed by workload name. A file with no
// metadata map (or no string name) is recorded under the empty-string name.
func loadRawScoreFiles(fileNames []string) ([]string, map[string]map[string]interface{}, error) {
	workloadNames := make([]string, 0, len(fileNames))
	workloadToRawScore := make(map[string]map[string]interface{}, len(fileNames))

	for _, fileName := range fileNames {
		var out map[string]interface{}
		raw, err := os.ReadFile(fileName)
		if err != nil {
			return nil, nil, fmt.Errorf("failed to read '%s': %w", fileName, err)
		} else if err := yaml.Unmarshal(raw, &out); err != nil {
			return nil, nil, fmt.Errorf("failed to decode '%s' as yaml: %w", fileName, err)
		}

		var workloadName string
		if meta, ok := out["metadata"].(map[string]interface{}); ok {
			workloadName, _ = meta["name"].(string)
		}
		// NOTE(fix): this duplicate check previously lived inside the metadata branch, so two
		// files without a metadata map would both key to "" and silently overwrite each other.
		if _, ok := workloadToRawScore[workloadName]; ok {
			return nil, nil, fmt.Errorf("workload name '%s' in file '%s' is used more than once", workloadName, fileName)
		}
		workloadNames = append(workloadNames, workloadName)
		workloadToRawScore[workloadName] = out
	}
	return workloadNames, workloadToRawScore, nil
}

// init registers the generate command and its flags on the root command.
func init() {
	generateCommand.Flags().StringP("output", "o", "compose.yaml", "The output file to write the composed compose file to")
	generateCommand.Flags().String(generateCmdOverridesFileFlag, "", "An optional file of Score overrides to merge in")
	generateCommand.Flags().StringArray(generateCmdOverridePropertyFlag, []string{}, "An optional set of path=key overrides to set or remove")
	rootCmd.AddCommand(generateCommand)
}

// parseAndApplyOverrideFile reads the YAML overrides file at entry and deep-merges it over spec
// in place, with values from the file taking precedence. flagName is used only to build error
// messages that point the user at the offending flag.
func parseAndApplyOverrideFile(entry string, flagName string, spec map[string]interface{}) error {
	raw, err := os.ReadFile(entry)
	if err != nil {
		return fmt.Errorf("--%s '%s' is invalid, failed to read file: %w", flagName, entry, err)
	}
	slog.Info(fmt.Sprintf("Applying overrides from %s to workload", entry))
	var out map[string]interface{}
	if err := yaml.Unmarshal(raw, &out); err != nil {
		return fmt.Errorf("--%s '%s' is invalid: failed to decode yaml: %w", flagName, entry, err)
	}
	// mergo.WithOverride makes values in the overrides file win over the existing spec values.
	if err := mergo.Merge(&spec, out, mergo.WithOverride); err != nil {
		return fmt.Errorf("--%s '%s' failed to apply: %w", flagName, entry, err)
	}
	return nil
}

// parseAndApplyOverrideProperty applies a single "path=value" override entry to spec in place.
// An empty value deletes the node at the dotted path; otherwise the value is decoded as YAML and
// written at the path. flagName is used only in error messages.
func parseAndApplyOverrideProperty(entry string, flagName string, spec map[string]interface{}) error {
	path, rawValue, found := strings.Cut(entry, "=")
	if !found {
		return fmt.Errorf("--%s '%s' is invalid, expected a =-separated path and value", flagName, entry)
	}
	if rawValue == "" {
		// An empty value means "remove whatever is at this path".
		slog.Info(fmt.Sprintf("Overriding '%s' in workload", path))
		if err := writePathInStruct(spec, parseDotPathParts(path), true, nil); err != nil {
			return fmt.Errorf("--%s '%s' could not be applied: %w", flagName, entry, err)
		}
		return nil
	}
	var value interface{}
	// NOTE(fix): the value is decoded as YAML; the old error message incorrectly said "json".
	if err := yaml.Unmarshal([]byte(rawValue), &value); err != nil {
		return fmt.Errorf("--%s '%s' is invalid, failed to unmarshal value as yaml: %w", flagName, entry, err)
	}
	slog.Info(fmt.Sprintf("Overriding '%s' in workload", path))
	if err := writePathInStruct(spec, parseDotPathParts(path), false, value); err != nil {
		return fmt.Errorf("--%s '%s' could not be applied: %w", flagName, entry, err)
	}
	return nil
}
Loading

0 comments on commit 72146d8

Please sign in to comment.