diff --git a/CHANGES.txt b/CHANGES.txt
index e47ae317..d3a7bec0 100644
--- a/CHANGES.txt
+++ b/CHANGES.txt
@@ -1,3 +1,45 @@
+1.5
+
+Features
+- Platform-independent user data provider option (formatted like a properties file)
+- Ability to run Asgard in one AWS account and AssumeRole to manage a different account
+- Automated deployment can accept arbitrary user-chosen steps
+- Each application can have an optional application group
+- Each application can have optional tags
+- Configurable maximum number of ASGs per cluster
+- REST endpoints for retrieving a deployment workflow plan JSON blob, and for starting a workflow with that JSON
+- Additional ways to configure Asgard to get AWS credentials, including from an IAM profile
+- Updated list of AWS instance types
+- Links to security groups include both name and ID
+- Made it possible to skip cache loading at startup, using a runtime flag API
+- Support for HTTPS ELB listeners (Thanks Greg Dziemidowicz)
+- Configurable AWS API socket timeout value
+- Shared visibility of all in-memory running tasks across Asgard instances in a cluster
+- Instance reservations can be filtered by offering type
+
+
+Infrastructure
+- Turned off noisy, legacy task completion emails
+- Introduced AngularJS for auto deployment screens, with Grunt build and JavaScript unit tests
+- Asgard's health check fails while initializing, then passes from that point on, for desired load balancer behavior
+- Asgard can register with a Eureka service for conventional discoverability
+- "Wither" function to have Asgard delete itself after it finishes all running in-memory tasks
+- Separated SimpleDB service from Application service
+- Store SWF tokens in the database
+- Ability to set arbitrary headers via RestClientService (Thanks e0d)
+- Upgraded frigga library to allow letters in version strings of AMI appversion tags
+- Converted more JUnit tests to Spock
+
+
+Bug Fixes
+- List of VPCs for security group creation only includes VPCs that have conventionally labeled subnets
+- Lists of more than 400 ELBs can be retrieved successfully
+- Lists of more than 100 RDS DB instances can be retrieved successfully
+- Rolling push can work with spot instances (Thanks Dale Wijnand)
+- Force U.S. 
dollars for Amazon-listed currency amounts (Thanks Dale Wijnand) +- Allow an initial size of 0 for creating the next ASG in a cluster + + 1.4.2 Features diff --git a/app/scripts/controllers/deployment/detail.js b/app/scripts/controllers/deployment/detail.js index 08c302d5..ec178fe6 100644 --- a/app/scripts/controllers/deployment/detail.js +++ b/app/scripts/controllers/deployment/detail.js @@ -4,18 +4,21 @@ angular.module('asgardApp') .controller('DeploymentDetailCtrl', function ($scope, $routeParams, $http, $timeout) { var deploymentId = $routeParams.deploymentId; var shouldPoll = true; + $scope.readOnlyDeploymentSteps = true; + $scope.targetAsgTypes = ["Previous", "Next"]; var retrieveDeployment = function() { $http.get('deployment/show/' + deploymentId + '.json').success(function(data, status, headers, config) { $scope.deployment = data; shouldPoll = !$scope.deployment.done; - var text =''; + var text = ''; angular.forEach($scope.deployment.log, function(value) { text = text + value + '\n'; }); $scope.logText = text; }); }; + var poll = function() { retrieveDeployment(); if (shouldPoll) { @@ -24,6 +27,14 @@ angular.module('asgardApp') }; poll(); + $scope.getLogForStep = function(stepIndex) { + return $scope.deployment.logForSteps[stepIndex]; + }; + + $scope.stepUrl = function(type) { + return '/views/deployment/' + type + 'Step.html'; + }; + $scope.encodedWorkflowExecutionIds = function() { var runId = $scope.deployment.workflowExecution.runId; var workflowId = $scope.deployment.workflowExecution.workflowId; @@ -42,6 +53,27 @@ angular.module('asgardApp') judgeDeployment('proceed'); }; + $scope.getCurrentStep = function() { + return $scope.deployment.logForSteps.length - 1; + }; + + $scope.getStepStatus = function(stepIndex) { + var currentStep = $scope.getCurrentStep(); + if (stepIndex < currentStep) { + return "success"; + } + if (stepIndex === currentStep) { + if ($scope.deployment.status === "completed" && currentStep === $scope.deployment.steps.length - 1) { + return "success"; + } + if ($scope.deployment.status !== "running") { + return "failure"; + } + return "running"; + } + return "queued"; + }; + var judgeDeployment = function(judgment) { $http.post('deployment/' + judgment, { id: deploymentId, diff --git a/app/scripts/controllers/deployment/new.js b/app/scripts/controllers/deployment/new.js index ba944b2f..5eccb5ee 100644 --- a/app/scripts/controllers/deployment/new.js +++ b/app/scripts/controllers/deployment/new.js @@ -4,10 +4,221 @@ angular.module("asgardApp") .controller("DeploymentNewCtrl", function ($scope, $routeParams, $http, $location) { $scope.clusterName = $routeParams.clusterName; $scope.hideAdvancedItems = true; + $scope.hideJsonSteps = true; + $scope.hideHtmlSteps = false; + $scope.hideShowMoreAmisLink = false; + $scope.targetAsgTypes = ["Previous", "Next"]; + $scope.count= 0; + $scope.selectionsForSubnet = {} + + var isSameStepBeforeOrAfter = function(stepTypeName, index) { + if (index > 0 && index < $scope.generated.stepsDisplay.length - 1) { + return $scope.generated.stepsDisplay[index - 1].type === stepTypeName || $scope.generated.stepsDisplay[index + 1].type === stepTypeName; + } + if (index > 1) { + return $scope.generated.stepsDisplay[index - 1].type === stepTypeName; + } + if (index < $scope.generated.stepsDisplay.length) { + return $scope.generated.stepsDisplay[index + 1].type === stepTypeName; + } + return true; + }; + + var isLastStep = function(index) { + return index === $scope.generated.stepsDisplay.length - 1; + }; + + var isFirstStep = 
function(index) { + return index === 0; + }; + + var firstIndexOfStepType = function(stepTypeName) { + var i, n = $scope.generated.stepsDisplay.length; + for (i = 0; i < n; ++i) { + var nextStep = $scope.generated.stepsDisplay[i]; + if ('type' in nextStep && nextStep.type === stepTypeName) { + return i; + } + } + return undefined; + }; + + var isBeforeCreateStep = function(index) { + return index < firstIndexOfStepType("CreateAsg"); + }; + + var isAfterDeleteStep = function(index) { + return index > firstIndexOfStepType("DeleteAsg"); + }; + + var stepTypes = { + "Wait": { + display: "Wait", + isAllowed: function(index) { + return !isSameStepBeforeOrAfter("Wait", index) && !isLastStep(index); + }, + add: function(index) { + $scope.generated.stepsDisplay.splice(index, 0, {"type":"Wait", "durationMinutes":60}); + } + }, + "Judgment": { + display: "Judgment", + isAllowed: function(index) { + return !isSameStepBeforeOrAfter("Judgment", index) && !isLastStep(index); + }, + add: function(index) { + $scope.generated.stepsDisplay.splice(index, 0, {"type":"Judgment", "durationMinutes":120}); + } + }, + "ResizeAsg": { + display: "Resize", + isAllowed: function(index) { + return !isBeforeCreateStep(index) && !isAfterDeleteStep(index); + }, + add: function(index) { + $scope.generated.stepsDisplay.splice(index, 0, + {"type":"Resize", "targetAsg":"Next", "capacity":0, "startUpTimeoutMinutes":40}); + } + }, + "DisableAsg": { + display: "Disable", + isAllowed: function(index) { + return !isBeforeCreateStep(index) && !isAfterDeleteStep(index); + }, + add: function(index) { + $scope.generated.stepsDisplay.splice(index, 0, {"type":"DisableAsg", "targetAsg":"Previous"}); + } + }, + "EnableAsg": { + display: "Enable", + isAllowed: function(index) { + return !isBeforeCreateStep(index) && !isAfterDeleteStep(index); + }, + add: function(index) { + $scope.generated.stepsDisplay.splice(index, 0, {"type":"EnableAsg", "targetAsg":"Next"}); + } + }, + "DeleteAsg": { + display: "Delete", + isAllowed: function(index) { + return !isBeforeCreateStep(index) && !isAfterDeleteStep(index) && !firstIndexOfStepType("DeleteAsg") + && isLastStep(index); + }, + add: function(index) { + $scope.generated.stepsDisplay.splice(index, 0, {"type":"DeleteAsg", "targetAsg":"Previous"}); + } + } + }; + + $scope.stepTypeNames = Object.keys(stepTypes); + + $scope.isStepAllowed = function(stepTypeName, index) { + return stepTypes[stepTypeName].isAllowed(index); + }; + + $scope.stepTypeDisplay = function(stepTypeName) { + return stepTypes[stepTypeName].display; + }; + + $scope.addStep = function(stepTypeName, index) { + resetStepsDisplay(); + stepTypes[stepTypeName].add(index); + $scope.generated.stepsDisplay.splice(index, 0, {showSteps: false}); + }; + + $scope.removeStep = function(index) { + resetStepsDisplay(); + $scope.generated.stepsDisplay.splice(index, 2); + }; + + var resetStepsDisplay = function() { + var i, n = $scope.generated.stepsDisplay.length; + for (i = 0; i < n; ++i) { + var nextStep = $scope.generated.stepsDisplay[i]; + if ('showSteps' in nextStep) { + nextStep.showSteps = false + } + } + }; + + var initStepsDisplay = function() { + $scope.generated = {}; + $scope.generated.stepsDisplay = [{showSteps: false}]; + var i, n = $scope.deploymentOptions.steps.length; + for (i = 0; i < n; ++i) { + var nextStep = $scope.deploymentOptions.steps[i]; + $scope.generated.stepsDisplay.push(nextStep); + $scope.generated.stepsDisplay.push({showSteps: false}); + } + }; + + $scope.editJsonSteps = function() { + $scope.hideHtmlSteps = true; + }; 
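For reference, the JSON blob that editJsonSteps and saveJsonSteps round-trip is just an array of step objects shaped like the literals used in stepTypes above. A minimal standalone sketch (the capacity value is illustrative; plain JSON.parse stands in for angular.fromJson, which wraps it):

    // Example steps blob in the format saveJsonSteps expects.
    var exampleJsonSteps = JSON.stringify([
      { type: 'CreateAsg' },
      { type: 'Resize', targetAsg: 'Next', capacity: 3, startUpTimeoutMinutes: 40 },
      { type: 'Judgment', durationMinutes: 120 },
      { type: 'DisableAsg', targetAsg: 'Previous' },
      { type: 'DeleteAsg', targetAsg: 'Previous' }
    ], null, 2);
    var parsedSteps = JSON.parse(exampleJsonSteps); // angular.fromJson delegates to JSON.parse
    console.log(parsedSteps.length); // 5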
+ + $scope.saveJsonSteps = function() { + $scope.jsonStepsParseError = null; + var jsonSteps; + try { + jsonSteps = angular.fromJson($scope.generated.jsonSteps); + } catch(e) { + $scope.jsonStepsParseError = e.stack; + return; + } + var steps = []; + var i, n = jsonSteps.length; + for (i = 0; i < n; ++i) { + var nextStep = jsonSteps[i]; + steps.push(nextStep); + } + $scope.deploymentOptions.steps = steps; + initStepsDisplay(); + $scope.hideHtmlSteps = false; + }; + + var constructStepsFromDisplay = function() { + var steps = []; + var i, n = $scope.generated.stepsDisplay.length; + for (i = 0; i < n; ++i) { + var nextStep = $scope.generated.stepsDisplay[i]; + if ('type' in nextStep) { + steps.push(nextStep); + } + } + $scope.deploymentOptions.steps = angular.fromJson(angular.toJson(steps)); + }; + + $scope.$watch("generated.stepsDisplay", function() { + if ($scope.deploymentOptions) { + constructStepsFromDisplay(); + } + }, true); + + $scope.$watch("deploymentOptions.steps", function() { + if ($scope.deploymentOptions) { + var text ='[\n'; + var i, n = $scope.deploymentOptions.steps.length; + for (i = 0; i < n; ++i) { + var nextStep = $scope.deploymentOptions.steps[i]; + text = text + ' ' + angular.toJson(nextStep); + if (i < n - 1) { + text = text + ',\n'; + } + } + text = text + '\n]'; + $scope.generated.jsonSteps = text; + } + }); + + $scope.toggleShowStepTypes = function(index) { + var value = $scope.generated.stepsDisplay[index].showSteps; + $scope.generated.stepsDisplay[index].showSteps = !value; + }; + var prepareParams = { params: { includeEnvironment: true, - deploymentTemplateName: "CreateJudgeAndCleanUp" + deploymentTemplateName: "CreateAndCleanUpPreviousAsg" } }; @@ -16,10 +227,21 @@ angular.module("asgardApp") $scope.environment = data.environment; $scope.asgOptions = data.asgOptions; $scope.lcOptions = data.lcOptions; - if ($scope.asgOptions) { - $scope.suspendAZRebalance = $scope.asgOptions.suspendedProcesses.indexOf("AZRebalance") > -1; - $scope.suspendAddToLoadBalancer = $scope.asgOptions.suspendedProcesses.indexOf("AddToLoadBalancer") > -1; - } + $scope.suspendAZRebalance = $scope.asgOptions.suspendedProcesses.indexOf("AZRebalance") > -1; + $scope.suspendAddToLoadBalancer = $scope.asgOptions.suspendedProcesses.indexOf("AddToLoadBalancer") > -1; + initStepsDisplay(); + angular.forEach($scope.environment.subnetPurposes.concat(""), function(value) { + $scope.selectionsForSubnet[value] = { + securityGroups: [], + availabilityZones: [], + loadBalancerNames: [] + } + }); + $scope.selectionsForSubnet[$scope.asgOptions.subnetPurpose] = { + securityGroups: $scope.lcOptions.securityGroups, + availabilityZones: $scope.asgOptions.availabilityZones, + loadBalancerNames: $scope.asgOptions.loadBalancerNames + }; }); $scope.$watch("asgOptions.subnetPurpose", function() { @@ -28,6 +250,14 @@ angular.module("asgardApp") } }); + $scope.$watch("selectionsForSubnet[asgOptions.subnetPurpose].securityGroups", function() { + if ($scope.asgOptions) { + $scope.selectedSecurityGroupNames = $scope.environment.securityGroups.filter(function(value) { + return $scope.selectionsForSubnet[$scope.asgOptions.subnetPurpose].securityGroups.indexOf(value.id) !== -1; + }).map(function(value) { return value.name }) + } + }); + $scope.$watch("suspendAZRebalance", function() { if ($scope.asgOptions) { toggleSuspendedProcess("AZRebalance", $scope.suspendAZRebalance); @@ -59,8 +289,21 @@ angular.module("asgardApp") $scope.hideAdvancedItems = !$scope.hideAdvancedItems }; + $scope.toggleJsonSteps = function() { + 
$scope.hideJsonSteps = !$scope.hideJsonSteps + }; + + $scope.stepUrl = function(type) { + return '/views/deployment/' + type + 'Step.html'; + }; + $scope.startDeployment = function() { $scope.startingDeployment = true; + constructStepsFromDisplay(); + var subnetSpecificSelections = $scope.selectionsForSubnet[$scope.asgOptions.subnetPurpose]; + $scope.lcOptions.securityGroups = subnetSpecificSelections.securityGroups; + $scope.asgOptions.availabilityZones = subnetSpecificSelections.availabilityZones; + $scope.asgOptions.loadBalancerNames = subnetSpecificSelections.loadBalancerNames; var deployment = { deploymentOptions: $scope.deploymentOptions, asgOptions: $scope.asgOptions, @@ -76,4 +319,11 @@ angular.module("asgardApp") }); }; + $scope.retrieveAllAmis = function() { + $http.get("deployment/allAmis/").success(function(data) { + $scope.environment.images = data; + $scope.hideShowMoreAmisLink = true + }); + }; + }); diff --git a/app/views/deployment/CreateAsgStep.html b/app/views/deployment/CreateAsgStep.html new file mode 100644 index 00000000..51c15b70 --- /dev/null +++ b/app/views/deployment/CreateAsgStep.html @@ -0,0 +1,10 @@ +
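The controller above keys security group, availability zone, and load balancer selections by subnet purpose, and startDeployment copies the active purpose's selections into lcOptions and asgOptions just before submitting. A minimal standalone sketch of that copy-at-start behavior (the purpose and resource names are hypothetical):

    // Mirror of the logic in startDeployment, using the same scope field names.
    function applySubnetSelections(selectionsForSubnet, asgOptions, lcOptions) {
      var selections = selectionsForSubnet[asgOptions.subnetPurpose];
      lcOptions.securityGroups = selections.securityGroups;
      asgOptions.availabilityZones = selections.availabilityZones;
      asgOptions.loadBalancerNames = selections.loadBalancerNames;
    }

    // Selections made under the currently chosen purpose ('vpc0' here) win over the others.
    var selectionsForSubnet = {
      '': { securityGroups: [], availabilityZones: [], loadBalancerNames: [] },
      'vpc0': { securityGroups: ['sg-12345678'], availabilityZones: ['us-east-1a'], loadBalancerNames: ['hello-frontend'] }
    };
    var asgOptions = { subnetPurpose: 'vpc0', availabilityZones: [], loadBalancerNames: [] };
    var lcOptions = { securityGroups: [] };
    applySubnetSelections(selectionsForSubnet, asgOptions, lcOptions);
    console.log(lcOptions.securityGroups); // [ 'sg-12345678' ]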
+

Create ASG

+
+
+
+ Creates the new Auto Scaling Group and Launch Configuration with 0 instances. +
+
+
{{logMessage}}
+
diff --git a/app/views/deployment/DeleteAsgStep.html b/app/views/deployment/DeleteAsgStep.html new file mode 100644 index 00000000..864c48bd --- /dev/null +++ b/app/views/deployment/DeleteAsgStep.html @@ -0,0 +1,13 @@ +
+

Delete {{step.targetAsg}} ASG

+
+
+
+
+ Delete the + {{step.targetAsg}} + ASG. +
+
+
{{logMessage}}
+
diff --git a/app/views/deployment/DisableAsgStep.html b/app/views/deployment/DisableAsgStep.html new file mode 100644 index 00000000..9e270d8e --- /dev/null +++ b/app/views/deployment/DisableAsgStep.html @@ -0,0 +1,13 @@ +
+

Disable {{step.targetAsg}} ASG

+
+
+
+
+ This turns off traffic to the + + ASG. +
+
+
{{logMessage}}
+
diff --git a/app/views/deployment/EnableAsgStep.html b/app/views/deployment/EnableAsgStep.html new file mode 100644 index 00000000..a05a68a2 --- /dev/null +++ b/app/views/deployment/EnableAsgStep.html @@ -0,0 +1,13 @@ +
+

Enable {{step.targetAsg}} ASG

+
+
+
+
+ This turns on traffic to the + + ASG and splits traffic between enabled ASGs in the cluster. +
+
+
{{logMessage}}
+
diff --git a/app/views/deployment/JudgmentStep.html b/app/views/deployment/JudgmentStep.html new file mode 100644 index 00000000..eb45d3cc --- /dev/null +++ b/app/views/deployment/JudgmentStep.html @@ -0,0 +1,19 @@ +
+

Wait For Judgment

+
+
+
+
+ Deployment will be paused and notifications will be sent now and after + + minutes. +
+
+
{{logMessage}}
+
+
+
+ + +
+
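The judgment buttons stubbed in this template feed the judgeDeployment call defined in the detail controller, which POSTs the choice to 'deployment/<judgment>' with the deployment id. Only the proceed case and the id field are visible in this diff, so this browser-side sketch assumes the rest:

    // Post a judgment for a paused deployment; judgment is 'proceed' (shown in detail.js)
    // or, presumably, 'rollback'.
    function judgeDeployment(deploymentId, judgment) {
      return fetch('deployment/' + judgment, {
        method: 'POST',
        headers: { 'Content-Type': 'application/json' },
        body: JSON.stringify({ id: deploymentId })
      });
    }
    // judgeDeployment('42', 'proceed');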
diff --git a/app/views/deployment/ResizeStep.html b/app/views/deployment/ResizeStep.html new file mode 100644 index 00000000..a5eca39f --- /dev/null +++ b/app/views/deployment/ResizeStep.html @@ -0,0 +1,19 @@ +
+

Resize {{step.targetAsg}} ASG

+
+
+
+
+
+ Resize + + to + + healthy instance(s) within + + minutes or roll back. +
+
+
+
{{logMessage}}
+
diff --git a/app/views/deployment/WaitStep.html b/app/views/deployment/WaitStep.html new file mode 100644 index 00000000..88d64ec7 --- /dev/null +++ b/app/views/deployment/WaitStep.html @@ -0,0 +1,13 @@ +
+

Wait

+
+
+
+
+ Wait + + minutes. +
+
+
{{logMessage}}
+
diff --git a/app/views/deployment/detail.html b/app/views/deployment/detail.html index 20b72717..0d8267c6 100644 --- a/app/views/deployment/detail.html +++ b/app/views/deployment/detail.html @@ -3,59 +3,49 @@

Deployment

-
- - -
-
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Return to
Cluster:
- {{deployment.clusterName}} -
Description:{{deployment.description}}
Workflow Execution Details: - Workflow Execution -
Region:{{deployment.regionCode}}
Status:{{deployment.status}}
Start Time:{{deployment.startTime | date: 'yyyy-MM-dd HH:mm:ss Z'}}
Duration:{{deployment.durationString}}
Owner:{{deployment.owner}}
Log: - -
+
+
+
+
Return to Cluster:
+ +
+
+
Region:
+
{{deployment.regionCode}}
+
+
+
Status:
+
{{deployment.status}}
+
+
+
Start Time:
+
{{deployment.startTime | date: 'yyyy-MM-dd HH:mm:ss Z'}}
+
+
+
Duration:
+
{{deployment.durationString}}
+
+
+
Owner:
+
{{deployment.owner}}
+
+
+
+
+
+
+
+
+
diff --git a/app/views/deployment/new.html b/app/views/deployment/new.html index e0f46ce5..9d0f31c6 100644 --- a/app/views/deployment/new.html +++ b/app/views/deployment/new.html @@ -16,14 +16,10 @@

Deploy Next ASG for Cluster '{{clusterName}}'

-
-

{{environment.nextGroupName}}

-
-
-
- Advanced Options -
+
+ Advanced Options
+

{{environment.nextGroupName}}

@@ -54,7 +50,7 @@

{{environment.nextGroupName}}

+  Show more AMIs
@@ -80,8 +76,10 @@

{{environment.nextGroupName}}

-
@@ -214,7 +212,8 @@

{{environment.nextGroupName}}

- @@ -247,8 +246,9 @@

{{environment.nextGroupName}}

- @@ -273,86 +273,29 @@

Deployment Configuration

-

Steps

+
+ JSON +
+

Steps

-
-
-
-

Create ASG

-
- Wait - - minutes before creation. -
-
-
-

Resize ASG

-
- Resize to - - healthy instance(s) within - - minutes or roll back. -
-
-
- - -
-
-

Wait For Judgment

-
- Deployment will be paused and notifications will be sent now and after - - minutes. -
-
-
-

Resize ASG

-
- Resize to - - healthy instance(s) within - - minutes or roll back. -
-
-
-

Wait For Judgment

-
- Deployment will be paused and notifications will be sent now and after - - minutes. -
-
-
- - -
-
-

Disable Previous ASG

-
- This turns off traffic to the previous ASG and sends full traffic to the new ASG. -
-
-
-

Wait For Judgment

-
- Deployment will be paused and notifications will be sent now and after - - minutes. -
-
-
- - +
+
+
{{jsonStepsParseError}}
+
+
+ +
+
+
+
-
-

Clean Up

-
- Delete the previous ASG. -
+
+
+
diff --git a/application.properties b/application.properties index c7178e9c..f35ed7b2 100644 --- a/application.properties +++ b/application.properties @@ -1,6 +1,6 @@ #Grails Metadata file -#Wed Aug 07 12:22:31 PST 2013 +#Wed Jun 05 11:45:00 PST 2014 app.grails.version=2.2.4 app.name=asgard app.servlet.version=2.4 -app.version=1.4.2 +app.version=1.5 diff --git a/grails-app/conf/Config.groovy b/grails-app/conf/Config.groovy index 700fd13b..99b5f69c 100644 --- a/grails-app/conf/Config.groovy +++ b/grails-app/conf/Config.groovy @@ -73,12 +73,14 @@ log4j = { error 'com.amazonaws.services.simpleworkflow.flow.worker.DecisionTaskPoller' environments { - development { + def devConfig = { console name: 'stdout', layout: pattern(conversionPattern: '[%d{ISO8601}] %c{4} %m%n') root { info 'stdout' } } + development devConfig + mcetestLocalDev devConfig } } diff --git a/grails-app/conf/spring/resources.groovy b/grails-app/conf/spring/resources.groovy index d5f7771b..42b807c4 100644 --- a/grails-app/conf/spring/resources.groovy +++ b/grails-app/conf/spring/resources.groovy @@ -19,9 +19,9 @@ import com.google.common.base.CaseFormat import com.netflix.asgard.CachedMapBuilder import com.netflix.asgard.Caches import com.netflix.asgard.CsiAsgAnalyzer -import com.netflix.asgard.DefaultAdvancedUserDataProvider -import com.netflix.asgard.DefaultUserDataProvider -import com.netflix.asgard.NetflixAdvancedUserDataProvider +import com.netflix.asgard.userdata.DefaultAdvancedUserDataProvider +import com.netflix.asgard.userdata.DefaultUserDataProvider +import com.netflix.asgard.userdata.NetflixAdvancedUserDataProvider import com.netflix.asgard.NoOpAsgAnalyzer import com.netflix.asgard.Region import com.netflix.asgard.ServiceInitLoggingBeanPostProcessor @@ -34,6 +34,7 @@ import com.netflix.asgard.deployment.DeploymentActivitiesImpl import com.netflix.asgard.eureka.EurekaClientHolder import com.netflix.asgard.model.CsiScheduledAnalysisFactory import com.netflix.asgard.server.DeprecatedServerNames +import com.netflix.asgard.userdata.PropertiesUserDataProvider import groovy.io.FileType beans = { @@ -56,6 +57,10 @@ beans = { objectMapper(ObjectMapper) + propertiesUserDataProvider(PropertiesUserDataProvider) { bean -> + bean.lazyInit = true + } + defaultUserDataProvider(DefaultUserDataProvider) { bean -> bean.lazyInit = true } diff --git a/grails-app/controllers/com/netflix/asgard/AutoScalingController.groovy b/grails-app/controllers/com/netflix/asgard/AutoScalingController.groovy index 051cebca..6204e155 100644 --- a/grails-app/controllers/com/netflix/asgard/AutoScalingController.groovy +++ b/grails-app/controllers/com/netflix/asgard/AutoScalingController.groovy @@ -39,6 +39,7 @@ import com.netflix.asgard.model.GroupedInstance import com.netflix.asgard.model.InstancePriceType import com.netflix.asgard.model.SubnetTarget import com.netflix.asgard.model.Subnets +import com.netflix.frigga.Names import com.netflix.grails.contextParam.ContextParam import grails.converters.JSON import grails.converters.XML @@ -488,13 +489,13 @@ class AutoScalingController { } def generateName() { - withFormat { + request.withFormat { json { if (params.appName) { try { String groupName = Relationships.buildGroupName(params, true) - List envVars = Relationships.labeledEnvironmentVariables(groupName, - configService.userDataVarPrefix) + List envVars = Relationships.labeledEnvVarsMap(Names.parseName(groupName), + configService.userDataVarPrefix).collect { k, v -> "${k}=${v}" } Map result = [groupName: groupName, envVars: envVars] render(result as 
JSON) } catch (Exception e) { diff --git a/grails-app/controllers/com/netflix/asgard/DeploymentController.groovy b/grails-app/controllers/com/netflix/asgard/DeploymentController.groovy index e655ca1e..52e6dbf5 100644 --- a/grails-app/controllers/com/netflix/asgard/DeploymentController.groovy +++ b/grails-app/controllers/com/netflix/asgard/DeploymentController.groovy @@ -77,11 +77,7 @@ class DeploymentController { if (!deployment) { Requests.renderNotFound('Deployment', id, this) } else { - withFormat { - html { return [ deployment : deployment ] } - xml { new XML(deployment).render(response) } - json { new JSON(deployment).render(response) } - } + render objectMapper.writer().writeValueAsString(deployment) } } @@ -147,6 +143,7 @@ class DeploymentController { asgOptions.with { autoScalingGroupName = null launchConfigurationName = null + subnetPurpose = subnetPurpose ?: "" } LaunchConfiguration lc = awsAutoScalingService.getLaunchConfiguration(userContext, @@ -159,6 +156,10 @@ class DeploymentController { instanceMonitoringIsEnabled = instanceMonitoringIsEnabled != null ? instanceMonitoringIsEnabled : configService.enableInstanceMonitoring blockDeviceMappings = null // SWF can not handle serializing this, and Asgard builds them per instance type. + securityGroups = lcOptions.securityGroups.collect { + // all security groups should be ids rather than names + awsEc2Service.getSecurityGroup(userContext, it) + }.sort { it.groupName }.collect { it.groupId } } Map attributes = [ @@ -167,7 +168,7 @@ class DeploymentController { ] DeploymentTemplate deploymentTemplate = DeploymentTemplate.of(deploymentTemplateName) if (deploymentTemplate) { - DeploymentWorkflowOptions deploymentOptions = deploymentTemplate.deployment + DeploymentWorkflowOptions deploymentOptions = deploymentTemplate.getDeployment(asgOptions.desiredCapacity) String groupName = lastGroup.autoScalingGroupName String appName = Relationships.appNameFromGroupName(groupName) String email = applicationService.getEmailFromApp(userContext, appName) @@ -214,8 +215,7 @@ class DeploymentController { price: it.monthlyLinuxOnDemandPrice ? it.monthlyLinuxOnDemandPrice + '/mo' : ''] }, securityGroups: effectiveSecurityGroups.collect { - [id: it.groupId, name: it.groupName, selection: it.vpcId ? it.groupId : it.groupName, - vpcId: it.vpcId ?: ''] + [id: it.groupId, name: it.groupName, vpcId: it.vpcId ?: ''] }, images: images.sort { it.imageLocation.toLowerCase() }.collect { [id: it.imageId, imageLocation: it.imageLocation] @@ -227,6 +227,18 @@ class DeploymentController { render objectMapper.writer().writeValueAsString(attributes) } + /** + * @return all AMIs for account + */ + def allAmis() { + UserContext userContext = UserContext.of(request) + Collection images = awsEc2Service.getAccountImages(userContext) + List imageDetails = images.sort { it.imageLocation.toLowerCase() }.collect { + [id: it.imageId, imageLocation: it.imageLocation] + } + render objectMapper.writer().writeValueAsString(imageDetails) + } + /** * Start a deployment. 
* diff --git a/grails-app/controllers/com/netflix/asgard/HostedZoneController.groovy b/grails-app/controllers/com/netflix/asgard/HostedZoneController.groovy index b2f86ff0..d9bb1ac2 100644 --- a/grails-app/controllers/com/netflix/asgard/HostedZoneController.groovy +++ b/grails-app/controllers/com/netflix/asgard/HostedZoneController.groovy @@ -184,9 +184,21 @@ class HostedZoneController { } private resourceRecordSetFromCommandObject(ResourceRecordSetCommand cmd) { - String hostedZoneId = cmd.hostedZoneId - List resourceRecordStrings = Requests.ensureList(cmd.resourceRecords?.split('\n')).collect { it.trim() } - String aliasTarget = cmd.aliasTarget + List resourceRecords + String resourceRecordsString = cmd.resourceRecords?.trim() + if (resourceRecordsString) { + resourceRecords = Requests.ensureList(resourceRecordsString.split('\n')).collect { + new ResourceRecord(it.trim()) + } + } + AliasTarget aliasTarget = null + if (cmd.aliasTarget) { + String aliasTargetHostedZoneId = params.aliasTargetHostedZoneId ?: cmd.hostedZoneId + aliasTarget = new AliasTarget(aliasTargetHostedZoneId, cmd.aliasTarget) + if (params.evaluateTargetHealth) { + aliasTarget.setEvaluateTargetHealth(params.evaluateTargetHealth == "Yes") + } + } new ResourceRecordSet( name: cmd.resourceRecordSetName, type: cmd.type, @@ -195,8 +207,8 @@ class HostedZoneController { region: cmd.resourceRecordSetRegion ?: null, failover: cmd.failover ?: null, tTL: cmd.ttl ?: null, - resourceRecords: resourceRecordStrings.collect { new ResourceRecord(it) } ?: null, - aliasTarget: aliasTarget ? new AliasTarget(hostedZoneId, aliasTarget) : null, + resourceRecords: resourceRecords, + aliasTarget: aliasTarget, healthCheckId: cmd.healthCheckId ?: null ) } @@ -224,6 +236,8 @@ class ResourceRecordSetCommand { Long ttl String resourceRecords String aliasTarget + String aliasTargetHostedZoneId + String evaluateTargetHealth String healthCheckId String comment } diff --git a/grails-app/controllers/com/netflix/asgard/ImageController.groovy b/grails-app/controllers/com/netflix/asgard/ImageController.groovy index 598e371f..286ab70f 100644 --- a/grails-app/controllers/com/netflix/asgard/ImageController.groovy +++ b/grails-app/controllers/com/netflix/asgard/ImageController.groovy @@ -422,12 +422,14 @@ class ImageDeleteCommand { AwsAutoScalingService awsAutoScalingService AwsEc2Service awsEc2Service RestClientService restClientService + ConfigService configService def grailsApplication @SuppressWarnings("GroovyAssignabilityCheck") static constraints = { id(nullable: false, blank: false, size: 12..12, validator: { String value, ImageDeleteCommand command -> UserContext userContext = UserContext.of(Requests.request) + List promotionTargetServerRootUrls = configService.promotionTargetServerRootUrls String promotionTargetServer = command.grailsApplication.config.promote.targetServer String env = command.grailsApplication.config.cloud.accountName @@ -439,18 +441,20 @@ class ImageDeleteCommand { if (instances || launchConfigurations) { String reason = constructReason(instances, launchConfigurations) return ['image.imageId.used', value, env, reason] - } else if (promotionTargetServer) { + } else if (promotionTargetServerRootUrls) { // If the AMI is not in use on master server, check promoted data. 
- String url = "${promotionTargetServer}/${userContext.region}/image/references/${value}" - JSONElement json = command.restClientService.getAsJson(url) - if (json == null) { - return ['image.imageId.prodInaccessible', value, url] - } - Collection remoteInstances = json.instances - Collection remoteLaunchConfigurations = json.launchConfigurations - if (remoteInstances || remoteLaunchConfigurations) { - String reason = constructReason(remoteInstances, remoteLaunchConfigurations) - return ['image.imageId.used', value, 'prod', reason] + for (String remoteServer in promotionTargetServerRootUrls) { + String url = "${remoteServer}/${userContext.region}/image/references/${value}" + JSONElement json = command.restClientService.getAsJson(url) + if (json == null) { + return ['image.imageId.remoteInaccessible', value, url] + } + Collection remoteInstances = json.instances + Collection remoteLaunchConfigurations = json.launchConfigurations + if (remoteInstances || remoteLaunchConfigurations) { + String reason = constructReason(remoteInstances, remoteLaunchConfigurations) + return ['image.imageId.used', value, remoteServer, reason] + } } } null diff --git a/grails-app/controllers/com/netflix/asgard/SecurityController.groovy b/grails-app/controllers/com/netflix/asgard/SecurityController.groovy index a078b986..f6216359 100644 --- a/grails-app/controllers/com/netflix/asgard/SecurityController.groovy +++ b/grails-app/controllers/com/netflix/asgard/SecurityController.groovy @@ -23,6 +23,7 @@ import com.amazonaws.services.ec2.model.SecurityGroup import com.amazonaws.services.ec2.model.UserIdGroupPair import com.amazonaws.services.elasticloadbalancing.model.LoadBalancerDescription import com.amazonaws.services.elasticloadbalancing.model.SourceSecurityGroup +import com.netflix.asgard.model.Subnets import com.netflix.grails.contextParam.ContextParam import grails.converters.JSON import grails.converters.XML @@ -115,9 +116,11 @@ class SecurityController { } else { applications = applicationService.getRegisteredApplications(userContext) } + Subnets subnets = awsEc2Service.getSubnets(userContext) + Collection vpcIds = subnets.mapPurposeToVpcId().values() as Set [ applications: applications, - vpcIds: awsEc2Service.getVpcs(userContext)*.vpcId, + vpcIds: vpcIds, selectedVpcIds: params.selectedVpcIds, enableVpc: params.enableVpc, name: name, diff --git a/grails-app/i18n/messages.properties b/grails-app/i18n/messages.properties index ece1a386..4d9e5f3d 100644 --- a/grails-app/i18n/messages.properties +++ b/grails-app/i18n/messages.properties @@ -46,7 +46,7 @@ typeMismatch.java.math.BigDecimal=Property {0} must be a valid number typeMismatch.java.math.BigInteger=Property {0} must be a valid number image.imageId.used=Image {3} cannot be deleted because it is in use in {4} environment by {5} -image.imageId.prodInaccessible=Error: Image {3} cannot be deleted because AMI prod usage check failed on URL {4} +image.imageId.remoteInaccessible=Error: Image {3} cannot be deleted because AMI remote usage check failed on URL {4} application.name.illegalChar=Name can only contain letters, numbers, underscores, and dots application.name.nonexistent=The specified application does not exist in the application registry diff --git a/grails-app/services/com/netflix/asgard/AwsLoadBalancerService.groovy b/grails-app/services/com/netflix/asgard/AwsLoadBalancerService.groovy index c299e81e..bc424acf 100644 --- a/grails-app/services/com/netflix/asgard/AwsLoadBalancerService.groovy +++ 
b/grails-app/services/com/netflix/asgard/AwsLoadBalancerService.groovy @@ -28,6 +28,7 @@ import com.amazonaws.services.elasticloadbalancing.model.DeleteLoadBalancerReque import com.amazonaws.services.elasticloadbalancing.model.DeregisterInstancesFromLoadBalancerRequest import com.amazonaws.services.elasticloadbalancing.model.DescribeInstanceHealthRequest import com.amazonaws.services.elasticloadbalancing.model.DescribeLoadBalancersRequest +import com.amazonaws.services.elasticloadbalancing.model.DescribeLoadBalancersResult import com.amazonaws.services.elasticloadbalancing.model.DetachLoadBalancerFromSubnetsRequest import com.amazonaws.services.elasticloadbalancing.model.DisableAvailabilityZonesForLoadBalancerRequest import com.amazonaws.services.elasticloadbalancing.model.EnableAvailabilityZonesForLoadBalancerRequest @@ -43,6 +44,7 @@ import com.netflix.asgard.cache.CacheInitializer import com.netflix.asgard.model.InstanceStateData import com.netflix.asgard.model.SubnetTarget import com.netflix.asgard.model.Subnets +import com.netflix.asgard.retriever.AwsResultsRetriever import org.springframework.beans.factory.InitializingBean class AwsLoadBalancerService implements CacheInitializer, InitializingBean { @@ -85,8 +87,28 @@ class AwsLoadBalancerService implements CacheInitializer, InitializingBean { // Load Balancers + final AwsResultsRetriever loadBalancerRetriever = new AwsResultsRetriever() { + @Override + protected DescribeLoadBalancersResult makeRequest(Region region, DescribeLoadBalancersRequest request) { + awsClient.by(region).describeLoadBalancers(request) + } + @Override + protected List accessResult(DescribeLoadBalancersResult result) { + result.loadBalancerDescriptions + } + @Override + protected void setNextToken(DescribeLoadBalancersRequest request, String nextToken) { + request.withMarker(nextToken) + } + @Override + protected String getNextToken(DescribeLoadBalancersResult result) { + result.nextMarker + } + } + private List retrieveLoadBalancers(Region region) { - awsClient.by(region).describeLoadBalancers(new DescribeLoadBalancersRequest()).getLoadBalancerDescriptions() + loadBalancerRetriever.retrieve(region, new DescribeLoadBalancersRequest()) } Collection getLoadBalancers(UserContext userContext) { diff --git a/grails-app/services/com/netflix/asgard/ConfigService.groovy b/grails-app/services/com/netflix/asgard/ConfigService.groovy index ef680f8d..a0c76e9d 100644 --- a/grails-app/services/com/netflix/asgard/ConfigService.groovy +++ b/grails-app/services/com/netflix/asgard/ConfigService.groovy @@ -619,6 +619,23 @@ class ConfigService { grailsApplication.config.cloud?.userDataVarPrefix ?: 'CLOUD_' } + /** + * Only used by the example {@link com.netflix.asgard.userdata.NetflixAdvancedUserDataProvider}. 
+ * + * @return true if {@link com.netflix.asgard.userdata.NetflixAdvancedUserDataProvider} should use property file + * formatted user data for deploying Windows images + */ + boolean getUsePropertyFileUserDataForWindowsImages() { + grailsApplication.config.cloud?.usePropertyFileUserDataForWindowsImages ?: false + } + + /** + * @return the list of server root URLs for copying data such as image tags from a source account to target accounts + */ + List getPromotionTargetServerRootUrls() { + grailsApplication.config.promote?.targetServerRootUrls ?: [] + } + /** * @return the base server URL for generating links to the current Asgard instance in outgoing emails */ diff --git a/grails-app/services/com/netflix/asgard/ImageService.groovy b/grails-app/services/com/netflix/asgard/ImageService.groovy index 03bc5da9..942a927f 100644 --- a/grails-app/services/com/netflix/asgard/ImageService.groovy +++ b/grails-app/services/com/netflix/asgard/ImageService.groovy @@ -187,11 +187,13 @@ class ImageService implements BackgroundProcessInitializer { private Set getRemoteImageIdsInUse(Region region, Task task) { Set remoteImageIdsInUse = Sets.newHashSet() - String remoteServer = grailsApplication.config.promote.targetServer - String url = "${remoteServer}/${region.code}/image/used.json" - task.tryUntilSuccessful { - JSONArray jsonListOfImageIds = restClientService.getAsJson(url) as JSONArray - remoteImageIdsInUse.addAll(jsonListOfImageIds) + List promotionTargetServerRootUrls = configService.promotionTargetServerRootUrls + for (String remoteServer in promotionTargetServerRootUrls) { + String url = "${remoteServer}/${region.code}/image/used.json" + task.tryUntilSuccessful { + JSONArray jsonListOfImageIds = restClientService.getAsJson(url) as JSONArray + remoteImageIdsInUse.addAll(jsonListOfImageIds) + } } remoteImageIdsInUse } @@ -216,10 +218,10 @@ class ImageService implements BackgroundProcessInitializer { // Config values that might not be defined mustn't be assumed to be of type String. If you change def to // String here then the falsy value becomes "{}" which is a truthy value and breaks the if expression. Boolean imageTagPromotionEnabled = grailsApplication.config.promote.imageTags ?: false - String promotionTargetServer = grailsApplication.config.promote.targetServer ?: '' + List promotionTargetServers = configService.promotionTargetServerRootUrls String canonicalServerForBakeEnvironment = grailsApplication.config.promote.canonicalServerForBakeEnvironment ?: '' - if (!imageTagPromotionEnabled || !promotionTargetServer || !canonicalServerForBakeEnvironment) { + if (!imageTagPromotionEnabled || !promotionTargetServers || !canonicalServerForBakeEnvironment) { log.debug 'Environment not configured for tag replication.' return false } @@ -248,14 +250,21 @@ class ImageService implements BackgroundProcessInitializer { return } - log.info 'ImageTagReplicator starting' + List targetRootUrls = configService.promotionTargetServerRootUrls + log.info "ImageTagReplicator starting for promotion target servers ${targetRootUrls}" - // Try to connect to promotion target server. Abort if server is unavailable. - String promotionTargetServer = grailsApplication.config.promote.targetServer - checkServerHealth(promotionTargetServer) + for (String promotionTargetServer in targetRootUrls) { + try { + // Try to connect to promotion target server. Abort if server is unavailable. 
+ checkServerHealth(promotionTargetServer) - Region.values().each { replicateTagsForRegion(promotionTargetServer, it) } - log.info 'ImageTagReplicator done' + Region.values().each { replicateTagsForRegion(promotionTargetServer, it) } + log.info "Image tag replication done for ${promotionTargetServer}" + } catch (Exception e) { + log.error "ImageTagReplicator failed for ${promotionTargetServer}", e + } + } + log.info "ImageTagReplicator done for all promotion target servers ${targetRootUrls}" } catch (Exception e) { log.error "ImageTagReplicator failed: ${e}" } @@ -277,7 +286,7 @@ class ImageService implements BackgroundProcessInitializer { Multimap deletableTagNamesToImageIds = ArrayListMultimap.create() Multimap addableTagsToImageIds = ArrayListMultimap.create() - // Look through all the prod images. For each one, find its counterpart in the test images. + // Look through all the remote images. For each one, find its counterpart in the local images. // Correct any mismatches. prodImages.each { Image prodImage -> Image testImage = testImages.find { it.imageId == prodImage.imageId } @@ -298,11 +307,11 @@ class ImageService implements BackgroundProcessInitializer { } } deletableTagNamesToImageIds.keySet().each { tagKey -> - deleteRemoteTags(region, deletableTagNamesToImageIds.get(tagKey), tagKey) + deleteRemoteTags(promotionTargetServer, region, deletableTagNamesToImageIds.get(tagKey), tagKey) Time.sleepCancellably(grailsApplication.config.cloud.throttleMillis ?: 250) } addableTagsToImageIds.keySet().each { tag -> - addRemoteTags(region, addableTagsToImageIds.get(tag), tag) + addRemoteTags(promotionTargetServer, region, addableTagsToImageIds.get(tag), tag) Time.sleepCancellably(grailsApplication.config.cloud.throttleMillis ?: 250) } log.info "ImageTagReplicator finished in region ${region}" @@ -336,18 +345,16 @@ class ImageService implements BackgroundProcessInitializer { tagMap } - private deleteRemoteTags(Region region, Collection remoteImageIds, String name) { - log.info "Deleting prod image tags ${name} for ${remoteImageIds} in ${region}" - String promotionTargetServer = grailsApplication.config.promote.targetServer - String url = "${promotionTargetServer}/${region.code}/image/removeTags" + private deleteRemoteTags(String remoteServer, Region region, Collection remoteImageIds, String name) { + log.info "Deleting prod image tags ${name} for ${remoteImageIds} in ${remoteServer} ${region}" + String url = "${remoteServer}/${region.code}/image/removeTags" Map query = ['imageIds': remoteImageIds.join(','), 'name': name] postForReplication(url, query) } - private addRemoteTags(Region region, Collection remoteImageIds, Tag tag) { - log.info "Adding tag ${tag.key}=${tag.value} to remote images ${remoteImageIds} in ${region}" - String promotionTargetServer = grailsApplication.config.promote.targetServer - String url = "${promotionTargetServer}/${region.code}/image/addTags" + private addRemoteTags(String remoteServer, Region region, Collection remoteImageIds, Tag tag) { + log.info "Adding tag ${tag.key}=${tag.value} to remote images ${remoteImageIds} in ${remoteServer} ${region}" + String url = "${remoteServer}/${region.code}/image/addTags" Map query = ['imageIds': remoteImageIds.join(','), 'name': tag.key, 'value': tag.value] postForReplication(url, query) } @@ -357,6 +364,7 @@ class ImageService implements BackgroundProcessInitializer { log.debug "Calling ${url} with params ${query} for tag replication." 
int responseCode = restClientService.post(url, query) if (responseCode >= 300) { + String msg = "Call to ${url} with params ${query} returned status code ${responseCode}" throw new ServerNotActiveException(msg) } diff --git a/grails-app/services/com/netflix/asgard/ServerService.groovy b/grails-app/services/com/netflix/asgard/ServerService.groovy index 6cc5132f..759cebb7 100644 --- a/grails-app/services/com/netflix/asgard/ServerService.groovy +++ b/grails-app/services/com/netflix/asgard/ServerService.groovy @@ -21,6 +21,7 @@ import com.netflix.asgard.server.Environment import com.netflix.asgard.server.Server import com.netflix.asgard.server.ServerState import com.netflix.asgard.server.SwitchAttemptResult +import com.netflix.asgard.userdata.UserDataPropertyKeys import org.joda.time.DateTime import org.joda.time.Hours import org.joda.time.Minutes @@ -340,7 +341,7 @@ class ServerService implements InitializingBean { return otherServers } - String regionName = environmentService.getEnvironmentVariable(DefaultUserDataProvider.REGION_ENV_KEY) + String regionName = environmentService.getEnvironmentVariable(UserDataPropertyKeys.EC2_REGION) String prefix = configService.userDataVarPrefix String clusterName = environmentService.getEnvironmentVariable("${prefix}CLUSTER") diff --git a/grails-app/services/com/netflix/asgard/WitherService.groovy b/grails-app/services/com/netflix/asgard/WitherService.groovy index 064d0103..7a8eeee0 100644 --- a/grails-app/services/com/netflix/asgard/WitherService.groovy +++ b/grails-app/services/com/netflix/asgard/WitherService.groovy @@ -17,6 +17,7 @@ package com.netflix.asgard import com.amazonaws.services.autoscaling.model.AutoScalingGroup import com.netflix.asgard.push.AsgDeletionMode +import com.netflix.asgard.userdata.UserDataPropertyKeys /** * Handles the work involved in making an Asgard instance "wither", that is, waiting for all in-memory tasks to end and @@ -64,7 +65,7 @@ class WitherService { String prefix = configService.userDataVarPrefix String asgName = environmentService.getEnvironmentVariable("${prefix}AUTO_SCALE_GROUP") - String regionCode = environmentService.getEnvironmentVariable(DefaultUserDataProvider.REGION_ENV_KEY) + String regionCode = environmentService.getEnvironmentVariable(UserDataPropertyKeys.EC2_REGION) Region region = Region.withCode(regionCode) if (!asgName || !region) { throw new IllegalStateException("Cannot wither in ASG '${asgName}' in region '${regionCode}'") diff --git a/grails-app/views/hostedZone/prepareResourceRecordSet.gsp b/grails-app/views/hostedZone/prepareResourceRecordSet.gsp index 50c80ffe..f134c9ea 100644 --- a/grails-app/views/hostedZone/prepareResourceRecordSet.gsp +++ b/grails-app/views/hostedZone/prepareResourceRecordSet.gsp @@ -120,6 +120,29 @@ + + + + + + + + + + + + + +
+ + +
+
+ + +
+ + diff --git a/grails-app/views/hostedZone/show.gsp b/grails-app/views/hostedZone/show.gsp index 0f14782d..7fc9a114 100644 --- a/grails-app/views/hostedZone/show.gsp +++ b/grails-app/views/hostedZone/show.gsp @@ -75,6 +75,7 @@ TTL Region Alias
Target + Evaluate
Target
Health Set
ID Weight Failover @@ -96,6 +97,7 @@ ${resourceRecordSet.TTL} ${resourceRecordSet.region} ${resourceRecordSet.aliasTarget?.dNSName} + ${resourceRecordSet.aliasTarget ? (resourceRecordSet.aliasTarget.evaluateTargetHealth ? "Yes" : "No") : ''} ${resourceRecordSet.setIdentifier} ${resourceRecordSet.weight} ${resourceRecordSet.failover} @@ -116,6 +118,8 @@ + + diff --git a/src/groovy/com/netflix/asgard/DefaultUserDataProvider.groovy b/src/groovy/com/netflix/asgard/DefaultUserDataProvider.groovy deleted file mode 100644 index df900d42..00000000 --- a/src/groovy/com/netflix/asgard/DefaultUserDataProvider.groovy +++ /dev/null @@ -1,54 +0,0 @@ -/* - * Copyright 2012 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.netflix.asgard - -import com.netflix.asgard.plugin.UserDataProvider -import com.netflix.frigga.Names -import javax.xml.bind.DatatypeConverter -import org.springframework.beans.factory.annotation.Autowired - -class DefaultUserDataProvider implements UserDataProvider { - - static final String REGION_ENV_KEY = 'EC2_REGION' - - @Autowired - ConfigService configService - - @Autowired - ApplicationService applicationService - - String buildUserDataForVariables(UserContext userContext, String appName, String autoScalingGroupName, - String launchConfigName) { - Names names = Relationships.dissectCompoundName(autoScalingGroupName) - String result = exportVar('ENVIRONMENT', configService.accountName) + - exportVar('MONITOR_BUCKET', applicationService.getMonitorBucket(userContext, appName, names.cluster)) + - exportVar('APP', appName) + - exportVar('APP_GROUP', applicationService.getRegisteredApplication(userContext, appName)?.group) + - exportVar('STACK', names.stack) + - exportVar('CLUSTER', names.cluster) + - exportVar('AUTO_SCALE_GROUP', autoScalingGroupName) + - exportVar('LAUNCH_CONFIG', launchConfigName) + - exportVar(REGION_ENV_KEY, userContext.region.code, false) - List additionalEnvVars = Relationships.labeledEnvironmentVariables(names, - configService.userDataVarPrefix) - result += additionalEnvVars ? additionalEnvVars.join('\n') : '' - DatatypeConverter.printBase64Binary(result.bytes) - } - - private String exportVar(String name, String val, boolean includePrefix = true) { - "export ${includePrefix ? 
configService.userDataVarPrefix : ''}${name}=${val ?: ''}\n" - } -} diff --git a/src/groovy/com/netflix/asgard/Relationships.groovy b/src/groovy/com/netflix/asgard/Relationships.groovy index 6e315c2a..edb9e3a4 100644 --- a/src/groovy/com/netflix/asgard/Relationships.groovy +++ b/src/groovy/com/netflix/asgard/Relationships.groovy @@ -150,7 +150,7 @@ class Relationships { * @return true if the detail is valid */ static Boolean checkDetail(String detail) { - NameValidation.checkDetail(detail) + NameValidation.checkNameWithHyphen(detail) } static String buildLaunchConfigurationName(String autoScalingGroupName) { @@ -235,16 +235,29 @@ class Relationships { labeledEnvironmentVariables(dissectCompoundName(asgName), prefix) } - static List labeledEnvironmentVariables(Names names, String prefix) { + /** + * Gets the environment variables as a map of name value pairs, gathered from the fields in a Names object derived + * from an Auto Scaling Group name. The fields are the ones that are meant as special labels for specific dimensions + * of difference between ASGs. + * + * @param names the container of special label fields + * @param prefix the namespace string that should be appended to each label key to create a full environment + * variable key + * @return a map of environment variable keys to values + */ + static Map labeledEnvVarsMap(Names names, String prefix) { Check.notNull(prefix, String, 'prefix') - List envVars = [] - - LABELED_ENV_VAR_FIELDS.each { String field -> + Map props = [:] + for (String field in LABELED_ENV_VAR_FIELDS) { if (names[field]) { - envVars << "export ${prefix}${Meta.splitCamelCase(field, "_").toUpperCase()}=${names[field]}" + props["${prefix}${Meta.splitCamelCase(field, "_").toUpperCase()}"] = names[field] } } - envVars + props + } + + static List labeledEnvironmentVariables(Names names, String prefix) { + labeledEnvVarsMap(names, prefix).collect { k, v -> "export ${k}=${v}" }.toList() } private static PARTS_FIELDS = (LABELED_ENV_VAR_FIELDS + ['stack', 'detail']).sort() diff --git a/src/groovy/com/netflix/asgard/cred/KeyManagementServiceAssumeRoleCredentialsProvider.groovy b/src/groovy/com/netflix/asgard/cred/KeyManagementServiceAssumeRoleCredentialsProvider.groovy index 7c9fab2e..60484f78 100644 --- a/src/groovy/com/netflix/asgard/cred/KeyManagementServiceAssumeRoleCredentialsProvider.groovy +++ b/src/groovy/com/netflix/asgard/cred/KeyManagementServiceAssumeRoleCredentialsProvider.groovy @@ -66,8 +66,6 @@ class KeyManagementServiceAssumeRoleCredentialsProvider extends AbstractCredenti * @param configService the means for looking up key management service endpoint and the location of the local * keystore file * @param restClientService the means to make a call over HTTPS to the key management service - * @param localFileReader used to read in a local file - * @param clock used to check the time to predict session expiration * @param keyManagementServiceCredentialsProvider the kms provider to use for fetching the initial credentials (can * be null to create a new one) */ @@ -100,7 +98,7 @@ class KeyManagementServiceAssumeRoleCredentialsProvider extends AbstractCredenti String roleArn = configService.assumeRoleArn String roleSessionName = configService.assumeRoleSessionName - if (roleArn && roleSessionName) { + if (credsForSts && roleArn && roleSessionName) { log.debug 'Fetching AssumeRole AWS credentials from STS based on credentials from key management service' AWSSecurityTokenService securityTokenService = new AWSSecurityTokenServiceClient(credsForSts) 
AssumeRoleRequest request = new AssumeRoleRequest(roleArn: roleArn, roleSessionName: roleSessionName) @@ -127,7 +125,7 @@ class KeyManagementServiceAssumeRoleCredentialsProvider extends AbstractCredenti return true } - long currentTimeMillis = keyManagementServiceCredentialsProvider.clock.currentTimeMillis() + long currentTimeMillis = keyManagementServiceCredentialsProvider.currentTimeMillis() long millisecondsRemaining = sessionCredentialsExpiration.time - currentTimeMillis millisecondsRemaining < (60 * 1000) } diff --git a/src/groovy/com/netflix/asgard/cred/KeyManagementServiceCredentialsProvider.groovy b/src/groovy/com/netflix/asgard/cred/KeyManagementServiceCredentialsProvider.groovy index 17e75b11..4b46d75e 100644 --- a/src/groovy/com/netflix/asgard/cred/KeyManagementServiceCredentialsProvider.groovy +++ b/src/groovy/com/netflix/asgard/cred/KeyManagementServiceCredentialsProvider.groovy @@ -75,7 +75,7 @@ class KeyManagementServiceCredentialsProvider extends AbstractCredentialsProvide private RestClientService restClientService /** - * Mechanism for checking time, overridable for each of unit testing. + * Mechanism for checking time, overridable for ease of unit testing. */ protected Clock clock @@ -166,4 +166,11 @@ class KeyManagementServiceCredentialsProvider extends AbstractCredentialsProvide long millisecondsRemaining = sessionCredentialsExpiration.time - clock.currentTimeMillis() millisecondsRemaining < THIRTY_MINUTES_IN_MILLISECONDS } + + /** + * @return the number of milliseconds since Jan 1, 1970 (can be mocked for unit testing) + */ + long currentTimeMillis() { + clock.currentTimeMillis() + } } diff --git a/src/groovy/com/netflix/asgard/deployment/AsgDeploymentNames.groovy b/src/groovy/com/netflix/asgard/deployment/AsgDeploymentNames.groovy index b57173e5..e06f32c4 100644 --- a/src/groovy/com/netflix/asgard/deployment/AsgDeploymentNames.groovy +++ b/src/groovy/com/netflix/asgard/deployment/AsgDeploymentNames.groovy @@ -16,7 +16,9 @@ package com.netflix.asgard.deployment import com.fasterxml.jackson.annotation.JsonCreator +import com.fasterxml.jackson.annotation.JsonIgnore import com.fasterxml.jackson.annotation.JsonProperty +import com.netflix.asgard.model.AsgRoleInCluster import groovy.transform.Immutable /** @@ -49,4 +51,16 @@ import groovy.transform.Immutable ) } + @JsonIgnore + /** Name of specific ASG based on the role that it has in the Cluster */ + String getAsgName(AsgRoleInCluster asgRole) { + if (asgRole == AsgRoleInCluster.Previous) { + return previousAsgName + } + if (asgRole == AsgRoleInCluster.Next) { + return nextAsgName + } + null + } + } diff --git a/src/groovy/com/netflix/asgard/deployment/DeploymentActivities.groovy b/src/groovy/com/netflix/asgard/deployment/DeploymentActivities.groovy index 433c0ffd..3aca91c1 100644 --- a/src/groovy/com/netflix/asgard/deployment/DeploymentActivities.groovy +++ b/src/groovy/com/netflix/asgard/deployment/DeploymentActivities.groovy @@ -105,16 +105,18 @@ interface DeploymentActivities { * * @param userContext who, where, why * @param asgName of the ASG to modify + * @return boolean representing the success of the activity */ - void enableAsg(UserContext userContext, String asgName) + Boolean enableAsg(UserContext userContext, String asgName) /** * Disables scaling behavior for the ASG and traffic to its instances. 
* * @param userContext who, where, why * @param asgName of the ASG to modify + * @return boolean representing the success of the activity */ - void disableAsg(UserContext userContext, String asgName) + Boolean disableAsg(UserContext userContext, String asgName) /** * Deletes an ASG. diff --git a/src/groovy/com/netflix/asgard/deployment/DeploymentActivitiesImpl.groovy b/src/groovy/com/netflix/asgard/deployment/DeploymentActivitiesImpl.groovy index 6e1c3ab1..9e216155 100644 --- a/src/groovy/com/netflix/asgard/deployment/DeploymentActivitiesImpl.groovy +++ b/src/groovy/com/netflix/asgard/deployment/DeploymentActivitiesImpl.groovy @@ -140,7 +140,7 @@ class DeploymentActivitiesImpl implements DeploymentActivities { } @Override - void enableAsg(UserContext userContext, String asgName) { + Boolean enableAsg(UserContext userContext, String asgName) { Task task = new Task() AutoScalingGroup group = awsAutoScalingService.getAutoScalingGroup(userContext, asgName) String appName = Relationships.appNameFromGroupName(asgName) @@ -158,10 +158,11 @@ class DeploymentActivitiesImpl implements DeploymentActivities { discoveryService.enableAppInstances(userContext, appName, instanceIds, task) } } + true } @Override - void disableAsg(UserContext userContext, String asgName) { + Boolean disableAsg(UserContext userContext, String asgName) { Task task = new Task() AutoScalingGroup group = awsAutoScalingService.getAutoScalingGroup(userContext, asgName) String appName = Relationships.appNameFromGroupName(asgName) @@ -181,6 +182,7 @@ class DeploymentActivitiesImpl implements DeploymentActivities { discoveryService.disableAppInstances(userContext, appName, instanceIds, task) } } + true } @Override diff --git a/src/groovy/com/netflix/asgard/deployment/DeploymentTemplate.groovy b/src/groovy/com/netflix/asgard/deployment/DeploymentTemplate.groovy index 90da00c4..71ba6b81 100644 --- a/src/groovy/com/netflix/asgard/deployment/DeploymentTemplate.groovy +++ b/src/groovy/com/netflix/asgard/deployment/DeploymentTemplate.groovy @@ -15,45 +15,42 @@ */ package com.netflix.asgard.deployment +import com.netflix.asgard.deployment.steps.CreateAsgStep +import com.netflix.asgard.deployment.steps.DeleteAsgStep +import com.netflix.asgard.deployment.steps.DisableAsgStep +import com.netflix.asgard.deployment.steps.ResizeStep +import com.netflix.asgard.model.AsgRoleInCluster + /** * Creates instances of Deployment by name for use as templates. 
*/ enum DeploymentTemplate { - CreateJudgeAndCleanUp({ - newDeploymentWithDefaults - }), CreateOnly({ - newDeploymentWithDefaults.with{ - scaleUp = ProceedPreference.Yes - disablePreviousAsg = ProceedPreference.No - deletePreviousAsg = ProceedPreference.No - it - } - }) - - static private DeploymentWorkflowOptions getNewDeploymentWithDefaults() { + CreateAndCleanUpPreviousAsg({ int capacity -> new DeploymentWorkflowOptions( - delayDurationMinutes: 0, - doCanary: false, - canaryCapacity: 1, - canaryStartUpTimeoutMinutes: 30, - canaryJudgmentPeriodMinutes: 60, - scaleUp: ProceedPreference.Ask, - desiredCapacityStartUpTimeoutMinutes: 40, - desiredCapacityJudgmentPeriodMinutes: 120, - disablePreviousAsg: ProceedPreference.Ask, - fullTrafficJudgmentPeriodMinutes: 240, - deletePreviousAsg: ProceedPreference.Ask + steps: [ + new CreateAsgStep(), + new ResizeStep(targetAsg: AsgRoleInCluster.Next, capacity: capacity, startUpTimeoutMinutes: 40), + new DisableAsgStep(targetAsg: AsgRoleInCluster.Previous), + new DeleteAsgStep(targetAsg: AsgRoleInCluster.Previous) + ], ) - } + }), CreateOnly({ int capacity -> + new DeploymentWorkflowOptions( + steps: [ + new CreateAsgStep(), + new ResizeStep(targetAsg: AsgRoleInCluster.Next, capacity: capacity, startUpTimeoutMinutes: 40) + ], + ) + }) - Closure customizeDeployment + Closure constructDeployment - DeploymentTemplate(Closure customizeDeployment) { - this.customizeDeployment = customizeDeployment + DeploymentTemplate(Closure constructDeployment) { + this.constructDeployment = constructDeployment } - DeploymentWorkflowOptions getDeployment() { - customizeDeployment() + DeploymentWorkflowOptions getDeployment(int capacity = 0) { + constructDeployment(capacity) } static DeploymentTemplate of(String name) { diff --git a/src/groovy/com/netflix/asgard/deployment/DeploymentWorkflowImpl.groovy b/src/groovy/com/netflix/asgard/deployment/DeploymentWorkflowImpl.groovy index c180cc67..2e791c4c 100644 --- a/src/groovy/com/netflix/asgard/deployment/DeploymentWorkflowImpl.groovy +++ b/src/groovy/com/netflix/asgard/deployment/DeploymentWorkflowImpl.groovy @@ -17,6 +17,7 @@ package com.netflix.asgard.deployment import com.amazonaws.services.simpleworkflow.flow.ActivitySchedulingOptions import com.amazonaws.services.simpleworkflow.flow.core.Promise +import com.amazonaws.services.simpleworkflow.flow.core.Settable import com.amazonaws.services.simpleworkflow.flow.interceptors.ExponentialRetryPolicy import com.amazonaws.services.simpleworkflow.flow.interceptors.RetryPolicy import com.netflix.asgard.DiscoveryService @@ -24,7 +25,17 @@ import com.netflix.asgard.GlobalSwfWorkflowAttributes import com.netflix.asgard.Relationships import com.netflix.asgard.ServiceUnavailableException import com.netflix.asgard.UserContext +import com.netflix.asgard.deployment.steps.CreateAsgStep +import com.netflix.asgard.deployment.steps.DeleteAsgStep +import com.netflix.asgard.deployment.steps.DeploymentStep +import com.netflix.asgard.deployment.steps.DisableAsgStep +import com.netflix.asgard.deployment.steps.EnableAsgStep +import com.netflix.asgard.deployment.steps.JudgmentStep +import com.netflix.asgard.deployment.steps.ResizeStep +import com.netflix.asgard.deployment.steps.WaitStep import com.netflix.asgard.model.AutoScalingGroupBeanOptions +import com.netflix.asgard.model.AutoScalingProcessType +import com.netflix.asgard.model.Deployment import com.netflix.asgard.model.LaunchConfigurationBeanOptions import com.netflix.asgard.model.ScheduledAsgAnalysis import 
com.netflix.asgard.push.PushException @@ -50,23 +61,18 @@ class DeploymentWorkflowImpl implements DeploymentWorkflow, WorkflowOperator delay = timer(minutesToSeconds(deploymentOptions.delayDurationMinutes), 'delay') String clusterName = deploymentOptions.clusterName - Promise asgDeploymentNamesPromise = waitFor(delay) { - promiseFor(activities.getAsgDeploymentNames(userContext, clusterName)) - } + Promise asgDeploymentNamesPromise = promiseFor(activities.getAsgDeploymentNames(userContext, + clusterName)) Throwable rollbackCause = null List> runningAsgAnalyses = [] Promise deploymentComplete = waitFor(asgDeploymentNamesPromise) { AsgDeploymentNames asgDeploymentNames -> - status "Starting deployment for Cluster '${clusterName}'." doTry { AutoScalingGroupBeanOptions nextAsgTemplate = AutoScalingGroupBeanOptions.from(asgInputs) nextAsgTemplate.with { autoScalingGroupName = asgDeploymentNames.nextAsgName launchConfigurationName = asgDeploymentNames.nextLaunchConfigName + suspendedProcesses?.removeAll(AutoScalingProcessType.getDisableProcesses()) } Promise nextLcTemplateConstructed = promiseFor(activities. constructLaunchConfigForNextAsg(userContext, nextAsgTemplate, lcInputs)) @@ -105,101 +111,86 @@ class DeploymentWorkflowImpl implements DeploymentWorkflow, WorkflowOperator startDeployment(UserContext userContext, DeploymentWorkflowOptions deploymentOptions, AsgDeploymentNames asgDeploymentNames, AutoScalingGroupBeanOptions nextAsgTemplate, LaunchConfigurationBeanOptions nextLcTemplate, List> runningAsgAnalyses) { - status "Creating Launch Configuration '${asgDeploymentNames.nextLaunchConfigName}'." - Promise launchConfigCreated = promiseFor(activities.createLaunchConfigForNextAsg(userContext, - nextAsgTemplate, nextLcTemplate)) - Promise asgCreated = waitFor(launchConfigCreated) { - status "Creating Auto Scaling Group '${asgDeploymentNames.nextAsgName}' initially with 0 instances." - waitFor(activities.createNextAsgForClusterWithoutInstances(userContext, nextAsgTemplate)) { - status 'Copying Scaling Policies and Scheduled Actions.' - Promise scalingPolicyCount = promiseFor( - activities.copyScalingPolicies(userContext, asgDeploymentNames)) - Promise scheduledActionCount = promiseFor( - activities.copyScheduledActions(userContext, asgDeploymentNames)) - allPromises(scalingPolicyCount, scheduledActionCount) - } - } - - String clusterName = deploymentOptions.clusterName - DoTry startAsgAnalysis = startScheduledAsgAnalysis(asgCreated, clusterName, - deploymentOptions.notificationDestination) - runningAsgAnalyses << startAsgAnalysis - - Promise scaleToDesiredCapacity = waitFor(asgCreated) { - status "New ASG '${asgDeploymentNames.nextAsgName}' was successfully created." 
- if (!deploymentOptions.doCanary) { - return promiseFor(true) - } - String operationDescription = 'canary capacity' - int canaryCapacity = deploymentOptions.canaryCapacity - Promise scaleAsgPromise = scaleAsg(userContext, asgDeploymentNames.nextAsgName, - deploymentOptions.canaryStartUpTimeoutMinutes, canaryCapacity, canaryCapacity, - canaryCapacity, operationDescription) - waitFor(scaleAsgPromise) { - determineWhetherToProceedToNextStep(userContext, asgDeploymentNames.nextAsgName, - deploymentOptions.canaryJudgmentPeriodMinutes, deploymentOptions.notificationDestination, - deploymentOptions.scaleUp, operationDescription) - } - } - Promise disablePreviousAsg = waitFor(scaleToDesiredCapacity) { - if (!it) { return promiseFor(false) } - String operationDescription = 'full capacity' - Promise scaleAsgPromise = scaleAsg(userContext, asgDeploymentNames.nextAsgName, - deploymentOptions.desiredCapacityStartUpTimeoutMinutes, nextAsgTemplate.minSize, - nextAsgTemplate.desiredCapacity, nextAsgTemplate.maxSize, operationDescription) - waitFor(scaleAsgPromise) { - determineWhetherToProceedToNextStep(userContext, asgDeploymentNames.nextAsgName, - deploymentOptions.desiredCapacityJudgmentPeriodMinutes, - deploymentOptions.notificationDestination, deploymentOptions.disablePreviousAsg, - operationDescription) - } - } - - Promise isPreviousAsgDisabled = waitFor(disablePreviousAsg) { - stopScheduledAsgAnalysis(startAsgAnalysis) - runningAsgAnalyses.remove(startAsgAnalysis) - if (!it) { return promiseFor(false) } - if (deploymentOptions.disablePreviousAsg) { - String previousAsgName = asgDeploymentNames.previousAsgName - status "Disabling ASG '${previousAsgName}'." - activities.disableAsg(userContext, previousAsgName) - } - Promise.asPromise(true) - } - - waitFor(isPreviousAsgDisabled) { - String previousAsgName = asgDeploymentNames.previousAsgName - if (!it) { - status "ASG '${previousAsgName}' was not disabled. The new ASG is not taking full traffic." - } else { - long secondsToWaitAfterEurekaChange = DiscoveryService.SECONDS_TO_WAIT_AFTER_EUREKA_CHANGE - status "Waiting ${secondsToWaitAfterEurekaChange} seconds for clients to stop using instances." - Promise waitAfterEurekaChange = timer(secondsToWaitAfterEurekaChange, 'waitAfterEurekaChange') - waitFor(waitAfterEurekaChange) { - Promise deleteAsg = determineWhetherToProceedToNextStep(userContext, - asgDeploymentNames.nextAsgName, deploymentOptions.fullTrafficJudgmentPeriodMinutes, - deploymentOptions.notificationDestination, deploymentOptions.deletePreviousAsg, - 'full traffic') - waitFor(deleteAsg) { - if (it) { - activities.deleteAsg(userContext, previousAsgName) - status "Deleting ASG '${previousAsgName}'." + Map, Closure> stepsToOperations = [ + (WaitStep): { WaitStep step -> + status "Waiting ${unit(step.durationMinutes, 'minute')} before next step." + timer(minutesToSeconds(step.durationMinutes), step.description) + }, + (CreateAsgStep): { CreateAsgStep step -> + status "Creating Launch Configuration '${asgDeploymentNames.nextLaunchConfigName}'." + Promise launchConfigCreated = promiseFor( + activities.createLaunchConfigForNextAsg(userContext, nextAsgTemplate, nextLcTemplate)) + waitFor(launchConfigCreated) { + status "Creating Auto Scaling Group '${asgDeploymentNames.nextAsgName}' \ +initially with 0 instances." + waitFor(activities.createNextAsgForClusterWithoutInstances(userContext, nextAsgTemplate)) { + status 'Copying Scaling Policies and Scheduled Actions.' 
+ Promise scalingPolicyCount = promiseFor( + activities.copyScalingPolicies(userContext, asgDeploymentNames)) + Promise scheduledActionCount = promiseFor( + activities.copyScheduledActions(userContext, asgDeploymentNames)) + allPromises(scalingPolicyCount, scheduledActionCount) } - Promise.Void() } + }, + (ResizeStep): { ResizeStep step -> + scaleAsg(userContext, asgDeploymentNames.nextAsgName, step.startUpTimeoutMinutes, + nextAsgTemplate.minSize, step.capacity, nextAsgTemplate.maxSize) + }, + (JudgmentStep): { JudgmentStep step -> + DoTry startAsgAnalysis = startScheduledAsgAnalysis( + deploymentOptions.clusterName, deploymentOptions.notificationDestination) + runningAsgAnalyses << startAsgAnalysis + Promise judgment = determineWhetherToProceedToNextStep(userContext, + asgDeploymentNames.nextAsgName, step.durationMinutes, + deploymentOptions.notificationDestination) + waitFor(judgment) { + stopScheduledAsgAnalysis(startAsgAnalysis) + runningAsgAnalyses.remove(startAsgAnalysis) + judgment + } + }, + (DisableAsgStep): { DisableAsgStep step -> + String asgName = asgDeploymentNames.getAsgName(step.targetAsg) + status "Disabling ASG '${asgName}'." + activities.disableAsg(userContext, asgName) + long secondsToWaitAfterEurekaChange = DiscoveryService.SECONDS_TO_WAIT_AFTER_EUREKA_CHANGE + status "Waiting ${secondsToWaitAfterEurekaChange} seconds for clients to stop using instances." + timer(secondsToWaitAfterEurekaChange, 'waitAfterEurekaChange') + }, + (EnableAsgStep): { EnableAsgStep step -> + String asgName = asgDeploymentNames.getAsgName(step.targetAsg) + status "Enabling ASG '${asgName}'." + activities.enableAsg(userContext, asgName) + Promise.Void() + }, + (DeleteAsgStep): { DeleteAsgStep step -> + String asgName = asgDeploymentNames.getAsgName(step.targetAsg) + activities.deleteAsg(userContext, asgName) + status "Deleting ASG '${asgName}'." } + ] + + List stepPromises = [] + (deploymentOptions.steps.size() + 1).times { stepPromises.add(new Settable()) } + stepPromises[0].chain(Promise.Void()) + (0..(deploymentOptions.steps.size() - 1)).each { stepIndex -> + waitFor(stepPromises[stepIndex]) { + status Deployment.constructStepJson(stepIndex) + DeploymentStep step = deploymentOptions.steps[stepIndex] + Settable stepPromise = stepPromises[stepIndex + 1] + stepPromise.chain(stepsToOperations[step.class](step)) + Promise.Void() } } + stepPromises[deploymentOptions.steps.size()] } - private DoTry startScheduledAsgAnalysis(Promise trigger, String clusterName, - String notificationDestination) { + private DoTry startScheduledAsgAnalysis(String clusterName, String notificationDestination) { doTry { - waitFor(trigger) { - retry(getRemoteServiceRetryPolicy()) { - promiseFor(activities.startAsgAnalysis(clusterName, notificationDestination)) - } + retry(getRemoteServiceRetryPolicy()) { + promiseFor(activities.startAsgAnalysis(clusterName, notificationDestination)) } } withCatch { Throwable t -> status "Error starting ASG analyzer: ${t}" @@ -221,10 +212,9 @@ class DeploymentWorkflowImpl implements DeploymentWorkflow, WorkflowOperator scaleAsg(UserContext userContext, String asgName, - int startupLimitMinutes, int min, int capacity, int max, String operationDescription) { - status "Scaling new ASG to ${operationDescription}. " + - "Waiting up to ${unit(startupLimitMinutes, 'minute')} for ${unit(capacity, 'instance')}." 
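// A sketch of the sequencing pattern introduced above, assuming the surrounding glisten/SWF
// helpers (waitFor, Promise, Settable) are in scope: one Settable gate per step plus a final one,
// so arbitrary user-chosen steps run strictly in order and the last promise completes the deployment.
List<Settable> gates = []
(deploymentOptions.steps.size() + 1).times { gates << new Settable() }
gates[0].chain(Promise.Void())                                 // nothing gates the first step
deploymentOptions.steps.eachWithIndex { DeploymentStep step, int i ->
    waitFor(gates[i]) {                                        // fires only after step i - 1 has finished
        gates[i + 1].chain(stepsToOperations[step.class](step))
        Promise.Void()
    }
}
gates[deploymentOptions.steps.size()]                          // completes when the final step does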
+ private Promise scaleAsg(UserContext userContext, String asgName, int startupLimitMinutes, int min, + int capacity, int max) { + status "Waiting up to ${unit(startupLimitMinutes, 'minute')} while resizing to ${unit(capacity, 'instance')}." activities.resizeAsg(userContext, asgName, min, capacity, max) RetryPolicy retryPolicy = new ExponentialRetryPolicy(30L).withBackoffCoefficient(1). withExceptionsToRetry([PushException]) @@ -258,12 +248,9 @@ class DeploymentWorkflowImpl implements DeploymentWorkflow, WorkflowOperator determineWhetherToProceedToNextStep(UserContext userContext, String asgName, - int judgmentPeriodMinutes, String notificationDestination, ProceedPreference continueWithNextStep, - String operationDescription) { - if (continueWithNextStep == ProceedPreference.Yes) { return promiseFor(true) } - if (continueWithNextStep == ProceedPreference.No) { return promiseFor(false) } + int judgmentPeriodMinutes, String notificationDestination) { String judgmentMessage = "ASG will now be evaluated for up to ${unit(judgmentPeriodMinutes, 'minute')}" + - " during the ${operationDescription} judgment period." + " during the judgment period." status judgmentMessage Promise proceed = promiseFor(activities.askIfDeploymentShouldProceed(notificationDestination, asgName, judgmentMessage)) @@ -271,7 +258,7 @@ class DeploymentWorkflowImpl implements DeploymentWorkflow, WorkflowOperator judgmentTimeout = timer(minutesToSeconds(judgmentPeriodMinutes), 'judgmentTimeout') waitFor(judgmentTimeout) { String clusterName = Relationships.clusterFromGroupName(asgName) - String subject = "${operationDescription.capitalize()} judgment period for ASG '${asgName}' has ended." + String subject = "Judgment period for ASG '${asgName}' has ended." String message = 'Please make a decision to proceed or roll back.' activities.sendNotification(userContext, notificationDestination, clusterName, subject, message) Promise.Void() @@ -288,7 +275,17 @@ class DeploymentWorkflowImpl implements DeploymentWorkflow, WorkflowOperator previousAsgEnabled = doTry { + promiseFor(activities.enableAsg(userContext, asgDeploymentNames.previousAsgName)) + } withCatch { Throwable t -> + status "Previous ASG '${asgDeploymentNames.previousAsgName}' could not be enabled." 
+ promiseFor(false) + } result + waitFor(previousAsgEnabled) { + if (it) { + activities.disableAsg(userContext, asgDeploymentNames.nextAsgName) + } + Promise.Void() + } } } diff --git a/src/groovy/com/netflix/asgard/deployment/DeploymentWorkflowOptions.groovy b/src/groovy/com/netflix/asgard/deployment/DeploymentWorkflowOptions.groovy index d10ccc09..75d3485e 100644 --- a/src/groovy/com/netflix/asgard/deployment/DeploymentWorkflowOptions.groovy +++ b/src/groovy/com/netflix/asgard/deployment/DeploymentWorkflowOptions.groovy @@ -15,6 +15,7 @@ */ package com.netflix.asgard.deployment +import com.netflix.asgard.deployment.steps.DeploymentStep import groovy.transform.Canonical /** @@ -28,37 +29,7 @@ import groovy.transform.Canonical /** Endpoint where deployment notifications will be sent */ String notificationDestination - /** Delay before deployment will begin */ - int delayDurationMinutes - - /** Specify if canary testing be done which will scale the ASG up to a minimal number of instances */ - Boolean doCanary - - /** Number of instances used for canary testing */ - int canaryCapacity - - /** Time limit for having healthy instances at the canary capacity */ - int canaryStartUpTimeoutMinutes - - /** Time allowed for the canary test */ - int canaryJudgmentPeriodMinutes - - /** How to proceed after the canary test */ - ProceedPreference scaleUp - - /** Time limit for having healthy instances at the desired capacity */ - int desiredCapacityStartUpTimeoutMinutes - - /** Time allowed for the desired capacity assessment */ - int desiredCapacityJudgmentPeriodMinutes - - /** How to proceed after the desired capacity assessment */ - ProceedPreference disablePreviousAsg - - /** Time allowed for the full traffic assessment */ - int fullTrafficJudgmentPeriodMinutes - - /** How to proceed after the full traffic assessment */ - ProceedPreference deletePreviousAsg + /** Ordered steps that describe a deployment */ + List steps } diff --git a/src/groovy/com/netflix/asgard/deployment/StartDeploymentRequest.groovy b/src/groovy/com/netflix/asgard/deployment/StartDeploymentRequest.groovy index e2781153..599688d7 100644 --- a/src/groovy/com/netflix/asgard/deployment/StartDeploymentRequest.groovy +++ b/src/groovy/com/netflix/asgard/deployment/StartDeploymentRequest.groovy @@ -16,6 +16,7 @@ package com.netflix.asgard.deployment import com.fasterxml.jackson.annotation.JsonIgnore +import com.netflix.asgard.deployment.steps.ResizeStep import com.netflix.asgard.model.AutoScalingGroupBeanOptions import com.netflix.asgard.model.LaunchConfigurationBeanOptions import groovy.transform.Canonical @@ -35,10 +36,9 @@ class StartDeploymentRequest { @JsonIgnore List getValidationErrors() { List errors = [] - if (deploymentOptions.doCanary) { - errors.addAll(checkCapacityBounds(deploymentOptions.canaryCapacity, asgOptions)) + deploymentOptions.steps.findAll { it instanceof ResizeStep }.each { ResizeStep resizeStep -> + errors.addAll(checkCapacityBounds(resizeStep.capacity, asgOptions)) } - errors.addAll(checkCapacityBounds(asgOptions.desiredCapacity, asgOptions)) errors } diff --git a/src/groovy/com/netflix/asgard/deployment/steps/CreateAsgStep.groovy b/src/groovy/com/netflix/asgard/deployment/steps/CreateAsgStep.groovy new file mode 100644 index 00000000..2dfa37d9 --- /dev/null +++ b/src/groovy/com/netflix/asgard/deployment/steps/CreateAsgStep.groovy @@ -0,0 +1,22 @@ +/* + * Copyright 2014 Netflix, Inc. 
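// An illustrative construction (cluster name, destination, and capacity are hypothetical) of the
// new steps-based DeploymentWorkflowOptions that replaces the removed canary/ProceedPreference
// fields; it mirrors the CreateAndCleanUpPreviousAsg template above with an explicit judgment
// before cleanup.
DeploymentWorkflowOptions options = new DeploymentWorkflowOptions(
        clusterName: 'helloworld-example',
        notificationDestination: 'owner@example.com',
        steps: [
                new CreateAsgStep(),
                new ResizeStep(targetAsg: AsgRoleInCluster.Next, capacity: 3, startUpTimeoutMinutes: 40),
                new JudgmentStep(durationMinutes: 120),
                new DisableAsgStep(targetAsg: AsgRoleInCluster.Previous),
                new DeleteAsgStep(targetAsg: AsgRoleInCluster.Previous)
        ]
)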
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.netflix.asgard.deployment.steps + +import groovy.transform.Canonical + +@Canonical +class CreateAsgStep implements DeploymentStep { +} diff --git a/src/groovy/com/netflix/asgard/deployment/steps/DeleteAsgStep.groovy b/src/groovy/com/netflix/asgard/deployment/steps/DeleteAsgStep.groovy new file mode 100644 index 00000000..7783787e --- /dev/null +++ b/src/groovy/com/netflix/asgard/deployment/steps/DeleteAsgStep.groovy @@ -0,0 +1,25 @@ +/* + * Copyright 2014 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.netflix.asgard.deployment.steps + +import com.netflix.asgard.model.AsgRoleInCluster +import groovy.transform.Canonical + +@Canonical +class DeleteAsgStep implements DeploymentStep { + /** Indicates the ASG that will be targeted by this operation */ + AsgRoleInCluster targetAsg +} diff --git a/src/groovy/com/netflix/asgard/deployment/steps/DeploymentStep.groovy b/src/groovy/com/netflix/asgard/deployment/steps/DeploymentStep.groovy new file mode 100644 index 00000000..fec4a78b --- /dev/null +++ b/src/groovy/com/netflix/asgard/deployment/steps/DeploymentStep.groovy @@ -0,0 +1,34 @@ +/* + * Copyright 2014 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.netflix.asgard.deployment.steps + +import com.fasterxml.jackson.annotation.JsonSubTypes +import com.fasterxml.jackson.annotation.JsonSubTypes.Type +import com.fasterxml.jackson.annotation.JsonTypeInfo +import com.fasterxml.jackson.annotation.JsonTypeInfo.Id + +@JsonTypeInfo(use = Id.NAME, property = "type") +@JsonSubTypes([ + @Type(name = "CreateAsg", value = CreateAsgStep), + @Type(name = "DeleteAsg", value = DeleteAsgStep), + @Type(name = "DisableAsg", value = DisableAsgStep), + @Type(name = "EnableAsg", value = EnableAsgStep), + @Type(name = "Judgment", value = JudgmentStep), + @Type(name = "Resize", value = ResizeStep), + @Type(name = "Wait", value = WaitStep) +]) +interface DeploymentStep { +} diff --git a/src/groovy/com/netflix/asgard/deployment/steps/DisableAsgStep.groovy b/src/groovy/com/netflix/asgard/deployment/steps/DisableAsgStep.groovy new file mode 100644 index 00000000..5160f52a --- /dev/null +++ b/src/groovy/com/netflix/asgard/deployment/steps/DisableAsgStep.groovy @@ -0,0 +1,25 @@ +/* + * Copyright 2014 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.netflix.asgard.deployment.steps + +import com.netflix.asgard.model.AsgRoleInCluster +import groovy.transform.Canonical + +@Canonical +class DisableAsgStep implements DeploymentStep { + /** Indicates the ASG that will be targeted by this operation */ + AsgRoleInCluster targetAsg +} diff --git a/src/groovy/com/netflix/asgard/deployment/steps/EnableAsgStep.groovy b/src/groovy/com/netflix/asgard/deployment/steps/EnableAsgStep.groovy new file mode 100644 index 00000000..2b051543 --- /dev/null +++ b/src/groovy/com/netflix/asgard/deployment/steps/EnableAsgStep.groovy @@ -0,0 +1,25 @@ +/* + * Copyright 2014 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.netflix.asgard.deployment.steps + +import com.netflix.asgard.model.AsgRoleInCluster +import groovy.transform.Canonical + +@Canonical +class EnableAsgStep implements DeploymentStep { + /** Indicates the ASG that will be targeted by this operation */ + AsgRoleInCluster targetAsg +} diff --git a/src/groovy/com/netflix/asgard/deployment/steps/JudgmentStep.groovy b/src/groovy/com/netflix/asgard/deployment/steps/JudgmentStep.groovy new file mode 100644 index 00000000..34546458 --- /dev/null +++ b/src/groovy/com/netflix/asgard/deployment/steps/JudgmentStep.groovy @@ -0,0 +1,24 @@ +/* + * Copyright 2014 Netflix, Inc. 
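// A small sketch of what the @JsonTypeInfo/@JsonSubTypes annotations above enable: the "type"
// property in a REST payload selects the concrete DeploymentStep subclass, which is what the
// deployment controller's JSON (see the specs further down) relies on.
import com.fasterxml.jackson.databind.ObjectMapper

ObjectMapper mapper = new ObjectMapper()
DeploymentStep step = mapper.readValue('{"type":"Resize","targetAsg":"Next","capacity":3,"startUpTimeoutMinutes":40}',
        DeploymentStep)
assert step instanceof ResizeStep
assert step.capacity == 3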
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.netflix.asgard.deployment.steps + +import groovy.transform.Canonical + +@Canonical +class JudgmentStep implements DeploymentStep { + /** Time allowed before notifying judge */ + int durationMinutes +} diff --git a/src/groovy/com/netflix/asgard/deployment/steps/ResizeStep.groovy b/src/groovy/com/netflix/asgard/deployment/steps/ResizeStep.groovy new file mode 100644 index 00000000..4b62f40c --- /dev/null +++ b/src/groovy/com/netflix/asgard/deployment/steps/ResizeStep.groovy @@ -0,0 +1,31 @@ +/* + * Copyright 2014 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.netflix.asgard.deployment.steps + +import com.netflix.asgard.model.AsgRoleInCluster +import groovy.transform.Canonical + +@Canonical +class ResizeStep implements DeploymentStep { + /** Indicates the ASG that will be targeted by this operation */ + AsgRoleInCluster targetAsg + + /** Number of instances to resize to */ + int capacity + + /** Time limit for having operational instances at capacity */ + int startUpTimeoutMinutes +} diff --git a/src/groovy/com/netflix/asgard/deployment/steps/WaitStep.groovy b/src/groovy/com/netflix/asgard/deployment/steps/WaitStep.groovy new file mode 100644 index 00000000..97bc8028 --- /dev/null +++ b/src/groovy/com/netflix/asgard/deployment/steps/WaitStep.groovy @@ -0,0 +1,27 @@ +/* + * Copyright 2014 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.netflix.asgard.deployment.steps + +import groovy.transform.Canonical + +@Canonical +class WaitStep implements DeploymentStep { + /** How long to wait before continuing with next step */ + int durationMinutes + + /** Description of what is being waited for */ + String description +} diff --git a/src/groovy/com/netflix/asgard/mock/Mocks.groovy b/src/groovy/com/netflix/asgard/mock/Mocks.groovy index a29d5abf..b8d51366 100644 --- a/src/groovy/com/netflix/asgard/mock/Mocks.groovy +++ b/src/groovy/com/netflix/asgard/mock/Mocks.groovy @@ -35,7 +35,7 @@ import com.netflix.asgard.AwsSqsService import com.netflix.asgard.CachedMapBuilder import com.netflix.asgard.Caches import com.netflix.asgard.ConfigService -import com.netflix.asgard.DefaultUserDataProvider +import com.netflix.asgard.userdata.DefaultUserDataProvider import com.netflix.asgard.DiscoveryService import com.netflix.asgard.DnsService import com.netflix.asgard.EmailerService @@ -138,7 +138,6 @@ class Mocks { awsAccounts: [TEST_AWS_ACCOUNT_ID, PROD_AWS_ACCOUNT_ID] ], promote: [ - targetServer: 'http://prod', imageTags: true, canonicalServerForBakeEnvironment: 'http://test' ], diff --git a/src/groovy/com/netflix/asgard/model/AsgRoleInCluster.groovy b/src/groovy/com/netflix/asgard/model/AsgRoleInCluster.groovy new file mode 100644 index 00000000..8b7f997f --- /dev/null +++ b/src/groovy/com/netflix/asgard/model/AsgRoleInCluster.groovy @@ -0,0 +1,18 @@ +/* + * Copyright 2014 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
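// Steps identify their target by role in the cluster (the AsgRoleInCluster enum declared just
// below) rather than by a concrete ASG name; the workflow resolves the name at run time, roughly:
String asgName = asgDeploymentNames.getAsgName(AsgRoleInCluster.Previous)   // e.g. 'helloworld-example-v001' (hypothetical)
status "Disabling ASG '${asgName}'."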
+ */ +package com.netflix.asgard.model + +enum AsgRoleInCluster { Previous, Current, Next } diff --git a/src/groovy/com/netflix/asgard/model/Deployment.groovy b/src/groovy/com/netflix/asgard/model/Deployment.groovy index 15bb250e..117123ea 100644 --- a/src/groovy/com/netflix/asgard/model/Deployment.groovy +++ b/src/groovy/com/netflix/asgard/model/Deployment.groovy @@ -18,7 +18,10 @@ package com.netflix.asgard.model import com.amazonaws.services.simpleworkflow.model.WorkflowExecution import com.netflix.asgard.Region import com.netflix.asgard.Time +import com.netflix.asgard.deployment.steps.DeploymentStep import groovy.transform.Canonical +import java.util.regex.Matcher +import java.util.regex.Pattern import org.joda.time.DateTime /** @@ -36,6 +39,24 @@ class Deployment { final Date updateTime final String status final List log + final List steps + + static Pattern stepPattern = ~/.*\{"step":([0-9]+)\}/ + + /** Construct JSON that represents a step index */ + static constructStepJson(int stepIndex) { + """{"step":${stepIndex}}""" + } + + /** @return step index from JSON */ + static Integer parseStepIndex(String logMessage) { + Matcher matcher = logMessage =~ stepPattern + if (matcher.matches()) { + String stepIndex = matcher[0][1] + return Integer.parseInt(stepIndex) + } + null + } /** AWS Simple Workflow Service activity token needed to complete a manual activity */ String token @@ -58,4 +79,20 @@ class Deployment { String getRegionCode() { region.code } + + /** @return list of lists of log messages grouped by step */ + List> getLogForSteps() { + List> logForSteps = [[]] + int currentStepIndex = 0 + log.each { + Integer stepIndex = parseStepIndex(it) + if (stepIndex != null) { + currentStepIndex = stepIndex + logForSteps[currentStepIndex] = [] + } else { + logForSteps[currentStepIndex] << it + } + } + logForSteps + } } diff --git a/src/groovy/com/netflix/asgard/model/WorkflowExecutionBeanOptions.groovy b/src/groovy/com/netflix/asgard/model/WorkflowExecutionBeanOptions.groovy index 029e96cb..cb71d1b3 100644 --- a/src/groovy/com/netflix/asgard/model/WorkflowExecutionBeanOptions.groovy +++ b/src/groovy/com/netflix/asgard/model/WorkflowExecutionBeanOptions.groovy @@ -15,11 +15,14 @@ */ package com.netflix.asgard.model +import com.amazonaws.services.simpleworkflow.flow.JsonDataConverter import com.amazonaws.services.simpleworkflow.model.HistoryEvent import com.amazonaws.services.simpleworkflow.model.WorkflowExecutionInfo import com.netflix.asgard.EntityType import com.netflix.asgard.Region import com.netflix.asgard.Task +import com.netflix.asgard.deployment.DeploymentWorkflowOptions +import com.netflix.asgard.deployment.steps.DeploymentStep import com.netflix.glisten.HistoryAnalyzer import com.netflix.glisten.LogMessage import groovy.transform.Canonical @@ -61,7 +64,8 @@ import groovy.transform.Canonical List logMessages = HistoryAnalyzer.of(events).logMessages boolean isDone = executionInfo.closeTimestamp != null String currentOperation = isDone || !logMessages ? '' : logMessages.last().text - Date lastTime = isDone ? executionInfo.closeTimestamp : logMessages.last().timestamp + Date lastUpdate = logMessages ? logMessages.last().timestamp : executionInfo.startTimestamp + Date lastTime = isDone ? 
executionInfo.closeTimestamp : lastUpdate task.with { log = logMessages*.toString() updateTime = lastTime @@ -77,6 +81,12 @@ import groovy.transform.Canonical * @return deployment that represents the workflow execution */ Deployment asDeployment() { + List steps = [] + def input = getInput() + if (input) { + DeploymentWorkflowOptions deploymentWorkflowOptions = input[1] // get the second argument to the workflow + steps = deploymentWorkflowOptions.steps + } SwfWorkflowTags swfWorkflowTags = new SwfWorkflowTags() swfWorkflowTags.withTags(executionInfo.tagList) String status = executionInfo.closeStatus?.toLowerCase() ?: 'running' @@ -87,6 +97,14 @@ import groovy.transform.Canonical Region region = swfWorkflowTags?.user?.region new Deployment(swfWorkflowTags.id, clusterName, region, executionInfo.execution, swfWorkflowTags.desc, swfWorkflowTags?.user?.username, executionInfo.startTimestamp, lastTime, status, - logMessages*.toString()) + logMessages*.toString(), steps) + } + + private Object getInput() { + if (!events) { return [] } + JsonDataConverter dataConverter = new JsonDataConverter() + String workflowInputString = events[0].workflowExecutionStartedEventAttributes?.input + workflowInputString ? dataConverter.fromData(workflowInputString, Object) : null } + } diff --git a/src/groovy/com/netflix/asgard/DefaultAdvancedUserDataProvider.groovy b/src/groovy/com/netflix/asgard/userdata/DefaultAdvancedUserDataProvider.groovy similarity index 91% rename from src/groovy/com/netflix/asgard/DefaultAdvancedUserDataProvider.groovy rename to src/groovy/com/netflix/asgard/userdata/DefaultAdvancedUserDataProvider.groovy index ff234754..3916b76f 100644 --- a/src/groovy/com/netflix/asgard/DefaultAdvancedUserDataProvider.groovy +++ b/src/groovy/com/netflix/asgard/userdata/DefaultAdvancedUserDataProvider.groovy @@ -13,8 +13,11 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package com.netflix.asgard +package com.netflix.asgard.userdata +import com.netflix.asgard.PluginService +import com.netflix.asgard.Relationships +import com.netflix.asgard.UserContext import com.netflix.asgard.model.LaunchContext import com.netflix.asgard.plugin.AdvancedUserDataProvider import org.springframework.beans.factory.annotation.Autowired diff --git a/src/groovy/com/netflix/asgard/userdata/DefaultUserDataProvider.groovy b/src/groovy/com/netflix/asgard/userdata/DefaultUserDataProvider.groovy new file mode 100644 index 00000000..56cbef78 --- /dev/null +++ b/src/groovy/com/netflix/asgard/userdata/DefaultUserDataProvider.groovy @@ -0,0 +1,44 @@ +/* + * Copyright 2012 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
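// An illustrative check (log message text is hypothetical) of how the {"step":N} markers written
// by the workflow are parsed back out and grouped per step by Deployment.getLogForSteps() above,
// which the AngularJS deployment detail screen reads as logForSteps.
assert Deployment.parseStepIndex('2014-01-01 12:00:00 {"step":1}') == 1
assert Deployment.parseStepIndex('Creating Launch Configuration') == null
List<String> log = ['{"step":0}', 'Creating Launch Configuration', '{"step":1}', 'Disabling ASG']
Deployment deployment = new Deployment(null, null, null, null, null, null, null, null, 'running', log, [])
assert deployment.logForSteps == [['Creating Launch Configuration'], ['Disabling ASG']]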
+ */ +package com.netflix.asgard.userdata + +import com.netflix.asgard.ApplicationService +import com.netflix.asgard.ConfigService +import com.netflix.asgard.UserContext +import com.netflix.asgard.plugin.UserDataProvider +import javax.xml.bind.DatatypeConverter +import org.springframework.beans.factory.annotation.Autowired + +class DefaultUserDataProvider implements UserDataProvider { + + @Autowired + ConfigService configService + + @Autowired + ApplicationService applicationService + + @Override + String buildUserDataForVariables(UserContext userContext, String appName, String autoScalingGroupName, + String launchConfigName) { + + PropertiesUserDataProvider propertiesUserDataProvider = new PropertiesUserDataProvider( + configService: configService, applicationService: applicationService) + Map props = propertiesUserDataProvider.mapProperties(userContext, appName, autoScalingGroupName, + launchConfigName) + String result = props.collect { k, v -> "export ${k}=${v}" }.join('\n') + '\n' + DatatypeConverter.printBase64Binary(result.bytes) + } +} diff --git a/src/groovy/com/netflix/asgard/NetflixAdvancedUserDataProvider.groovy b/src/groovy/com/netflix/asgard/userdata/NetflixAdvancedUserDataProvider.groovy similarity index 67% rename from src/groovy/com/netflix/asgard/NetflixAdvancedUserDataProvider.groovy rename to src/groovy/com/netflix/asgard/userdata/NetflixAdvancedUserDataProvider.groovy index 9ef60256..1dfd5752 100644 --- a/src/groovy/com/netflix/asgard/NetflixAdvancedUserDataProvider.groovy +++ b/src/groovy/com/netflix/asgard/userdata/NetflixAdvancedUserDataProvider.groovy @@ -13,11 +13,17 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package com.netflix.asgard +package com.netflix.asgard.userdata import com.amazonaws.services.ec2.model.Image +import com.netflix.asgard.ApplicationService +import com.netflix.asgard.ConfigService +import com.netflix.asgard.PluginService +import com.netflix.asgard.Relationships +import com.netflix.asgard.UserContext import com.netflix.asgard.model.LaunchContext import com.netflix.asgard.plugin.AdvancedUserDataProvider +import com.netflix.asgard.plugin.UserDataProvider import java.util.regex.Matcher import org.springframework.beans.factory.annotation.Autowired @@ -27,8 +33,8 @@ import org.springframework.beans.factory.annotation.Autowired * * This user data creation plugin is used by Netflix, and not recommended for use by people outside Netflix. * - * For the newer AMIs created by Aminator, this implementation creates a short, simple user data string consisting of - * Unix-style export statements for name value pairs only. For other AMIs, this plugin delegates to the complex, legacy, + * For the newer AMIs created by Aminator, this implementation creates a short, simple user data string consisting only + * of name value pairs formatted like a properties file. For other AMIs, this plugin delegates to the complex, legacy, * closed-source UserDataProvider plugin used at Netflix for deployments of an older Base AMI that has different startup * behavior. 
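// A sketch of the difference between the two formats, using a hypothetical property map: the
// DefaultUserDataProvider above keeps the legacy shell "export" style, while the
// PropertiesUserDataProvider below emits plain key=value lines that work on any platform
// (including Windows); both results are Base64-encoded into the launch configuration's user data.
import javax.xml.bind.DatatypeConverter

Map<String, String> props = [CLOUD_APP: 'helloworld', CLOUD_STACK: 'example']
String exportStyle = props.collect { k, v -> "export ${k}=${v}" }.join('\n') + '\n'
String propertiesStyle = props.collect { k, v -> "${k}=${v}" }.join('\n') + '\n'
assert exportStyle == 'export CLOUD_APP=helloworld\nexport CLOUD_STACK=example\n'
assert propertiesStyle == 'CLOUD_APP=helloworld\nCLOUD_STACK=example\n'
String userData = DatatypeConverter.printBase64Binary(propertiesStyle.bytes)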
*/ @@ -54,18 +60,34 @@ class NetflixAdvancedUserDataProvider implements AdvancedUserDataProvider { String appName = appNameFromApplication ?: Relationships.appNameFromGroupName(groupName) ?: Relationships.packageFromAppVersion(image.appVersion) ?: '' + if (shouldUsePropertiesUserData(image)) { + UserDataProvider simpleProvider = new PropertiesUserDataProvider(configService: configService, + applicationService: applicationService) + return simpleProvider.buildUserDataForVariables(userContext, appName, groupName, launchConfigName) + } + + // If the AMI lacks a nflx-base version 2 or greater, use the complex legacy user data format. + pluginService.userDataProvider.buildUserDataForVariables(userContext, appName, groupName, launchConfigName) + } + + /** + * Determines whether or not the deployment of the specified image should have user data in properties file format. + * + * @return true if Asgard's configuration and the image have the characteristics that indicate the need for + * user data in a properties file format + */ + boolean shouldUsePropertiesUserData(Image image) { + if (configService.usePropertyFileUserDataForWindowsImages && image?.platform?.toLowerCase() == 'windows') { + return true + } // If the AMI's description shows a nflx-base version of 2 or greater, use the simple user data format. Matcher matcher = image?.description =~ /.*ancestor_version=nflx-base-([0-9]+)[^0-9].*/ if (matcher.matches()) { Integer majorVersion = matcher.group(1) as Integer if (majorVersion >= 2) { - DefaultUserDataProvider simpleProvider = new DefaultUserDataProvider(configService: configService, - applicationService: applicationService) - return simpleProvider.buildUserDataForVariables(userContext, appName, groupName, launchConfigName) + return true } } - - // If the AMI lacks a nflx-base version 2 or greater, use the complex legacy user data format. - pluginService.userDataProvider.buildUserDataForVariables(userContext, appName, groupName, launchConfigName) + false } } diff --git a/src/groovy/com/netflix/asgard/userdata/PropertiesUserDataProvider.groovy b/src/groovy/com/netflix/asgard/userdata/PropertiesUserDataProvider.groovy new file mode 100644 index 00000000..6b4b532f --- /dev/null +++ b/src/groovy/com/netflix/asgard/userdata/PropertiesUserDataProvider.groovy @@ -0,0 +1,78 @@ +/* + * Copyright 2012 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.netflix.asgard.userdata + +import com.netflix.asgard.ApplicationService +import com.netflix.asgard.ConfigService +import com.netflix.asgard.Relationships +import com.netflix.asgard.UserContext +import com.netflix.asgard.plugin.UserDataProvider +import com.netflix.frigga.Names +import javax.xml.bind.DatatypeConverter +import org.springframework.beans.factory.annotation.Autowired + +/** + * Provider of user data strings in a format similar to a properties file.
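// An illustrative run (the image description string is hypothetical) of the ancestor_version
// check that shouldUsePropertiesUserData above applies to non-Windows images:
String description = 'name=helloworld-1.0.0, ancestor_version=nflx-base-3.0'
def matcher = description =~ /.*ancestor_version=nflx-base-([0-9]+)[^0-9].*/
assert matcher.matches()
assert (matcher.group(1) as Integer) >= 2   // nflx-base 2 or later, so the properties format is chosen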
+ */ +class PropertiesUserDataProvider implements UserDataProvider { + + @Autowired + ConfigService configService + + @Autowired + ApplicationService applicationService + + @Override + String buildUserDataForVariables(UserContext userContext, String appName, String autoScalingGroupName, + String launchConfigName) { + + Map props = mapProperties(userContext, appName, autoScalingGroupName, launchConfigName) + String result = props.collect { k, v -> "${k}=${v}" }.join('\n') + '\n' + DatatypeConverter.printBase64Binary(result.bytes) + } + + /** + * Creates a map of environment keys to values for use in constructing user data strings, based on the specified + * cloud objects associated with the current deployment. + * + * @param userContext who, where, why + * @param appName the name of the application being deployed + * @param autoScalingGroupName the name of the ASG which will launch and manage the instances + * @param launchConfigName the name of the launch configuration for launching the instances + * @return a map of keys to values for the deployment environment + */ + Map mapProperties(UserContext userContext, String appName, String autoScalingGroupName, + String launchConfigName) { + Names names = Names.parseName(autoScalingGroupName) + String monitorBucket = applicationService.getMonitorBucket(userContext, appName, names.cluster) + String appGroup = applicationService.getRegisteredApplication(userContext, appName)?.group + [ + (prependNamespace(UserDataPropertyKeys.ENVIRONMENT)): configService.accountName ?: '', + (prependNamespace(UserDataPropertyKeys.MONITOR_BUCKET)): monitorBucket ?: '', + (prependNamespace(UserDataPropertyKeys.APP)): appName ?: '', + (prependNamespace(UserDataPropertyKeys.APP_GROUP)): appGroup ?: '', + (prependNamespace(UserDataPropertyKeys.STACK)): names.stack ?: '', + (prependNamespace(UserDataPropertyKeys.CLUSTER)): names.cluster ?: '', + (prependNamespace(UserDataPropertyKeys.AUTO_SCALE_GROUP)): autoScalingGroupName ?: '', + (prependNamespace(UserDataPropertyKeys.LAUNCH_CONFIG)): launchConfigName ?: '', + (UserDataPropertyKeys.EC2_REGION): userContext.region.code ?: '', + ] + Relationships.labeledEnvVarsMap(names, configService.userDataVarPrefix) + } + + private String prependNamespace(String key) { + "${configService.userDataVarPrefix}${key}" + } +} diff --git a/src/groovy/com/netflix/asgard/userdata/UserDataPropertyKeys.groovy b/src/groovy/com/netflix/asgard/userdata/UserDataPropertyKeys.groovy new file mode 100644 index 00000000..00ae66fe --- /dev/null +++ b/src/groovy/com/netflix/asgard/userdata/UserDataPropertyKeys.groovy @@ -0,0 +1,32 @@ +/* + * Copyright 2014 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.netflix.asgard.userdata + +/** + * Keys used in user data key-value pairs. Some of these keys are sometimes prepended with a configured namespace such + * as CLOUD_ or NETFLIX_. 
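// A hypothetical example of the Base64-decoded user data that PropertiesUserDataProvider produces
// for an ASG named 'helloworld-example-v001' when the configured prefix is 'CLOUD_'; the keys come
// from UserDataPropertyKeys below, the values from ConfigService, ApplicationService and frigga's
// name parsing.
String decodedUserData = 'CLOUD_ENVIRONMENT=prod\n' +
        'CLOUD_MONITOR_BUCKET=helloworld\n' +
        'CLOUD_APP=helloworld\n' +
        'CLOUD_APP_GROUP=webapps\n' +
        'CLOUD_STACK=example\n' +
        'CLOUD_CLUSTER=helloworld-example\n' +
        'CLOUD_AUTO_SCALE_GROUP=helloworld-example-v001\n' +
        'CLOUD_LAUNCH_CONFIG=helloworld-example-v001-1234567890\n' +
        'EC2_REGION=us-east-1\n'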
+ */ +class UserDataPropertyKeys { + static final String EC2_REGION = 'EC2_REGION' + static final String ENVIRONMENT = 'ENVIRONMENT' + static final String MONITOR_BUCKET = 'MONITOR_BUCKET' + static final String APP = 'APP' + static final String APP_GROUP = 'APP_GROUP' + static final String STACK = 'STACK' + static final String CLUSTER = 'CLUSTER' + static final String AUTO_SCALE_GROUP = 'AUTO_SCALE_GROUP' + static final String LAUNCH_CONFIG = 'LAUNCH_CONFIG' +} diff --git a/test/spec/controllers/deployment/detail.js b/test/spec/controllers/deployment/detail.js index 19a73dc2..b2a5a5c6 100644 --- a/test/spec/controllers/deployment/detail.js +++ b/test/spec/controllers/deployment/detail.js @@ -84,4 +84,26 @@ describe('Controller: DeploymentDetailCtrl', function () { $httpBackend.flush(); }); + it('should return step status', function () { + scope.deployment = { + status: "running", + steps: [0, 1, 2], + logForSteps: [0, 1] + }; + expect(scope.getStepStatus(0)).toEqual("success"); + expect(scope.getStepStatus(1)).toEqual("running"); + expect(scope.getStepStatus(2)).toEqual("queued"); + + scope.deployment.status = "failure"; + expect(scope.getStepStatus(0)).toEqual("success"); + expect(scope.getStepStatus(1)).toEqual("failure"); + expect(scope.getStepStatus(2)).toEqual("queued"); + + scope.deployment.status = "completed"; + scope.deployment.logForSteps = [0, 1, 2]; + expect(scope.getStepStatus(0)).toEqual("success"); + expect(scope.getStepStatus(1)).toEqual("success"); + expect(scope.getStepStatus(2)).toEqual("success"); + }); + }); diff --git a/test/spec/controllers/deployment/new.js b/test/spec/controllers/deployment/new.js index c1bc7ceb..04b0e809 100644 --- a/test/spec/controllers/deployment/new.js +++ b/test/spec/controllers/deployment/new.js @@ -19,37 +19,26 @@ describe('Controller: DeploymentNewCtrl', function () { }); })); - it('should set initial scope', function () { - $httpBackend.expectGET( - 'deployment/prepare/helloworld?deploymentTemplateName=CreateJudgeAndCleanUp&includeEnvironment=true').respond({ - deploymentOptions: 'deploymentOptions1', - environment: 'environment1', - lcOptions: 'lcOptions1', - asgOptions: { - name: "asgOptions1", - suspendedProcesses: ["AddToLoadBalancer"] - } - }); - $httpBackend.flush(); - expect(scope.clusterName).toEqual('helloworld'); - expect(scope.hideAdvancedItems).toEqual(true); - expect(scope.deploymentOptions).toEqual('deploymentOptions1'); - expect(scope.environment).toEqual('environment1'); - expect(scope.asgOptions.name).toEqual('asgOptions1'); - expect(scope.lcOptions).toEqual('lcOptions1'); - expect(scope.vpcId).toEqual(undefined); - expect(scope.suspendAZRebalance).toEqual(false); - expect(scope.suspendAddToLoadBalancer).toEqual(true); - }); - it('should set VPC id based on subnet purpose', function () { $httpBackend.expectGET( - 'deployment/prepare/helloworld?deploymentTemplateName=CreateJudgeAndCleanUp&includeEnvironment=true').respond({ + 'deployment/prepare/helloworld?deploymentTemplateName=CreateAndCleanUpPreviousAsg&includeEnvironment=true').respond({ + deploymentOptions: { steps: [] }, + asgOptions: { + subnetPurpose: "", + suspendedProcesses: [], + availabilityZones: [], + loadBalancerNames: [] + }, + lcOptions: { + securityGroups: [] + }, environment: { + subnetPurposes: [], purposeToVpcId: { 'internal': 'vpc1', 'external': 'vpc2' - } + }, + securityGroups: [] } }); $httpBackend.flush(); @@ -67,10 +56,22 @@ describe('Controller: DeploymentNewCtrl', function () { it('should toggle suspended processes', function () { 
$httpBackend.expectGET( - 'deployment/prepare/helloworld?deploymentTemplateName=CreateJudgeAndCleanUp&includeEnvironment=true').respond({ - asgOptions: { - suspendedProcesses: [] - } + 'deployment/prepare/helloworld?deploymentTemplateName=CreateAndCleanUpPreviousAsg&includeEnvironment=true').respond({ + deploymentOptions: { steps: [] }, + asgOptions: { + subnetPurpose: "", + suspendedProcesses: [], + availabilityZones: [], + loadBalancerNames: [] + }, + lcOptions: { + securityGroups: [] + }, + environment: { + purposeToVpcId: {}, + subnetPurposes: [], + securityGroups: [] + } }); $httpBackend.flush(); expect(scope.suspendAZRebalance).toEqual(false); @@ -107,14 +108,39 @@ describe('Controller: DeploymentNewCtrl', function () { it('should start deployment', function () { $httpBackend.expectGET( - 'deployment/prepare/helloworld?deploymentTemplateName=CreateJudgeAndCleanUp&includeEnvironment=true').respond({ - deploymentOptions: 'deploymentOptions1' + 'deployment/prepare/helloworld?deploymentTemplateName=CreateAndCleanUpPreviousAsg&includeEnvironment=true').respond({ + deploymentOptions: { steps: [] }, + asgOptions: { + subnetPurpose: "", + suspendedProcesses: [], + availabilityZones: [], + loadBalancerNames: [] + }, + lcOptions: { + securityGroups: [] + }, + environment: { + purposeToVpcId: {}, + subnetPurposes: [], + securityGroups: [] + } }); $httpBackend.flush(); expect(scope.startingDeployment).toEqual(undefined); scope.startDeployment(); expect(scope.startingDeployment).toEqual(true); - $httpBackend.expectPOST('deployment/start', {"deploymentOptions":"deploymentOptions1"}).respond(200, { + $httpBackend.expectPOST('deployment/start', { + deploymentOptions: { steps: [] }, + asgOptions: { + subnetPurpose: "", + suspendedProcesses: [], + availabilityZones: [], + loadBalancerNames: [] + }, + lcOptions: { + securityGroups: [] + } + }).respond(200, { deploymentId: "123" }); $httpBackend.flush(); @@ -123,14 +149,39 @@ describe('Controller: DeploymentNewCtrl', function () { it('should show errors on failure to start deployment', function () { $httpBackend.expectGET( - 'deployment/prepare/helloworld?deploymentTemplateName=CreateJudgeAndCleanUp&includeEnvironment=true').respond({ - deploymentOptions: 'deploymentOptions1' + 'deployment/prepare/helloworld?deploymentTemplateName=CreateAndCleanUpPreviousAsg&includeEnvironment=true').respond({ + deploymentOptions: { steps: [] }, + asgOptions: { + subnetPurpose: "", + suspendedProcesses: [], + availabilityZones: [], + loadBalancerNames: [] + }, + lcOptions: { + securityGroups: [] + }, + environment: { + purposeToVpcId: {}, + subnetPurposes: [], + securityGroups: [] + } }); $httpBackend.flush(); expect(scope.startingDeployment).toEqual(undefined); scope.startDeployment(); expect(scope.startingDeployment).toEqual(true); - $httpBackend.expectPOST('deployment/start', {"deploymentOptions":"deploymentOptions1"}).respond(422, { + $httpBackend.expectPOST('deployment/start', { + deploymentOptions: { steps: [] }, + asgOptions: { + subnetPurpose: "", + suspendedProcesses: [], + availabilityZones: [], + loadBalancerNames: [] + }, + lcOptions: { + securityGroups: [] + } + }).respond(422, { validationErrors: 'errors' }); $httpBackend.flush(); @@ -139,4 +190,212 @@ describe('Controller: DeploymentNewCtrl', function () { expect($location.path()).toEqual(''); }); + it('should conditionally allow steps', function() { + $httpBackend.expectGET( + 'deployment/prepare/helloworld?deploymentTemplateName=CreateAndCleanUpPreviousAsg&includeEnvironment=true').respond({ + 
deploymentOptions: { + steps: [ + {"type":"Wait","durationMinutes":60}, + {"type":"CreateAsg"}, + {"type":"Resize","targetAsg":"Next","capacity":0,"startUpTimeoutMinutes":40}, + {"type":"Judgment","durationMinutes":120}, + {"type":"DisableAsg","targetAsg":"Previous"} + ] + }, + asgOptions: { + subnetPurpose: "", + suspendedProcesses: [], + availabilityZones: [], + loadBalancerNames: [] + }, + lcOptions: { + securityGroups: [] + }, + environment: { + purposeToVpcId: {}, + subnetPurposes: [], + securityGroups: [] + } + }); + $httpBackend.flush(); + expect(scope.isStepAllowed("Wait", 4)).toEqual(true); + expect(scope.isStepAllowed("Wait", 0)).toEqual(false); // before another Wait + expect(scope.isStepAllowed("Wait", 2)).toEqual(false); // after another Wait + expect(scope.isStepAllowed("Wait", 10)).toEqual(false); // at the end + expect(scope.isStepAllowed("Judgment", 4)).toEqual(true); + expect(scope.isStepAllowed("Judgment", 6)).toEqual(false); // before another Judgment + expect(scope.isStepAllowed("Judgment", 8)).toEqual(false); // after another Judgment + expect(scope.isStepAllowed("Judgment", 10)).toEqual(false); // at the end + expect(scope.isStepAllowed("ResizeAsg", 4)).toEqual(true); + expect(scope.isStepAllowed("ResizeAsg", 2)).toEqual(false); // before Create + expect(scope.isStepAllowed("DisableAsg", 8)).toEqual(true); + expect(scope.isStepAllowed("DisableAsg", 2)).toEqual(false); // before Create + expect(scope.isStepAllowed("EnableAsg", 4)).toEqual(true); + expect(scope.isStepAllowed("EnableAsg", 2)).toEqual(false); // before Create + expect(scope.isStepAllowed("DeleteAsg", 2)).toEqual(false); // before Create + expect(scope.isStepAllowed("DeleteAsg", 10)).toEqual(true); // at the end + expect(scope.isStepAllowed("DeleteAsg", 8)).toEqual(false); // not at the end + }); + + it('should conditionally allow steps with DeleteAsg step', function() { + $httpBackend.expectGET( + 'deployment/prepare/helloworld?deploymentTemplateName=CreateAndCleanUpPreviousAsg&includeEnvironment=true').respond({ + deploymentOptions: { + steps: [ + {"type":"CreateAsg"}, + {"type":"DisableAsg","targetAsg":"Previous"}, + {"type":"DeleteAsg","targetAsg":"Previous"} + ] + }, + asgOptions: { + subnetPurpose: "", + suspendedProcesses: [], + availabilityZones: [], + loadBalancerNames: [] + }, + lcOptions: { + securityGroups: [] + }, + environment: { + purposeToVpcId: {}, + subnetPurposes: [], + securityGroups: [] + } + }); + $httpBackend.flush(); + expect(scope.isStepAllowed("DeleteAsg", 4)).toEqual(false); // there can be only one + expect(scope.isStepAllowed("DeleteAsg", 6)).toEqual(false); // there can be only one + expect(scope.isStepAllowed("DisableAsg", 4)).toEqual(true); + expect(scope.isStepAllowed("DisableAsg", 6)).toEqual(false); // after Delete + expect(scope.isStepAllowed("EnableAsg", 4)).toEqual(true); + expect(scope.isStepAllowed("EnableAsg", 6)).toEqual(false); // after Delete + expect(scope.isStepAllowed("ResizeAsg", 4)).toEqual(true); + expect(scope.isStepAllowed("ResizeAsg", 6)).toEqual(false); // after Delete + }); + + it('should add step', function() { + $httpBackend.expectGET( + 'deployment/prepare/helloworld?deploymentTemplateName=CreateAndCleanUpPreviousAsg&includeEnvironment=true').respond({ + deploymentOptions: { + steps: [ + {"type":"CreateAsg"}, + {"type":"DeleteAsg","targetAsg":"Previous"} + ] + }, + asgOptions: { + subnetPurpose: "", + suspendedProcesses: [], + availabilityZones: [], + loadBalancerNames: [] + }, + lcOptions: { + securityGroups: [] + }, + environment: { + 
purposeToVpcId: {}, + subnetPurposes: [], + securityGroups: [] + } + }); + $httpBackend.flush(); + scope.toggleShowStepTypes(2); + + expect(scope.deploymentOptions.steps).toEqual([ + { type : 'CreateAsg' }, + { type : 'DeleteAsg', targetAsg : 'Previous' } + ]); + expect(scope.generated.stepsDisplay).toEqual([ + { showSteps : false }, + { type : 'CreateAsg' }, + { showSteps : true }, + { type : 'DeleteAsg', targetAsg : 'Previous' }, + { showSteps : false } + ]); + expect(scope.generated.jsonSteps). + toEqual('[\n {"type":"CreateAsg"},\n {"type":"DeleteAsg","targetAsg":"Previous"}\n]'); + + scope.addStep("DisableAsg", 2); + scope.$apply(); + + expect(scope.deploymentOptions.steps).toEqual([ + { type : 'CreateAsg' }, + { type : 'DisableAsg', targetAsg : 'Previous' }, + { type : 'DeleteAsg', targetAsg : 'Previous' } + ]); + expect(scope.generated.stepsDisplay).toEqual([ + { showSteps : false }, + { type : 'CreateAsg' }, + { showSteps : false }, + { type : 'DisableAsg', targetAsg : 'Previous' }, + { showSteps : false }, + { type : 'DeleteAsg', targetAsg : 'Previous' }, + { showSteps : false } + ]); + expect(scope.generated.jsonSteps). + toEqual('[\n {"type":"CreateAsg"},\n {"type":"DisableAsg","targetAsg":"Previous"},\n {"type":"DeleteAsg","targetAsg":"Previous"}\n]'); + }); + + it('should remove step', function() { + $httpBackend.expectGET( + 'deployment/prepare/helloworld?deploymentTemplateName=CreateAndCleanUpPreviousAsg&includeEnvironment=true').respond({ + deploymentOptions: { + steps: [ + {"type":"CreateAsg"}, + { type : 'DisableAsg', targetAsg : 'Previous' }, + {"type":"DeleteAsg","targetAsg":"Previous"} + ] + }, + asgOptions: { + subnetPurpose: "", + suspendedProcesses: [], + availabilityZones: [], + loadBalancerNames: [] + }, + lcOptions: { + securityGroups: [] + }, + environment: { + purposeToVpcId: {}, + subnetPurposes: [], + securityGroups: [] + } + }); + $httpBackend.flush(); + scope.toggleShowStepTypes(2); + + expect(scope.deploymentOptions.steps).toEqual([ + { type : 'CreateAsg' }, + { type : 'DisableAsg', targetAsg : 'Previous' }, + { type : 'DeleteAsg', targetAsg : 'Previous' } + ]); + expect(scope.generated.stepsDisplay).toEqual([ + { showSteps : false }, + { type : 'CreateAsg' }, + { showSteps : true }, + { type : 'DisableAsg', targetAsg : 'Previous' }, + { showSteps : false }, + { type : 'DeleteAsg', targetAsg : 'Previous' }, + { showSteps : false } + ]); + expect(scope.generated.jsonSteps). + toEqual('[\n {"type":"CreateAsg"},\n {"type":"DisableAsg","targetAsg":"Previous"},\n {"type":"DeleteAsg","targetAsg":"Previous"}\n]'); + + scope.removeStep(2); + scope.$apply(); + + expect(scope.deploymentOptions.steps).toEqual([ + { type : 'CreateAsg' }, + { type : 'DeleteAsg', targetAsg : 'Previous' } + ]); + expect(scope.generated.stepsDisplay).toEqual([ + { showSteps : false }, + { type : 'CreateAsg' }, + { showSteps : false }, + { type : 'DeleteAsg', targetAsg : 'Previous' }, + { showSteps : false } + ]); + expect(scope.generated.jsonSteps). 
+ toEqual('[\n {"type":"CreateAsg"},\n {"type":"DeleteAsg","targetAsg":"Previous"}\n]'); + }); + }); diff --git a/test/unit/com/netflix/asgard/AutoScalingControllerSpec.groovy b/test/unit/com/netflix/asgard/AutoScalingControllerSpec.groovy index 075b2ca0..5d62ad68 100644 --- a/test/unit/com/netflix/asgard/AutoScalingControllerSpec.groovy +++ b/test/unit/com/netflix/asgard/AutoScalingControllerSpec.groovy @@ -24,8 +24,8 @@ import com.amazonaws.services.autoscaling.model.ScalingPolicy import com.amazonaws.services.cloudwatch.model.MetricAlarm import com.amazonaws.services.ec2.model.GroupIdentifier import com.amazonaws.services.ec2.model.Image -import com.amazonaws.services.ec2.model.SecurityGroup import com.amazonaws.services.elasticloadbalancing.model.LoadBalancerDescription +import com.fasterxml.jackson.databind.ObjectMapper import com.google.common.collect.ImmutableSet import com.google.common.collect.Multiset import com.google.common.collect.TreeMultiset @@ -390,4 +390,17 @@ class AutoScalingControllerSpec extends Specification { 'true' | true 'false' | false } + + void 'should generate group name and environment variables from ASG form inputs'() { + request.format = 'json' + params.appName = 'hello-c0latam' + configService.userDataVarPrefix >> 'CLOUD_' + + when: + controller.generateName() + + then: + new ObjectMapper().readValue(response.contentAsString, Map) == + [groupName: 'hello-c0latam', envVars: ['CLOUD_COUNTRIES=latam']] + } } diff --git a/test/unit/com/netflix/asgard/DeploymentControllerSpec.groovy b/test/unit/com/netflix/asgard/DeploymentControllerSpec.groovy index 19544ae4..e053c441 100644 --- a/test/unit/com/netflix/asgard/DeploymentControllerSpec.groovy +++ b/test/unit/com/netflix/asgard/DeploymentControllerSpec.groovy @@ -28,7 +28,13 @@ import com.amazonaws.services.simpleworkflow.flow.ManualActivityCompletionClient import com.amazonaws.services.simpleworkflow.model.WorkflowExecution import com.fasterxml.jackson.databind.ObjectMapper import com.netflix.asgard.deployment.DeploymentWorkflowOptions -import com.netflix.asgard.deployment.ProceedPreference +import com.netflix.asgard.deployment.steps.CreateAsgStep +import com.netflix.asgard.deployment.steps.DeleteAsgStep +import com.netflix.asgard.deployment.steps.DisableAsgStep +import com.netflix.asgard.deployment.steps.JudgmentStep +import com.netflix.asgard.deployment.steps.ResizeStep +import com.netflix.asgard.deployment.steps.WaitStep +import com.netflix.asgard.model.AsgRoleInCluster import com.netflix.asgard.model.AutoScalingGroupBeanOptions import com.netflix.asgard.model.AutoScalingGroupData import com.netflix.asgard.model.AutoScalingGroupHealthCheckType @@ -61,7 +67,7 @@ class DeploymentControllerSpec extends Specification { "subnetPurpose":"internal","healthCheckType":"EC2", "placementGroup":null,"launchConfigurationName":null,"desiredCapacity":"3", "availabilityZones":["us-west-1c","us-west-1a"],"loadBalancerNames":["helloclay--frontend"], - "minSize":0,"healthCheckGracePeriod":600,"defaultCooldown":10,"maxSize":"${maxSize}", + "minSize":0,"healthCheckGracePeriod":600,"defaultCooldown":10,"maxSize":${maxSize}, "terminationPolicies":["OldestLaunchConfiguration"]}, "lcOptions": {"securityGroups":["sg-12345678"],"kernelId":"","launchConfigurationName":null, @@ -69,12 +75,17 @@ class DeploymentControllerSpec extends Specification { "imageId":"ami-12345678","keyName":"keypair","ramdiskId":"","instanceMonitoringIsEnabled":false, "iamInstanceProfile":"BaseIAMRole","ebsOptimized":false}, "deploymentOptions": - 
{"clusterName":"helloclay--test", - "desiredCapacityStartUpTimeoutMinutes":"41","disablePreviousAsg":"Ask", - "canaryCapacity":1,"scaleUp":"Ask","deletePreviousAsg":"Ask","delayDurationMinutes":"5","doCanary":true, - "canaryStartUpTimeoutMinutes":"31","notificationDestination":"cmccoy@netflix.com", - "desiredCapacityJudgmentPeriodMinutes":"121","canaryJudgmentPeriodMinutes":"61", - "fullTrafficJudgmentPeriodMinutes":"241"}}""" as String + {"clusterName":"helloclay--test","notificationDestination":"cmccoy@netflix.com", "steps": [ + {"type": "Wait", "durationMinutes": 5, "description": "delay"}, + {"type": "CreateAsg"}, + {"type": "Resize", "capacity": 1, "startUpTimeoutMinutes": 31}, + {"type": "Judgment", "durationMinutes": 61}, + {"type": "Resize", "capacity": 3, "startUpTimeoutMinutes": 41}, + {"type": "Judgment", "durationMinutes": 121}, + {"type": "DisableAsg", "targetAsg": "Previous"}, + {"type": "Judgment", "durationMinutes": 241}, + {"type": "DeleteAsg", "targetAsg": "Previous"} + ]}}""" as String } void setup() { @@ -142,17 +153,18 @@ class DeploymentControllerSpec extends Specification { new DeploymentWorkflowOptions( clusterName: "helloclay--test", notificationDestination: "cmccoy@netflix.com", - delayDurationMinutes: 5, - doCanary: true, - canaryCapacity: 1, - canaryStartUpTimeoutMinutes: 31, - canaryJudgmentPeriodMinutes: 61, - scaleUp: ProceedPreference.Ask, - desiredCapacityStartUpTimeoutMinutes: 41, - desiredCapacityJudgmentPeriodMinutes: 121, - disablePreviousAsg: ProceedPreference.Ask, - fullTrafficJudgmentPeriodMinutes: 241, - deletePreviousAsg: ProceedPreference.Ask), + steps: [ + new WaitStep(durationMinutes: 5, description: "delay"), + new CreateAsgStep(), + new ResizeStep(capacity: 1, startUpTimeoutMinutes: 31), + new JudgmentStep(durationMinutes: 61), + new ResizeStep(capacity: 3, startUpTimeoutMinutes: 41), + new JudgmentStep(durationMinutes: 121), + new DisableAsgStep(targetAsg: AsgRoleInCluster.Previous), + new JudgmentStep(durationMinutes: 241), + new DeleteAsgStep(targetAsg: AsgRoleInCluster.Previous) + ] + ), new LaunchConfigurationBeanOptions( launchConfigurationName: null, imageId: "ami-12345678", @@ -238,13 +250,17 @@ class DeploymentControllerSpec extends Specification { def 'should show deployment'() { when: - def result = controller.show('123') + controller.show('123') then: - result.deployment == new Deployment('123') + response.status == 200 + objectMapper.readValue(response.text, Map) == objectMapper.readValue("""{"id":"123","clusterName":"cluster", + "region":"us-west-1","workflowExecution":null,"description":null,"owner":null,"startTime":null, + "updateTime":null,"status":null,"log":null,"steps":null,"token":null,"durationString":"0s","done":true, + "regionCode":"us-west-1","logForSteps":[[]]}""", Map) and: - 1 * controller.deploymentService.getDeploymentById('123') >> new Deployment('123') + 1 * controller.deploymentService.getDeploymentById('123') >> new Deployment('123', 'cluster', Region.US_WEST_1) } def 'should not show missing deployment'() { @@ -360,6 +376,7 @@ class DeploymentControllerSpec extends Specification { } with(controller.awsEc2Service) { 1 * getSubnets(_) >> subnets + 1 * getSecurityGroup(_, 'sg-12345678') >> new SecurityGroup(groupId: 'sg-12345678') } 0 * _ } @@ -404,7 +421,7 @@ class DeploymentControllerSpec extends Specification { ]) when: - controller.prepare("helloclay--test", true, "CreateJudgeAndCleanUp") + controller.prepare("helloclay--test", true, "CreateAndCleanUpPreviousAsg") then: response.status == 200 @@ -412,17 
+429,12 @@ class DeploymentControllerSpec extends Specification { deploymentOptions: [ clusterName: "helloclay--test", notificationDestination: "jdoe@netflix.com", - delayDurationMinutes: 0, - doCanary: false, - canaryCapacity: 1, - canaryStartUpTimeoutMinutes: 30, - canaryJudgmentPeriodMinutes: 60, - scaleUp: "Ask", - desiredCapacityStartUpTimeoutMinutes: 40, - desiredCapacityJudgmentPeriodMinutes: 120, - disablePreviousAsg: "Ask", - fullTrafficJudgmentPeriodMinutes: 240, - deletePreviousAsg: "Ask" + steps:[ + [type: "CreateAsg"], + [type: "Resize", targetAsg: "Next", capacity: 3, startUpTimeoutMinutes: 40], + [type: "DisableAsg", targetAsg: "Previous"], + [type: "DeleteAsg", targetAsg: "Previous"] + ] ], lcOptions: [ launchConfigurationName: null, @@ -472,7 +484,7 @@ class DeploymentControllerSpec extends Specification { [id: "ELB", description: "Replace instances that fail ELB health check"] ], instanceTypes: [[id: null, price:""]], - securityGroups: [[id: "sg-1", name: "hcsg", selection: "sg-1", vpcId: "vpc1"]], + securityGroups: [[id: "sg-1", name: "hcsg", vpcId: "vpc1"]], images: [[id: "img123", imageLocation: "imgloc"]], keys: ["key1"], spotUrl: "spotUrl" @@ -493,6 +505,7 @@ class DeploymentControllerSpec extends Specification { 1 * getEffectiveSecurityGroups(_) >> [new SecurityGroup(groupId: "sg-1", groupName: "hcsg", vpcId: "vpc1")] 1 * getAvailabilityZones(_) >> [new AvailabilityZone(zoneName: "us-east-1")] 1 * getKeys(_) >> [new KeyPairInfo(keyName: "key1")] + 1 * getSecurityGroup(_, 'sg-12345678') >> new SecurityGroup(groupId: 'sg-12345678') } with(controller.awsLoadBalancerService) { 1 * getLoadBalancers(_) >> [new LoadBalancerDescription(loadBalancerName: "lb1", vPCId: "vpc1")] diff --git a/test/unit/com/netflix/asgard/DeploymentServiceUnitSpec.groovy b/test/unit/com/netflix/asgard/DeploymentServiceUnitSpec.groovy index c5c9b884..a8fee621 100644 --- a/test/unit/com/netflix/asgard/DeploymentServiceUnitSpec.groovy +++ b/test/unit/com/netflix/asgard/DeploymentServiceUnitSpec.groovy @@ -51,7 +51,7 @@ class DeploymentServiceUnitSpec extends Specification { Closure newDeployment = { int sequenceNumber -> new Deployment(sequenceNumber as String, null, null, null, null, null, new Date(sequenceNumber), - new Date(sequenceNumber), 'running', []) + new Date(sequenceNumber), 'running', [], []) } def setup() { @@ -174,7 +174,7 @@ class DeploymentServiceUnitSpec extends Specification { Deployment deployment = deploymentService.getRunningDeploymentForCluster('helloworld-example') then: - deployment == new Deployment('123', null, null, null, null, null, null, null, 'running', []) + deployment == new Deployment('123', null, null, null, null, null, null, null, 'running', [], []) 1 * awsSimpleWorkflowService.getOpenWorkflowExecutionForObjectLink(link) >> new WorkflowExecutionInfo( tagList: new SwfWorkflowTags(id: '123').constructTags()) } diff --git a/test/unit/com/netflix/asgard/ImageServiceLastReferencedTaggingSpec.groovy b/test/unit/com/netflix/asgard/ImageServiceLastReferencedTaggingSpec.groovy index 4cbe11f8..a1dbbb01 100644 --- a/test/unit/com/netflix/asgard/ImageServiceLastReferencedTaggingSpec.groovy +++ b/test/unit/com/netflix/asgard/ImageServiceLastReferencedTaggingSpec.groovy @@ -20,6 +20,7 @@ import com.amazonaws.services.ec2.model.Image import com.amazonaws.services.ec2.model.Instance import grails.converters.JSON +@SuppressWarnings("GroovyAssignabilityCheck") class ImageServiceLastReferencedTaggingSpec extends ImageServiceSpec { def 'should tag if image is referenced in test 
instance'() { diff --git a/test/unit/com/netflix/asgard/ImageServiceReplicateTagsSpec.groovy b/test/unit/com/netflix/asgard/ImageServiceReplicateTagsSpec.groovy index a715c7a8..c007ba5a 100644 --- a/test/unit/com/netflix/asgard/ImageServiceReplicateTagsSpec.groovy +++ b/test/unit/com/netflix/asgard/ImageServiceReplicateTagsSpec.groovy @@ -37,7 +37,7 @@ class ImageServiceReplicateTagsSpec extends ImageServiceSpec { imageService.runReplicateImageTags() then: - 1 * restClientService.post({ it =~ /\/image\/addTags/ }, expectedPostData) >> 200 + 2 * restClientService.post({ it =~ /\/image\/addTags/ }, expectedPostData) >> 200 } def 'should call separate updates for same key and different value'() { @@ -54,8 +54,8 @@ class ImageServiceReplicateTagsSpec extends ImageServiceSpec { imageService.runReplicateImageTags() then: - 1 * restClientService.post({ it =~ /\/image\/addTags/ }, expectedPostData) >> 200 - 1 * restClientService.post({ it =~ /\/image\/addTags/ }, expectedPostData2) >> 200 + 2 * restClientService.post({ it =~ /\/image\/addTags/ }, expectedPostData) >> 200 + 2 * restClientService.post({ it =~ /\/image\/addTags/ }, expectedPostData2) >> 200 } def 'should delete tags if missing from production'() { @@ -71,7 +71,7 @@ class ImageServiceReplicateTagsSpec extends ImageServiceSpec { imageService.runReplicateImageTags() then: - 1 * restClientService.post({ it =~ /\/image\/removeTags/ }, expectedPostData) >> 200 + 2 * restClientService.post({ it =~ /\/image\/removeTags/ }, expectedPostData) >> 200 } private setupReplicateTestAndProdImages(List testImages, List prodImages) { @@ -94,8 +94,8 @@ class ImageServiceReplicateTagsSpec extends ImageServiceSpec { } GPathResult prodImagesXml = XML.parse(sw.toString()) as GPathResult awsEc2Service.getAccountImages(UserContext.auto()) >> testImages - 1 * restClientService.getAsXml({ it =~ /\/us-east-1\/image\/list\.xml/ }) >> prodImagesXml - + restClientService.getAsXml({ it =~ /\/us-east-1\/image\/list\.xml/ }) >> prodImagesXml + configService.getPromotionTargetServerRootUrls() >> ['http://staging', 'http://prod'] restClientService.getAsText(_, _) >> InetAddress.getLocalHost().getHostName() restClientService.getResponseCode(_) >> 200 awsEc2Service.getAccountImages(_) >> [] diff --git a/test/unit/com/netflix/asgard/ImageServiceSpec.groovy b/test/unit/com/netflix/asgard/ImageServiceSpec.groovy index e690dbcc..236c2ada 100644 --- a/test/unit/com/netflix/asgard/ImageServiceSpec.groovy +++ b/test/unit/com/netflix/asgard/ImageServiceSpec.groovy @@ -48,6 +48,7 @@ abstract class ImageServiceSpec extends Specification { void setupLastReferencedDefaults() { awsEc2Service.getInstances(_) >> [] awsAutoScalingService.getLaunchConfigurations(_) >> [] + configService.getPromotionTargetServerRootUrls() >> ['http://prod'] restClientService.getAsJson({ it =~ /\/image\/used.json/ }) >> JSON.parse('[]') } diff --git a/test/unit/com/netflix/asgard/LaunchTemplateServiceSpec.groovy b/test/unit/com/netflix/asgard/LaunchTemplateServiceSpec.groovy index af4c2867..dfa059fa 100644 --- a/test/unit/com/netflix/asgard/LaunchTemplateServiceSpec.groovy +++ b/test/unit/com/netflix/asgard/LaunchTemplateServiceSpec.groovy @@ -23,6 +23,7 @@ import com.netflix.asgard.model.LaunchContext import com.netflix.asgard.model.MonitorBucketType import com.netflix.asgard.plugin.AdvancedUserDataProvider import com.netflix.asgard.plugin.UserDataProvider +import com.netflix.asgard.userdata.DefaultAdvancedUserDataProvider import spock.lang.Specification @SuppressWarnings("GroovyAssignabilityCheck") diff 
--git a/test/unit/com/netflix/asgard/RelationshipsSpec.groovy b/test/unit/com/netflix/asgard/RelationshipsSpec.groovy new file mode 100644 index 00000000..e7d7174a --- /dev/null +++ b/test/unit/com/netflix/asgard/RelationshipsSpec.groovy @@ -0,0 +1,641 @@ +/* + * Copyright 2012 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.netflix.asgard + +import com.netflix.frigga.Names +import com.netflix.frigga.ami.AppVersion +import org.joda.time.DateTime +import spock.lang.Specification + +@SuppressWarnings("GroovyAccessibility") +class RelationshipsSpec extends Specification { + + void setup() { + new MonkeyPatcherService().createDynamicMethods() + } + + void 'should build next auto scaling group name'() { + expect: + Relationships.buildNextAutoScalingGroupName(oldGroup) == newGroup + + where: + oldGroup | newGroup + "discovery-dev" | "discovery-dev-v000" + "discovery-dev-v999" | "discovery-dev-v000" + "discovery-dev-v998" | "discovery-dev-v999" + "discovery-dev-v997" | "discovery-dev-v998" + "discovery-dev-v000" | "discovery-dev-v001" + "discovery-dev-v001" | "discovery-dev-v002" + "discovery-dev-v002" | "discovery-dev-v003" + "discovery-dev-v521" | "discovery-dev-v522" + } + + void 'should parse a compound name that contains a dot'() { + + when: + Names names = Relationships.dissectCompoundName("chukwa.collector_1-v889") + + then: + names.group == "chukwa.collector_1-v889" + names.cluster == "chukwa.collector_1" + names.app == "chukwa.collector_1" + names.stack == null + names.detail == null + names.push == "v889" + names.sequence == 889 + } + + void 'should fail to parse an invalid compound name'() { + + when: + Names names = Relationships.dissectCompoundName('nccp-moviecontrol%27') + + then: + names.group == null + names.cluster == null + names.app == null + names.stack == null + names.detail == null + names.push == null + names.sequence == null + } + + void 'should parse names of auto scaling groups'() { + + when: + Names names = Relationships.dissectCompoundName(name) + + then: + names.group == group + names.cluster == cluster + names.app == app + names.stack == stack + names.detail == detail + names.push == push + names.sequence == seq + + where: + name | group | cluster | app | stack | detail | push | seq + null | null | null | null | null | null | null | null + 'actor' | 'actor' | 'actor' | 'actor' | null | null | null | null + 'actor-v003' | 'actor-v003' | 'actor' | 'actor' | null | null | 'v003' | 3 + 'actor--v003' | 'actor--v003' | 'actor-' | 'actor' | null | null | 'v003' | 3 + 'actor---v003' | 'actor---v003' | 'actor--' | 'actor' | null | null | 'v003' | 3 + 'api-test-A' | 'api-test-A' | 'api-test-A' | 'api' | 'test' | 'A' | null | null + 'api-test-A-v406' | 'api-test-A-v406' | 'api-test-A' | 'api' | 'test' | 'A' | 'v406' | 406 + 'api-test101' | 'api-test101' | 'api-test101' | 'api' | 'test101' | null | null | null + 'chip_1' | 'chip_1' | 'chip_1' | 'chip_1' | null | null | null | null + 'chip_1-v889' | 'chip_1-v889' | 'chip_1' | 'chip_1' | 
null | null | 'v889' | 889 + 'disc-dev' | 'disc-dev' | 'disc-dev' | 'disc' | 'dev' | null | null | null + 'disc-us-e-1d' | 'disc-us-e-1d' | 'disc-us-e-1d' | 'disc' | 'us' | 'e-1d' | null | null + 'disc-us-e-1d-0' | 'disc-us-e-1d-0' | 'disc-us-e-1d-0' | 'disc' | 'us' | 'e-1d-0' | null | null + 'd-us-e-1-0-v223' | 'd-us-e-1-0-v223' | 'd-us-e-1-0' | 'd' | 'us' | 'e-1-0' | 'v223' | 223 + } + + void 'should parse names of auto scaling groups with labeled variables'() { + + when: + Names names = Relationships.dissectCompoundName("actiondrainer") + + then: + "actiondrainer" == names.group + "actiondrainer" == names.cluster + "actiondrainer" == names.app + null == names.stack + null == names.detail + null == names.push + null == names.sequence + null == names.countries + null == names.devPhase + null == names.hardware + null == names.partners + null == names.revision + null == names.usedBy + null == names.redBlackSwap + null == names.zone + + when: + names = Relationships.dissectCompoundName( + 'cass-nccpint-random-junk-c0america-d0prod-h0xbox-p0vizio-r027-u0nccp-w0A-z0useast1a-v003') + + then: + 'cass-nccpint-random-junk-c0america-d0prod-h0xbox-p0vizio-r027-u0nccp-w0A-z0useast1a-v003' == names.group + 'cass-nccpint-random-junk-c0america-d0prod-h0xbox-p0vizio-r027-u0nccp-w0A-z0useast1a' == names.cluster + 'cass' == names.app + 'nccpint' == names.stack + 'random-junk' == names.detail + 'v003' == names.push + 3 == names.sequence + 'america' == names.countries + 'prod' == names.devPhase + 'xbox' == names.hardware + 'vizio' == names.partners + '27' == names.revision + 'nccp' == names.usedBy + 'A' == names.redBlackSwap + 'useast1a' == names.zone + + when: + names = Relationships.dissectCompoundName('cass-nccpintegration-c0northamerica-d0prod') + + then: + names.group == 'cass-nccpintegration-c0northamerica-d0prod' + names.cluster == 'cass-nccpintegration-c0northamerica-d0prod' + names.app == 'cass' + names.stack == 'nccpintegration' + names.detail == null + names.push == null + names.sequence == null + names.countries == 'northamerica' + names.devPhase == 'prod' + names.hardware == null + names.partners == null + names.revision == null + names.usedBy == null + names.redBlackSwap == null + names.zone == null + + when: + names = Relationships.dissectCompoundName('cass--my-stuff-c0northamerica-d0prod') + + then: + names.group == 'cass--my-stuff-c0northamerica-d0prod' + names.cluster == 'cass--my-stuff-c0northamerica-d0prod' + names.app == 'cass' + names.stack == null + names.detail == 'my-stuff' + names.push == null + names.sequence == null + names.countries == 'northamerica' + names.devPhase == 'prod' + names.hardware == null + names.partners == null + names.revision == null + names.usedBy == null + names.redBlackSwap == null + names.zone == null + + when: + names = Relationships.dissectCompoundName('cass-c0northamerica-d0prod') + + then: + names.group == 'cass-c0northamerica-d0prod' + names.cluster == 'cass-c0northamerica-d0prod' + names.app == 'cass' + names.stack == null + names.detail == null + names.push == null + names.sequence == null + names.countries == 'northamerica' + names.devPhase == 'prod' + names.hardware == null + names.partners == null + names.revision == null + names.usedBy == null + names.redBlackSwap == null + names.zone == null + + when: + names = Relationships.dissectCompoundName('cass-c0northamerica-d0prod-v102') + + then: + names.group == 'cass-c0northamerica-d0prod-v102' + names.cluster == 'cass-c0northamerica-d0prod' + names.app == 'cass' + names.stack == null + names.detail == 
null + names.push == 'v102' + names.sequence == 102 + names.countries == 'northamerica' + names.devPhase == 'prod' + names.hardware == null + names.partners == null + names.revision == null + names.usedBy == null + names.redBlackSwap == null + names.zone == null + + when: + names = Relationships.dissectCompoundName('cass-v102') + + then: + names.group == 'cass-v102' + names.cluster == 'cass' + names.app == 'cass' + names.stack == null + names.detail == null + names.push == 'v102' + names.sequence == 102 + names.countries == null + names.devPhase == null + names.hardware == null + names.partners == null + names.revision == null + names.usedBy == null + names.redBlackSwap == null + names.zone == null + } + + void 'should parse appversion string'() { + + when: + AppVersion appVersion = Relationships.dissectAppVersion(appversion) + + then: + appVersion.packageName == pack + appVersion.version == ver + appVersion.commit == commit + appVersion.buildNumber == buildNum + appVersion.buildJobName == job + + where: + appversion | pack | ver | commit | buildNum | job + "hello-1.0.0-592112" | "hello" | "1.0.0" | "592112" | null | null + "hello-1.0.0-592112.h154" | "hello" | "1.0.0" | "592112" | "154" | null + "hello-int-1.0.0-592112.h154/WE-WAPP-hello/154" | "hello-int" | "1.0.0" | "592112" | "154" | "WE-WAPP-hello" + "hello-1.0.0-592112.h154/WE-WAPP-hello/154" | "hello" | "1.0.0" | "592112" | "154" | "WE-WAPP-hello" + } + + void 'should fail to parse invalid appversion string'() { + expect: + Relationships.dissectAppVersion(appversion) == null + + where: + appversion << [null, '', 'blah', 'blah blah blah'] + } + + void 'should extract package name from appversion string'() { + expect: + Relationships.packageFromAppVersion(appversion) == pack + + where: + appversion | pack + 'dfjsdfkjsdjf sd' | null + '' | null + null | null + 'helloworld-1.0.0-592112.h154' | 'helloworld' + 'helloworld-1.0.0-592112.h154/WE-WAPP-helloworld/154' | 'helloworld' + } + + void 'should extract app name from group name'() { + expect: + Relationships.appNameFromGroupName(group) == app + + where: + group | app + "actiondrainer" | "actiondrainer" + "merchweb--loadtest" | "merchweb" + "merchweb-loadtest" | "merchweb" + "discovery-us-east-1d" | "discovery" + "discovery--us-east-1d" | "discovery" + "api-test-A" | "api" + "evcache-us-east-1d-0" | "evcache" + "evcache-us----east-1d-0" | "evcache" + "videometadata-navigator-integration-240-CAN" | "videometadata" + } + + void 'should extract app name from launch config name'() { + expect: + Relationships.appNameFromLaunchConfigName(launch) == app + + where: + launch | app + "actiondrainer-201010231745" | "actiondrainer" + "merchweb--loadtest-201010231745" | "merchweb" + "discovery--us-east-1d-201010231745" | "discovery" + "merchweb-loadtest-201010231745" | "merchweb" + "api-test-A-201010231745" | "api" + "discovery-dev-201010231745" | "discovery" + "discovery-us-east-1d-201010231745" | "discovery" + "evcache-us-east-1d-0-201010231745" | "evcache" + "evcache-us----east-1d-0-201010231745" | "evcache" + "videometadata-navigator-integration-240-CAN-201010231745" | "videometadata" + } + + void 'should extract app name from load balancer name'() { + expect: + Relationships.appNameFromLoadBalancerName(loadBal) == app + + where: + app | loadBal + "actiondrainer" | "actiondrainer-frontend" + "merchweb" | "merchweb--loadtest-frontend" + "discovery" | "discovery--us-east-1d-frontend" + "merchweb" | "merchweb-loadtest-frontend" + "api" | "api-test-A-frontend" + "discovery" | "discovery-dev-frontend"
+ "discovery" | "discovery-us-east-1d-frontend" + "evcache" | "evcache-us-east-1d-0-frontend" + "evcache" | "evcache-us----east-1d-0-frontend" + "videometadata" | "videometadata-navigator-integration-240-CAN-frontend" + } + + void 'should extract stack name from group name'() { + expect: + Relationships.stackNameFromGroupName(group) == stack + + where: + stack | group + "" | "actiondrainer" + "" | "merchweb--loadtest" + "" | "discovery--us-east-1d" + "test" | "api-test-A" + "dev" | "discovery-dev" + "us" | "discovery-us-east-1d" + "us" | "evcache-us-east-1d-0" + "us" | "evcache-us----east-1d-0" + "navigator" | "videometadata-navigator-integration-240-CAN" + } + + void 'should extract cluster from group name'() { + expect: + Relationships.clusterFromGroupName(group) == cluster + + where: + cluster | group + "actiondrainer" | "actiondrainer" + "actiondrainer" | "actiondrainer-v301" + "merchweb--loadtest" | "merchweb--loadtest" + "discovery--us-east-1d-v" | "discovery--us-east-1d-v" + "discovery--us-east-1d-v1" | "discovery--us-east-1d-v1" + "discovery--us-east-1d-v11" | "discovery--us-east-1d-v11" + "discovery--us-east-1d" | "discovery--us-east-1d-v111" + "discovery--us-east-1d-v1111" | "discovery--us-east-1d-v1111" + "merchweb-loadtest" | "merchweb-loadtest" + "api-test-A" | "api-test-A" + "evcache-us-east-1d-0" | "evcache-us-east-1d-0" + "evcache-us----east-1d-0" | "evcache-us----east-1d-0" + "videometadata-navigator-integration-240-CAN" | "videometadata-navigator-integration-240-CAN" + } + + @SuppressWarnings("GroovyAssignabilityCheck") + void 'should check whether a name contains a reserved format'() { + expect: + Relationships.usesReservedFormat(groupName) == result + + where: + result | groupName + false | "abha" + true | "abha-v999" + false | "abha-v9999999" + true | "integration-240-usa-iphone-v001" + false | "integration-240-usa-iphone-v22" + true | 'cass-nccpint-random-junk-c0northamerica-d0prod-h0gamesystems-p0vizio-r027-u0nccp-x0A-z0useast1a-v003' + true | 'c0northamerica' + true | 'junk-c0northamerica' + true | 'random-c0northamerica-junk' + false | 'random-abc0northamerica-junk' + } + + void "should check whether a name follows the rules for a strict name"() { + expect: + Relationships.checkStrictName(name) == result + + where: + name | result + "abha" | true + "account_batch" | false + "account.batch" | false + "" | false + null | false + } + + void "should check whether an app name is suitable for use in load balancer names"() { + expect: + Relationships.checkAppNameForLoadBalancer(app) == result + + where: + app | result + "abha" | true + "account_batch" | false + "account.batch" | false + "account#batch" | false + "" | false + null | false + "abhav309" | false + "abhav309787" | true + "v309" | false + "v3111111" | true + } + + void "should check if a name is okay to use for an application"() { + expect: + Relationships.checkName(name) == result + + where: + name | result + "abha" | true + "account_batch" | true + "account.batch" | true + "account#batch" | false + "" | false + null | false + } + + @SuppressWarnings("GroovyAssignabilityCheck") + void "should check whether a details string is valid"() { + expect: + Relationships.checkDetail(detail) == result + + where: + result | detail + true | "A" + true | "0" + true | "east-1c-0" + true | "230CAN-next-A" + true | "integration-240-USA" + true | "integration-240-usa-iphone-ipad-ios5-even-numbered-days-not-weekends" + true | "----" + true | "__._._--_.." 
+ false | "230CAN#next-A" + false | "" + false | null + } + + void "should build auto scaling group name from basic parts"() { + expect: + Relationships.buildGroupName([appName: app, stack: stack, detail: detail]) == group + + where: + app | stack | detail | group + "helloworld" | "asgardtest" | null | "helloworld-asgardtest" + "helloworld" | "asgardtest" | "" | "helloworld-asgardtest" + "helloworld" | "asgardtest" | "2" | "helloworld-asgardtest-2" + "helloworld" | "" | "" | "helloworld" + "helloworld" | null | null | "helloworld" + "discovery" | "us" | "east-1d" | "discovery-us-east-1d" + "discovery" | "" | "us-east-1d" | "discovery--us-east-1d" + "discovery" | null | "us-east-1d" | "discovery--us-east-1d" + "merchweb" | "" | "loadtest" | "merchweb--loadtest" + "merchweb" | null | "loadtest" | "merchweb--loadtest" + "merchweb" | null | "loadtest" | "merchweb--loadtest" + } + + void "should build auto scaling group from many parts including labeled properties"() { + + expect: + Relationships.buildGroupName(appName: "cass", stack: "nccpint", detail: "random-junk", + countries: "northamerica", devPhase: "prod", hardware: "gamesystems", partners: "vizio", revision: "27", + usedBy: "nccp", redBlackSwap: "A", zoneVar: "useast1a" + ) == 'cass-nccpint-random-junk-c0northamerica-d0prod-h0gamesystems-p0vizio-r027-u0nccp-w0A-z0useast1a' + + Relationships.buildGroupName(appName: "cass", stack: "", detail: "random-junk", countries: null, devPhase: "", + hardware: "gamesystems", partners: "", redBlackSwap: "A" + ) == 'cass--random-junk-h0gamesystems-w0A' + + Relationships.buildGroupName(appName: "cass", stack: null, detail: null, devPhase: "", hardware: "gamesystems", + partners: "", redBlackSwap: "A" + ) == 'cass-h0gamesystems-w0A' + } + + void "should fail to build an auto scaling group name based on invalid parts"() { + + when: + Relationships.buildGroupName([appName: app, stack: "asgardtest", detail: "2"]) + + then: + thrown(exception) + + where: + app | exception + "" | IllegalArgumentException + null | NullPointerException + } + + void "should build launch configuration name"() { + expect: + Relationships.buildLaunchConfigurationName(group) ==~ ~launch + + where: + group | launch + "helloworld" | /helloworld-[0-9]{14}/ + "integration-240-usa-iphone" | /integration-240-usa-iphone-[0-9]{14}/ + } + + void "should build load balancer name from parts"() { + expect: + Relationships.buildLoadBalancerName(app, stack, detail) == loadBal + + where: + app | stack | detail | loadBal + "helloworld" | "asgardtest" | null | "helloworld-asgardtest" + "helloworld" | "asgardtest" | "" | "helloworld-asgardtest" + "helloworld" | "asgardtest" | "frontend" | "helloworld-asgardtest-frontend" + "helloworld" | "" | "" | "helloworld" + "helloworld" | null | null | "helloworld" + "discovery" | "us" | "east-1d" | "discovery-us-east-1d" + "discovery" | "" | "frontend" | "discovery--frontend" + "discovery" | null | "us-east-1d" | "discovery--us-east-1d" + "merchweb" | "" | "frontend" | "merchweb--frontend" + "merchweb" | null | "frontend" | "merchweb--frontend" + } + + void "should fail to build load balancer name from invalid parts"() { + when: + Relationships.buildLoadBalancerName(app, "asgardtest", "frontend") + + then: + thrown(exception) + + where: + app | exception + "" | IllegalArgumentException + null | NullPointerException + } + + void "should parse base AMI ID from AMI description"() { + expect: + Relationships.baseAmiIdFromDescription(desc) == baseAmiId + + where: + baseAmiId | desc + null | '' + null | null + 
'ami-50886239' | 'base_ami_id=ami-50886239,base_ami_name=servicenet-roku-qadd.dc.81210.10.44' + 'ami-1eb75c77' | 'base_ami_id=ami-1eb75c77,base_ami_name=servicenet-roku-qadd.dc.81210.10.44' + 'ami-1eb75c77' | 'base_ami_name=servicenet-roku-qadd.dc.81210.10.44,base_ami_id=ami-1eb75c77' + 'ami-7b4eb912' | 'store=ebs,ancestor_name=ebs-centosbase-x86_64-20101124,ancestor_id=ami-7b4eb912' + } + + void "should parse base AMI name from AMI description"() { + expect: + Relationships.baseAmiNameFromDescription(desc) == baseAmiName + + where: + baseAmiName | desc + 'servicenet-roku-qadd.dc.81210' | 'base_ami_id=ami-50886239,base_ami_name=servicenet-roku-qadd.dc.81210' + 'servicenet-roku-qadd.dc.81210' | 'base_ami_id=ami-1eb75c77,base_ami_name=servicenet-roku-qadd.dc.81210' + 'servicenet-roku-qadd.dc.81210' | 'base_ami_name=servicenet-roku-qadd.dc.81210,base_ami_id=ami-1eb75c77' + 'ebs-centosbase-x86_64-2010' | 'store=ebs,ancestor_name=ebs-centosbase-x86_64-2010,ancestor_id=ami-7b4eb912' + } + + void "should parse base AMI date from description"() { + expect: + Relationships.baseAmiDateFromDescription(desc) == dateTime + + where: + desc | dateTime + 'base_ami_id=ami-50886239,base_ami_name=servicenet-roku-qadd.dc.81210.10.44' | null + 'base_ami_id=ami-1eb75c77,base_ami_name=servicenet-roku-qadd.dc.81210.10.44' | null + 'base_ami_name=servicenet-roku-qadd.dc.81210.10.44,base_ami_id=ami-1eb75c77' | null + 'store=ebs,ancestor_name=centos-x86_64-20101124,ancestor_id=ami-7b4eb912' | new DateTime(2010, 11, 24, 0, 0) + 'ancestor_name=centos-x86_64-20101124,ancestor_id=ami-7b4eb912' | new DateTime(2010, 11, 24, 0, 0) + 'ancestor_id=ami-7b4eb912,ancestor_name=centos-x86_64-20101124' | new DateTime(2010, 11, 24, 0, 0) + 'store=ebs,ancestor_name=centos-x86_64-20101124' | new DateTime(2010, 11, 24, 0, 0) + } + + void "should build an alarm name for a scaling policy based on an auto scaling group name"() { + expect: + Relationships.buildAlarmName('helloworld--test-v000', '99999') == 'helloworld--test-v000-99999' + } + + void "should build a scaling policy name based on an auto scaling group name"() { + expect: + Relationships.buildScalingPolicyName('helloworld--test-v000', '99999') == 'helloworld--test-v000-99999' + } + + void "should create labeled environment variables"() { + + when: + Names names = new Names('test') + names.partners = 'sony' + names.devPhase = 'stage' + List envVars = Relationships.labeledEnvironmentVariables(names, 'NETFLIX_') + + then: + envVars == ['export NETFLIX_DEV_PHASE=stage', 'export NETFLIX_PARTNERS=sony'] + } + + void "should create labeled environment variables map"() { + + when: + Names names = new Names('test') + names.partners = 'sony' + names.devPhase = 'stage' + Map envVars = Relationships.labeledEnvVarsMap(names, 'NETFLIX_') + + then: + envVars == ['NETFLIX_DEV_PHASE': 'stage', 'NETFLIX_PARTNERS': 'sony'] + } + + void "should show pretty-formatted map keys for labeled variables"() { + when: + Names names = new Names('test-p0sony-d0stage') + + then: + Relationships.parts(names) == ['Dev Phase': 'stage', 'Partners': 'sony'] + } +} diff --git a/test/unit/com/netflix/asgard/RelationshipsTests.groovy b/test/unit/com/netflix/asgard/RelationshipsTests.groovy deleted file mode 100644 index 3850d746..00000000 --- a/test/unit/com/netflix/asgard/RelationshipsTests.groovy +++ /dev/null @@ -1,663 +0,0 @@ -/* - * Copyright 2012 Netflix, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.netflix.asgard - -import com.netflix.frigga.Names -import com.netflix.frigga.ami.AppVersion -import grails.test.GrailsUnitTestCase -import org.joda.time.DateTime - -@SuppressWarnings("GroovyAccessibility") -class RelationshipsTests extends GrailsUnitTestCase { - - void setUp() { - new MonkeyPatcherService().createDynamicMethods() - } - - void testBuildNextAutoScalingGroupName() { - assert "discovery-dev-v000" == Relationships.buildNextAutoScalingGroupName("discovery-dev") - assert "discovery-dev-v000" == Relationships.buildNextAutoScalingGroupName("discovery-dev-v999") - assert "discovery-dev-v999" == Relationships.buildNextAutoScalingGroupName("discovery-dev-v998") - assert "discovery-dev-v998" == Relationships.buildNextAutoScalingGroupName("discovery-dev-v997") - assert "discovery-dev-v001" == Relationships.buildNextAutoScalingGroupName("discovery-dev-v000") - assert "discovery-dev-v002" == Relationships.buildNextAutoScalingGroupName("discovery-dev-v001") - assert "discovery-dev-v003" == Relationships.buildNextAutoScalingGroupName("discovery-dev-v002") - assert "discovery-dev-v522" == Relationships.buildNextAutoScalingGroupName("discovery-dev-v521") - } - - void testDissectGroupNameWithDot() { - - Names names = Relationships.dissectCompoundName("chukwa.collector_1-v889") - assert "chukwa.collector_1-v889" == names.group - assert "chukwa.collector_1" == names.cluster - assert "chukwa.collector_1" == names.app - assert null == names.stack - assert null == names.detail - assert "v889" == names.push - assert 889 == names.sequence - } - - void testDissectGroupNameInvalid() { - - Names names = Relationships.dissectCompoundName('nccp-moviecontrol%27') - assert null == names.group - assert null == names.cluster - assert null == names.app - assert null == names.stack - assert null == names.detail - assert null == names.push - assert null == names.sequence - } - - void testDissectGroupName() { - - Names names = Relationships.dissectCompoundName(null) - assert null == names.group - assert null == names.cluster - assert null == names.app - assert null == names.stack - assert null == names.detail - assert null == names.push - assert null == names.sequence - - names = Relationships.dissectCompoundName("actiondrainer") - assert "actiondrainer" == names.group - assert "actiondrainer" == names.cluster - assert "actiondrainer" == names.app - assert null == names.stack - assert null == names.detail - assert null == names.push - assert null == names.sequence - - names = Relationships.dissectCompoundName("actiondrainer-v003") - assert "actiondrainer-v003" == names.group - assert "actiondrainer" == names.cluster - assert "actiondrainer" == names.app - assert null == names.stack - assert null == names.detail - assert "v003" == names.push - assert 3 == names.sequence - - names = Relationships.dissectCompoundName("actiondrainer--v003") - assert "actiondrainer--v003" == names.group - assert "actiondrainer-" == names.cluster - assert "actiondrainer" == names.app - assert null == names.stack - assert null == names.detail - assert "v003" == names.push - assert 3 == names.sequence - 
- names = Relationships.dissectCompoundName("actiondrainer---v003") - assert "actiondrainer---v003" == names.group - assert "actiondrainer--" == names.cluster - assert "actiondrainer" == names.app - assert null == names.stack - assert null == names.detail - assert "v003" == names.push - assert 3 == names.sequence - - names = Relationships.dissectCompoundName("api-test-A") - assert "api-test-A" == names.group - assert "api-test-A" == names.cluster - assert "api" == names.app - assert "test" == names.stack - assert "A" == names.detail - assert null == names.push - - names = Relationships.dissectCompoundName("api-test-A-v406") - assert "api-test-A-v406" == names.group - assert "api-test-A" == names.cluster - assert "api" == names.app - assert "test" == names.stack - assert "A" == names.detail - assert "v406" == names.push - assert 406 == names.sequence - - names = Relationships.dissectCompoundName("api-test101") - assert "api-test101" == names.group - assert "api-test101" == names.cluster - assert "api" == names.app - assert "test101" == names.stack - assert null == names.detail - assert null == names.push - assert null == names.sequence - - names = Relationships.dissectCompoundName("chukwacollector_1") - assert "chukwacollector_1" == names.group - assert "chukwacollector_1" == names.cluster - assert "chukwacollector_1" == names.app - assert null == names.stack - assert null == names.detail - assert null == names.push - assert null == names.sequence - - names = Relationships.dissectCompoundName("chukwacollector_1-v889") - assert "chukwacollector_1-v889" == names.group - assert "chukwacollector_1" == names.cluster - assert "chukwacollector_1" == names.app - assert null == names.stack - assert null == names.detail - assert "v889" == names.push - assert 889 == names.sequence - - names = Relationships.dissectCompoundName("api-test-A") - assert "api-test-A" == names.group - assert "api-test-A" == names.cluster - assert "api" == names.app - assert "test" == names.stack - assert "A" == names.detail - assert null == names.push - assert null == names.sequence - - names = Relationships.dissectCompoundName("discovery-dev") - assert "discovery-dev" == names.group - assert "discovery-dev" == names.cluster - assert "discovery" == names.app - assert "dev" == names.stack - assert null == names.detail - assert null == names.push - assert null == names.sequence - - names = Relationships.dissectCompoundName("discovery-us-east-1d") - assert "discovery-us-east-1d" == names.group - assert "discovery-us-east-1d" == names.cluster - assert "discovery" == names.app - assert "us" == names.stack - assert "east-1d" == names.detail - assert null == names.push - assert null == names.sequence - - names = Relationships.dissectCompoundName("evcache-us-east-1d-0") - assert "evcache-us-east-1d-0" == names.group - assert "evcache-us-east-1d-0" == names.cluster - assert "evcache" == names.app - assert "us" == names.stack - assert "east-1d-0" == names.detail - assert null == names.push - assert null == names.sequence - - names = Relationships.dissectCompoundName("evcache-us-east-1d-0-v223") - assert "evcache-us-east-1d-0-v223" == names.group - assert "evcache-us-east-1d-0" == names.cluster - assert "evcache" == names.app - assert "us" == names.stack - assert "east-1d-0" == names.detail - assert "v223" == names.push - assert 223 == names.sequence - - names = Relationships.dissectCompoundName("videometadata-navigator-integration-240-CAN") - assert "videometadata-navigator-integration-240-CAN" == names.group - assert 
"videometadata-navigator-integration-240-CAN" == names.cluster - assert "videometadata" == names.app - assert "navigator" == names.stack - assert "integration-240-CAN" == names.detail - assert null == names.push - assert null == names.sequence - } - - void testDissectGroupNameWithLabeledVariables() { - - Names names = Relationships.dissectCompoundName("actiondrainer") - assert "actiondrainer" == names.group - assert "actiondrainer" == names.cluster - assert "actiondrainer" == names.app - assert null == names.stack - assert null == names.detail - assert null == names.push - assert null == names.sequence - assert null == names.countries - assert null == names.devPhase - assert null == names.hardware - assert null == names.partners - assert null == names.revision - assert null == names.usedBy - assert null == names.redBlackSwap - assert null == names.zone - - names = Relationships.dissectCompoundName( - 'cass-nccpint-random-junk-c0america-d0prod-h0xbox-p0vizio-r027-u0nccp-w0A-z0useast1a-v003') - assert 'cass-nccpint-random-junk-c0america-d0prod-h0xbox-p0vizio-r027-u0nccp-w0A-z0useast1a-v003' == names.group - assert 'cass-nccpint-random-junk-c0america-d0prod-h0xbox-p0vizio-r027-u0nccp-w0A-z0useast1a' == names.cluster - assert 'cass' == names.app - assert 'nccpint' == names.stack - assert 'random-junk' == names.detail - assert 'v003' == names.push - assert 3 == names.sequence - assert 'america' == names.countries - assert 'prod' == names.devPhase - assert 'xbox' == names.hardware - assert 'vizio' == names.partners - assert '27' == names.revision - assert 'nccp' == names.usedBy - assert 'A' == names.redBlackSwap - assert 'useast1a' == names.zone - - names = Relationships.dissectCompoundName('cass-nccpintegration-c0northamerica-d0prod') - assert 'cass-nccpintegration-c0northamerica-d0prod' == names.group - assert 'cass-nccpintegration-c0northamerica-d0prod' == names.cluster - assert 'cass' == names.app - assert 'nccpintegration' == names.stack - assert null == names.detail - assert null == names.push - assert null == names.sequence - assert 'northamerica' == names.countries - assert 'prod' == names.devPhase - assert null == names.hardware - assert null == names.partners - assert null == names.revision - assert null == names.usedBy - assert null == names.redBlackSwap - assert null == names.zone - - names = Relationships.dissectCompoundName('cass--my-stuff-c0northamerica-d0prod') - assert 'cass--my-stuff-c0northamerica-d0prod' == names.group - assert 'cass--my-stuff-c0northamerica-d0prod' == names.cluster - assert 'cass' == names.app - assert null == names.stack - assert 'my-stuff' == names.detail - assert null == names.push - assert null == names.sequence - assert 'northamerica' == names.countries - assert 'prod' == names.devPhase - assert null == names.hardware - assert null == names.partners - assert null == names.revision - assert null == names.usedBy - assert null == names.redBlackSwap - assert null == names.zone - - names = Relationships.dissectCompoundName('cass-c0northamerica-d0prod') - assert 'cass-c0northamerica-d0prod' == names.group - assert 'cass-c0northamerica-d0prod' == names.cluster - assert 'cass' == names.app - assert null == names.stack - assert null == names.detail - assert null == names.push - assert null == names.sequence - assert 'northamerica' == names.countries - assert 'prod' == names.devPhase - assert null == names.hardware - assert null == names.partners - assert null == names.revision - assert null == names.usedBy - assert null == names.redBlackSwap - assert null == 
names.zone - - names = Relationships.dissectCompoundName('cass-c0northamerica-d0prod-v102') - assert 'cass-c0northamerica-d0prod-v102' == names.group - assert 'cass-c0northamerica-d0prod' == names.cluster - assert 'cass' == names.app - assert null == names.stack - assert null == names.detail - assert 'v102' == names.push - assert 102 == names.sequence - assert 'northamerica' == names.countries - assert 'prod' == names.devPhase - assert null == names.hardware - assert null == names.partners - assert null == names.revision - assert null == names.usedBy - assert null == names.redBlackSwap - assert null == names.zone - - names = Relationships.dissectCompoundName('cass-v102') - assert 'cass-v102' == names.group - assert 'cass' == names.cluster - assert 'cass' == names.app - assert null == names.stack - assert null == names.detail - assert 'v102' == names.push - assert 102 == names.sequence - assert null == names.countries - assert null == names.devPhase - assert null == names.hardware - assert null == names.partners - assert null == names.revision - assert null == names.usedBy - assert null == names.redBlackSwap - assert null == names.zone - } - - void testDissectAppVersion() { - - AppVersion appVersion = Relationships.dissectAppVersion("helloworld-1.0.0-592112.h154/WE-WAPP-helloworld/154") - assert "helloworld" == appVersion.packageName - assert "1.0.0" == appVersion.version - assert "592112" == appVersion.commit - assert "154" == appVersion.buildNumber - assert "WE-WAPP-helloworld" == appVersion.buildJobName - - appVersion = Relationships.dissectAppVersion("helloworld-server-1.0.0-592112.h154/WE-WAPP-helloworld/154") - assert "helloworld-server" == appVersion.packageName - assert "1.0.0" == appVersion.version - assert "592112" == appVersion.commit - assert "154" == appVersion.buildNumber - assert "WE-WAPP-helloworld" == appVersion.buildJobName - - appVersion = Relationships.dissectAppVersion("helloworld-1.0.0-592112.h154") - assert "helloworld" == appVersion.packageName - assert "1.0.0" == appVersion.version - assert "592112" == appVersion.commit - assert "154" == appVersion.buildNumber - assertNull appVersion.buildJobName - - appVersion = Relationships.dissectAppVersion("helloworld-1.0.0-592112") - assert "helloworld" == appVersion.packageName - assert "1.0.0" == appVersion.version - assert "592112" == appVersion.commit - assertNull appVersion.buildNumber - assertNull appVersion.buildJobName - - assertNull Relationships.dissectAppVersion(null) - assertNull Relationships.dissectAppVersion("") - assertNull Relationships.dissectAppVersion("blah blah blah") - } - - void testPackageFromAppVersion() { - assert 'helloworld' == Relationships.packageFromAppVersion( - 'helloworld-1.0.0-592112.h154/WE-WAPP-helloworld/154') - assert null == Relationships.packageFromAppVersion(null) - assert null == Relationships.packageFromAppVersion('') - assert null == Relationships.packageFromAppVersion('dfjsdfkjsdfkjsd fkjsdf kljsdf ksjdf klsdjf sd') - } - - void testAppNameFromGroupName() { - assert "actiondrainer" == Relationships.appNameFromGroupName("actiondrainer") - assert "merchweb" == Relationships.appNameFromGroupName("merchweb--loadtest") - assert "discovery" == Relationships.appNameFromGroupName("discovery--us-east-1d") - assert "merchweb" == Relationships.appNameFromGroupName("merchweb-loadtest") - assert "api" == Relationships.appNameFromGroupName("api-test-A") - assert "discovery" == Relationships.appNameFromGroupName("discovery-dev") - assert "discovery" == 
Relationships.appNameFromGroupName("discovery-us-east-1d") - assert "evcache" == Relationships.appNameFromGroupName("evcache-us-east-1d-0") - assert "evcache" == Relationships.appNameFromGroupName("evcache-us----east-1d-0") - assert "videometadata" == Relationships.appNameFromGroupName("videometadata-navigator-integration-240-CAN") - } - - void testAppNameFromLaunchConfigName() { - assert "actiondrainer" == Relationships.appNameFromLaunchConfigName("actiondrainer-201010231745") - assert "merchweb" == Relationships.appNameFromLaunchConfigName("merchweb--loadtest-201010231745") - assert "discovery" == Relationships.appNameFromLaunchConfigName("discovery--us-east-1d-201010231745") - assert "merchweb" == Relationships.appNameFromLaunchConfigName("merchweb-loadtest-201010231745") - assert "api" == Relationships.appNameFromLaunchConfigName("api-test-A-201010231745") - assert "discovery" == Relationships.appNameFromLaunchConfigName("discovery-dev-201010231745") - assert "discovery" == Relationships.appNameFromLaunchConfigName("discovery-us-east-1d-201010231745") - assert "evcache" == Relationships.appNameFromLaunchConfigName("evcache-us-east-1d-0-201010231745") - assert "evcache" == Relationships.appNameFromLaunchConfigName("evcache-us----east-1d-0-201010231745") - assert "videometadata" == Relationships.appNameFromLaunchConfigName( - "videometadata-navigator-integration-240-CAN-201010231745") - } - - void testAppNameFromLoadBalancerName() { - assert "actiondrainer" == Relationships.appNameFromLoadBalancerName("actiondrainer-frontend") - assert "merchweb" == Relationships.appNameFromLoadBalancerName("merchweb--loadtest-frontend") - assert "discovery" == Relationships.appNameFromLoadBalancerName("discovery--us-east-1d-frontend") - assert "merchweb" == Relationships.appNameFromLoadBalancerName("merchweb-loadtest-frontend") - assert "api" == Relationships.appNameFromLoadBalancerName("api-test-A-frontend") - assert "discovery" == Relationships.appNameFromLoadBalancerName("discovery-dev-frontend") - assert "discovery" == Relationships.appNameFromLoadBalancerName("discovery-us-east-1d-frontend") - assert "evcache" == Relationships.appNameFromLoadBalancerName("evcache-us-east-1d-0-frontend") - assert "evcache" == Relationships.appNameFromLoadBalancerName("evcache-us----east-1d-0-frontend") - assert "videometadata" == Relationships.appNameFromLoadBalancerName( - "videometadata-navigator-integration-240-CAN-frontend") - } - - void testStackNameFromGroupName() { - assert "" == Relationships.stackNameFromGroupName("actiondrainer") - assert "" == Relationships.stackNameFromGroupName("merchweb--loadtest") - assert "" == Relationships.stackNameFromGroupName("discovery--us-east-1d") - assert "loadtest" == Relationships.stackNameFromGroupName("merchweb-loadtest") - assert "test" == Relationships.stackNameFromGroupName("api-test-A") - assert "dev" == Relationships.stackNameFromGroupName("discovery-dev") - assert "us" == Relationships.stackNameFromGroupName("discovery-us-east-1d") - assert "us" == Relationships.stackNameFromGroupName("evcache-us-east-1d-0") - assert "us" == Relationships.stackNameFromGroupName("evcache-us----east-1d-0") - assert "navigator" == Relationships.stackNameFromGroupName("videometadata-navigator-integration-240-CAN") - } - - void testClusterFromGroupName() { - assert "actiondrainer" == Relationships.clusterFromGroupName("actiondrainer") - assert "actiondrainer" == Relationships.clusterFromGroupName("actiondrainer-v301") - assert "merchweb--loadtest" == 
Relationships.clusterFromGroupName("merchweb--loadtest") - assert "discovery--us-east-1d-v" == Relationships.clusterFromGroupName("discovery--us-east-1d-v") - assert "discovery--us-east-1d-v1" == Relationships.clusterFromGroupName("discovery--us-east-1d-v1") - assert "discovery--us-east-1d-v11" == Relationships.clusterFromGroupName("discovery--us-east-1d-v11") - assert "discovery--us-east-1d" == Relationships.clusterFromGroupName("discovery--us-east-1d-v111") - assert "discovery--us-east-1d-v1111" == Relationships.clusterFromGroupName("discovery--us-east-1d-v1111") - assert "merchweb-loadtest" == Relationships.clusterFromGroupName("merchweb-loadtest") - assert "api-test-A" == Relationships.clusterFromGroupName("api-test-A") - assert "evcache-us-east-1d-0" == Relationships.clusterFromGroupName("evcache-us-east-1d-0") - assert "evcache-us----east-1d-0" == Relationships.clusterFromGroupName("evcache-us----east-1d-0") - assert "videometadata-navigator-integration-240-CAN" == Relationships.clusterFromGroupName( - "videometadata-navigator-integration-240-CAN") - } - - void testAvoidsReservedFormat() { - assert !Relationships.usesReservedFormat("abha") - assert Relationships.usesReservedFormat("abha-v999") - assert !Relationships.usesReservedFormat("abha-v9999999") - assert Relationships.usesReservedFormat("integration-240-usa-iphone-v001") - assert !Relationships.usesReservedFormat("integration-240-usa-iphone-v22") - - assert Relationships.usesReservedFormat("integration-v001-usa-iphone") - assert Relationships.usesReservedFormat( - 'cass-nccpint-random-junk-c0northamerica-d0prod-h0gamesystems-p0vizio-r027-u0nccp-x0A-z0useast1a-v003') - assert Relationships.usesReservedFormat('c0northamerica') - assert Relationships.usesReservedFormat('junk-c0northamerica') - assert Relationships.usesReservedFormat('c0northamerica') - assert Relationships.usesReservedFormat('random-c0northamerica-junk') - assert !Relationships.usesReservedFormat('random-abc0northamerica-junk') - } - - void testCheckStrictName() { - assert Relationships.checkStrictName("abha") - assert !Relationships.checkStrictName("account_batch") - assert !Relationships.checkStrictName("account.batch") - assert !Relationships.checkStrictName("") - assert !Relationships.checkStrictName(null) - } - - void testCheckAppNameForLoadBalancer() { - assert Relationships.checkAppNameForLoadBalancer("abha") - assert !Relationships.checkAppNameForLoadBalancer("account_batch") - assert !Relationships.checkAppNameForLoadBalancer("account.batch") - assert !Relationships.checkAppNameForLoadBalancer("account#batch") - assert !Relationships.checkAppNameForLoadBalancer("") - assert !Relationships.checkAppNameForLoadBalancer(null) - assert !Relationships.checkAppNameForLoadBalancer("abhav309") - assert Relationships.checkAppNameForLoadBalancer("abhav309787") - assert !Relationships.checkAppNameForLoadBalancer("v309") - assert Relationships.checkAppNameForLoadBalancer("v3111111") - } - - void testCheckName() { - assert Relationships.checkName("abha") - assert Relationships.checkName("account_batch") - assert Relationships.checkName("account.batch") - assert !Relationships.checkName("account#batch") - assert !Relationships.checkName("") - assert !Relationships.checkName(null) - } - - void testDetail() { - assert Relationships.checkDetail("A") - assert Relationships.checkDetail("0") - assert Relationships.checkDetail("east-1c-0") - assert Relationships.checkDetail("230CAN-next-A") - assert Relationships.checkDetail("integration-240-USA") - assert 
Relationships.checkDetail("integration-240-usa-iphone-ipad-ios5-even-numbered-days-not-weekends") - assert Relationships.checkDetail("----") - assert Relationships.checkDetail("__._._--_..") - assert !Relationships.checkDetail("230CAN#next-A") - assert !Relationships.checkDetail("") - assert !Relationships.checkDetail(null) - } - - void testBuildAutoScalingGroupName() { - - assert "helloworld-asgardtest" == Relationships.buildGroupName([appName: "helloworld", stack: "asgardtest", - detail: null]) - assert "helloworld-asgardtest" == Relationships.buildGroupName([appName: "helloworld", stack: "asgardtest", - detail: ""]) - assert "helloworld-asgardtest-2" == Relationships.buildGroupName([appName: "helloworld", stack: "asgardtest", - detail: "2"]) - assert "helloworld" == Relationships.buildGroupName([appName: "helloworld", stack: "", detail: ""]) - assert "helloworld" == Relationships.buildGroupName([appName: "helloworld", stack: null, detail: null]) - assert "discovery-us-east-1d" == Relationships.buildGroupName([appName: "discovery", stack: "us", - detail: "east-1d"]) - assert "discovery--us-east-1d" == Relationships.buildGroupName([appName: "discovery", stack: "", - detail: "us-east-1d"]) - assert "discovery--us-east-1d" == Relationships.buildGroupName([appName: "discovery", stack: null, - detail: "us-east-1d"]) - assert "merchweb--loadtest" == Relationships.buildGroupName([appName: "merchweb", stack: "", - detail: "loadtest"]) - assert "merchweb--loadtest" == Relationships.buildGroupName([appName: "merchweb", stack: null, - detail: "loadtest"]) - - def exceptionThrown = false - try { - Relationships.buildGroupName([appName: "", stack: "asgardtest", detail: "2"]) - } catch (IllegalArgumentException ignored) { - exceptionThrown = true - } - assert exceptionThrown - - def npeThrown = false - try { - Relationships.buildGroupName([appName: null, stack: "asgardtest", detail: "2"]) - } catch (NullPointerException ignored) { - npeThrown = true - } - assert npeThrown - - assert "helloworld-asgardtest" == Relationships.buildGroupName([appName: "helloworld", stack: "asgardtest", - detail: null]) - - assert 'cass-nccpint-random-junk-c0northamerica-d0prod-h0gamesystems-p0vizio-r027-u0nccp-w0A-z0useast1a' == - Relationships.buildGroupName(appName: "cass", stack: "nccpint", - detail: "random-junk", countries: "northamerica", devPhase: "prod", - hardware: "gamesystems", partners: "vizio", revision: "27", usedBy: "nccp", redBlackSwap: "A", - zoneVar: "useast1a") - - assert 'cass--random-junk-h0gamesystems-w0A' == - Relationships.buildGroupName(appName: "cass", stack: "", - detail: "random-junk", countries: null, devPhase: "", - hardware: "gamesystems", partners: "", redBlackSwap: "A") - - assert 'cass-h0gamesystems-w0A' == - Relationships.buildGroupName(appName: "cass", stack: null, detail: null, devPhase: "", - hardware: "gamesystems", partners: "", redBlackSwap: "A") - } - - void testBuildLaunchConfigurationName() { - assert Relationships.buildLaunchConfigurationName("helloworld") ==~ ~/helloworld-[0-9]{14}/ - assert Relationships.buildLaunchConfigurationName("integration-240-usa-iphone") ==~ - ~/integration-240-usa-iphone-[0-9]{14}/ - } - - void testBuildLoadBalancerName() { - assert "helloworld-asgardtest" == Relationships.buildLoadBalancerName("helloworld", "asgardtest", null) - assert "helloworld-asgardtest" == Relationships.buildLoadBalancerName("helloworld", "asgardtest", "") - assert "helloworld-asgardtest-frontend" == Relationships.buildLoadBalancerName("helloworld", "asgardtest", - "frontend") 
- assert "helloworld" == Relationships.buildLoadBalancerName("helloworld", "", "") - assert "helloworld" == Relationships.buildLoadBalancerName("helloworld", null, null) - assert "discovery-us-east-1d" == Relationships.buildLoadBalancerName("discovery", "us", "east-1d") - assert "discovery--frontend" == Relationships.buildLoadBalancerName("discovery", "", "frontend") - assert "discovery--us-east-1d" == Relationships.buildLoadBalancerName("discovery", null, "us-east-1d") - assert "merchweb--frontend" == Relationships.buildLoadBalancerName("merchweb", "", "frontend") - assert "merchweb--frontend" == Relationships.buildLoadBalancerName("merchweb", null, "frontend") - - def exceptionThrown = false - try { - Relationships.buildLoadBalancerName("", "asgardtest", "frontend") - } catch (IllegalArgumentException ignored) { - exceptionThrown = true - } - assert exceptionThrown - - def npeThrown = false - try { - Relationships.buildLoadBalancerName(null, "asgardtest", "frontend") - } catch (NullPointerException ignored) { - npeThrown = true - } - assert npeThrown - } - - void testBaseAmiIdFromDescription() { - assertNull Relationships.baseAmiIdFromDescription('') - assertNull Relationships.baseAmiIdFromDescription(null) - assert 'ami-50886239' == Relationships.baseAmiIdFromDescription( - 'base_ami_id=ami-50886239,base_ami_name=servicenet-roku-qadd.dc.81210.10.44') - assert 'ami-1eb75c77' == Relationships.baseAmiIdFromDescription( - 'base_ami_id=ami-1eb75c77,base_ami_name=servicenet-roku-qadd.dc.81210.10.44') - assert 'ami-1eb75c77' == Relationships.baseAmiIdFromDescription( - 'base_ami_name=servicenet-roku-qadd.dc.81210.10.44,base_ami_id=ami-1eb75c77') - assert 'ami-7b4eb912' == Relationships.baseAmiIdFromDescription( - 'store=ebs,ancestor_name=ebs-centosbase-x86_64-20101124,ancestor_id=ami-7b4eb912') - } - - void testBaseAmiNameFromDescription() { - assert 'servicenet-roku-qadd.dc.81210.10.44' == Relationships.baseAmiNameFromDescription( - 'base_ami_id=ami-50886239,base_ami_name=servicenet-roku-qadd.dc.81210.10.44') - assert 'servicenet-roku-qadd.dc.81210.10.44' == Relationships.baseAmiNameFromDescription( - 'base_ami_id=ami-1eb75c77,base_ami_name=servicenet-roku-qadd.dc.81210.10.44') - assert 'servicenet-roku-qadd.dc.81210.10.44' == Relationships.baseAmiNameFromDescription( - 'base_ami_name=servicenet-roku-qadd.dc.81210.10.44,base_ami_id=ami-1eb75c77') - assert 'ebs-centosbase-x86_64-20101124' == Relationships.baseAmiNameFromDescription( - 'store=ebs,ancestor_name=ebs-centosbase-x86_64-20101124,ancestor_id=ami-7b4eb912') - } - - void testBaseAmiDateFromDescription() { - assertNull Relationships.baseAmiDateFromDescription( - 'base_ami_id=ami-50886239,base_ami_name=servicenet-roku-qadd.dc.81210.10.44') - assertNull Relationships.baseAmiDateFromDescription( - 'base_ami_id=ami-1eb75c77,base_ami_name=servicenet-roku-qadd.dc.81210.10.44') - assertNull Relationships.baseAmiDateFromDescription( - 'base_ami_name=servicenet-roku-qadd.dc.81210.10.44,base_ami_id=ami-1eb75c77') - assert new DateTime(2010, 11, 24, 0, 0, 0, 0) == Relationships.baseAmiDateFromDescription( - 'store=ebs,ancestor_name=ebs-centosbase-x86_64-20101124,ancestor_id=ami-7b4eb912') - } - - void testBuildAlarmNameForScalingPolicy() { - assert 'helloworld--scalingtest-v000-99999' == Relationships.buildAlarmName( - 'helloworld--scalingtest-v000', '99999') - } - - void testBuildPolicyName() { - assert 'helloworld--scalingtest-v000-99999' == Relationships.buildScalingPolicyName( - 'helloworld--scalingtest-v000', '99999') - } - - void 
testLabeledEnvironmentVariables() { - Names names = new Names('test') - names.partners = 'sony' - assert ['export NETFLIX_PARTNERS=sony'] == Relationships.labeledEnvironmentVariables(names, 'NETFLIX_') - names.devPhase = 'stage' - assert ['export NETFLIX_DEV_PHASE=stage', 'export NETFLIX_PARTNERS=sony'] == - Relationships.labeledEnvironmentVariables(names, 'NETFLIX_') - } - - void testParts() { - Names names = new Names('test') - names.partners = 'sony' - assert ['Partners': 'sony'] == Relationships.parts(names) - names.devPhase = 'stage' - assert ['Dev Phase': 'stage', 'Partners': 'sony'] == Relationships.parts(names) - } - -} diff --git a/test/unit/com/netflix/asgard/WorkflowExecutionBeanOptionsSpec.groovy b/test/unit/com/netflix/asgard/WorkflowExecutionBeanOptionsSpec.groovy index e3b65dfb..b8d4c636 100644 --- a/test/unit/com/netflix/asgard/WorkflowExecutionBeanOptionsSpec.groovy +++ b/test/unit/com/netflix/asgard/WorkflowExecutionBeanOptionsSpec.groovy @@ -98,7 +98,7 @@ class WorkflowExecutionBeanOptionsSpec extends Specification { new LogMessage(new Date(1372230631000), 'starting task').toString(), new LogMessage(new Date(1372230632000), 'doing task').toString(), new LogMessage(new Date(1372230633000), 'finished').toString() - ] + ], [] ) } } diff --git a/test/unit/com/netflix/asgard/cred/KeyManagementServiceAssumeRoleCredentialsProviderSpec.groovy b/test/unit/com/netflix/asgard/cred/KeyManagementServiceAssumeRoleCredentialsProviderSpec.groovy new file mode 100644 index 00000000..6ecc3520 --- /dev/null +++ b/test/unit/com/netflix/asgard/cred/KeyManagementServiceAssumeRoleCredentialsProviderSpec.groovy @@ -0,0 +1,139 @@ +/* + * Copyright 2014 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.netflix.asgard.cred + +import com.amazonaws.AmazonClientException +import com.amazonaws.auth.AWSCredentials +import com.amazonaws.auth.BasicSessionCredentials +import com.netflix.asgard.ConfigService +import com.netflix.asgard.RestClientService +import spock.lang.Specification + +/** + * Tests for KeyManagementServiceAssumeRoleCredentialsProvider. 
+ */ +@SuppressWarnings(["GroovyAccessibility", "GroovyAssignabilityCheck"]) +class KeyManagementServiceAssumeRoleCredentialsProviderSpec extends Specification { + + ConfigService configService = Mock(ConfigService) + RestClientService restClientService = Mock(RestClientService) + LocalFileReader localFileReader = Mock(LocalFileReader) + Clock clock = Mock(Clock) + KeyManagementServiceCredentialsProvider initialProvider = Mock(KeyManagementServiceCredentialsProvider) + def provider = new KeyManagementServiceAssumeRoleCredentialsProvider(configService, restClientService, + initialProvider) + + void 'should need a new session if session credentials are missing'() { + provider.sessionCredentials = null + + expect: + provider.needsNewSession() + } + + void 'should need a new session if there are already creds but initial provider needs a new session'() { + provider.sessionCredentials = new BasicSessionCredentials('JABBATHEHUTT', 'H+HANSOLOwookiee', 'AQ*****Zn8lgU=') + + when: + boolean needsNewSession = provider.needsNewSession() + + then: + needsNewSession + 1 * initialProvider.needsNewSession() >> true + 0 * _ + } + + void 'should need a new session if the time is close to running out'() { + provider.sessionCredentials = new BasicSessionCredentials('JABBATHEHUTT', 'H+HANSOLOwookiee', 'AQ*****Zn8lgU=') + provider.sessionCredentialsExpiration = new Date(1390360888000) + + when: + boolean needsNewSession = provider.needsNewSession() + + then: + needsNewSession + 1 * initialProvider.needsNewSession() >> false + 1 * initialProvider.currentTimeMillis() >> 1390360887000 + } + + void 'should not need a new session if the expiration time is a long time in the future'() { + provider.sessionCredentials = new BasicSessionCredentials('JABBATHEHUTT', 'H+HANSOLOwookiee', 'AQ*****Zn8lgU=') + provider.sessionCredentialsExpiration = new Date(1400000000000) + + when: + boolean needsNewSession = provider.needsNewSession() + + then: + !needsNewSession + 1 * initialProvider.needsNewSession() >> false + 1 * initialProvider.currentTimeMillis() >> 1390360887000 + } + + void 'should return cached credentials if no new session is needed'() { + + AWSCredentials credentials = new BasicSessionCredentials('JABBA', 'HANSOLO', 'AQ****U=') + provider.sessionCredentials = credentials + provider.sessionCredentialsExpiration = new Date(1400000000000) + + when: + AWSCredentials result = provider.getCredentials() + + then: + result.is credentials + 1 * initialProvider.needsNewSession() >> false + 1 * initialProvider.currentTimeMillis() >> 1390360887000 + } + + void 'should fail to get new session credentials if not fully configured'() { + + when: + provider.getCredentials() + + then: + initialProvider.credentials >> creds + configService.assumeRoleArn >> arn + configService.assumeRoleSessionName >> session + thrown(AmazonClientException) + + where: + arn | session | creds + null | 'asgardtestsession' | new BasicSessionCredentials('JABBA', 'HANSOLO', 'AQ****U=') + '' | 'asgardtestsession' | new BasicSessionCredentials('JABBA', 'HANSOLO', 'AQ****U=') + 'rolearn1' | '' | new BasicSessionCredentials('JABBA', 'HANSOLO', 'AQ****U=') + 'rolearn1' | null | new BasicSessionCredentials('JABBA', 'HANSOLO', 'AQ****U=') + 'rolearn1' | 'asgardtestsession' | null + } + + void 'should fail to refresh credentials if not fully configured'() { + + when: + provider.refresh() + + then: + 1 * initialProvider.refresh() + initialProvider.credentials >> creds + configService.assumeRoleArn >> arn + configService.assumeRoleSessionName >> session + 
thrown(AmazonClientException) + + where: + arn | session | creds + null | 'asgardtestsession' | new BasicSessionCredentials('JABBA', 'HANSOLO', 'AQ****U=') + '' | 'asgardtestsession' | new BasicSessionCredentials('JABBA', 'HANSOLO', 'AQ****U=') + 'rolearn1' | '' | new BasicSessionCredentials('JABBA', 'HANSOLO', 'AQ****U=') + 'rolearn1' | null | new BasicSessionCredentials('JABBA', 'HANSOLO', 'AQ****U=') + 'rolearn1' | 'asgardtestsession' | null + } +} diff --git a/test/unit/com/netflix/asgard/cred/KeyManagementServiceCredentialsProviderSpec.groovy b/test/unit/com/netflix/asgard/cred/KeyManagementServiceCredentialsProviderSpec.groovy index 998cc011..9b1e8318 100644 --- a/test/unit/com/netflix/asgard/cred/KeyManagementServiceCredentialsProviderSpec.groovy +++ b/test/unit/com/netflix/asgard/cred/KeyManagementServiceCredentialsProviderSpec.groovy @@ -123,6 +123,7 @@ class KeyManagementServiceCredentialsProviderSpec extends Specification { true | 'https://kms/key' | 7103 | '/home/.ssl/keystore.jks' | '' } + @SuppressWarnings("GroovyAssignabilityCheck") private getCredentialsInteractions() { provider.sessionCredentialsExpiration == new Date(1390360309000) 1 * configService.isOnline() >> true diff --git a/test/unit/com/netflix/asgard/deployment/DeploymentWorkflowSpec.groovy b/test/unit/com/netflix/asgard/deployment/DeploymentWorkflowSpec.groovy index 61a50d8d..47b5d831 100644 --- a/test/unit/com/netflix/asgard/deployment/DeploymentWorkflowSpec.groovy +++ b/test/unit/com/netflix/asgard/deployment/DeploymentWorkflowSpec.groovy @@ -15,8 +15,16 @@ */ package com.netflix.asgard.deployment +import com.amazonaws.AmazonServiceException import com.netflix.asgard.Region import com.netflix.asgard.UserContext +import com.netflix.asgard.deployment.steps.CreateAsgStep +import com.netflix.asgard.deployment.steps.DeleteAsgStep +import com.netflix.asgard.deployment.steps.DisableAsgStep +import com.netflix.asgard.deployment.steps.JudgmentStep +import com.netflix.asgard.deployment.steps.ResizeStep +import com.netflix.asgard.deployment.steps.WaitStep +import com.netflix.asgard.model.AsgRoleInCluster import com.netflix.asgard.model.AutoScalingGroupBeanOptions import com.netflix.asgard.model.InstancePriceType import com.netflix.asgard.model.LaunchConfigurationBeanOptions @@ -46,27 +54,25 @@ class DeploymentWorkflowSpec extends Specification { instancePriceType: InstancePriceType.ON_DEMAND, launchConfigurationName: 'the_seaward-v003-20130626140848') AutoScalingGroupBeanOptions asgInputs = new AutoScalingGroupBeanOptions( - availabilityZones: ['us-west2a', 'us-west2b'], minSize: 2, desiredCapacity: 3, maxSize: 4, + availabilityZones: ['us-west2a', 'us-west2b'], minSize: 1, desiredCapacity: 3, maxSize: 4, subnetPurpose: 'internal') AutoScalingGroupBeanOptions asgTemplate = new AutoScalingGroupBeanOptions( - availabilityZones: ['us-west2a', 'us-west2b'], minSize: 2, desiredCapacity: 3, maxSize: 4, + availabilityZones: ['us-west2a', 'us-west2b'], minSize: 1, desiredCapacity: 3, maxSize: 4, subnetPurpose: 'internal', launchConfigurationName: 'the_seaward-v003-20130626140848', autoScalingGroupName: 'the_seaward-v003') List createAsgLog = [ - "Starting deployment for Cluster 'the_seaward'.", "Creating Launch Configuration 'the_seaward-v003-20130626140848'.", "Creating Auto Scaling Group 'the_seaward-v003' initially with 0 instances.", - 'Copying Scaling Policies and Scheduled Actions.', - "New ASG 'the_seaward-v003' was successfully created." + 'Copying Scaling Policies and Scheduled Actions.' 
] - String canaryScaleUpLog = "Scaling new ASG to canary capacity. Waiting up to 30 minutes for 1 instance." + String canaryScaleUpLog = "Waiting up to 30 minutes while resizing to 1 instance." - String canaryJudgeLog = "ASG will now be evaluated for up to 60 minutes during the canary capacity judgment period." + String canaryJudgeLog = "ASG will now be evaluated for up to 60 minutes during the judgment period." - String fullCapacityScaleUpLog = "Scaling new ASG to full capacity. Waiting up to 40 minutes for 3 instances." + String fullCapacityScaleUpLog = "Waiting up to 40 minutes while resizing to 3 instances." private createAsgInteractions() { with(mockActivities) { @@ -76,28 +82,33 @@ class DeploymentWorkflowSpec extends Specification { 1 * createNextAsgForClusterWithoutInstances(userContext, asgTemplate) >> 'the_seaward-v003' 1 * copyScalingPolicies(userContext, asgDeploymentNames) >> 0 1 * copyScheduledActions(userContext, asgDeploymentNames) >> 0 - 1 * startAsgAnalysis('the_seaward', 'gob@bluth.com') >> new ScheduledAsgAnalysis( - "ASG analysis for 'the_seaward' cluster.", new DateTime()) } } def 'should execute full deployment'() { workflowOperations.addFiredTimerNames(['delay', 'waitAfterEurekaChange']) DeploymentWorkflowOptions deploymentOptions = new DeploymentWorkflowOptions(clusterName: 'the_seaward', - notificationDestination: 'gob@bluth.com', delayDurationMinutes: 10, doCanary: true, - canaryCapacity: 1, canaryStartUpTimeoutMinutes: 30, canaryJudgmentPeriodMinutes: 60, - desiredCapacityStartUpTimeoutMinutes: 40, desiredCapacityJudgmentPeriodMinutes: 120, - fullTrafficJudgmentPeriodMinutes: 240, scaleUp: ProceedPreference.Yes, - disablePreviousAsg: ProceedPreference.Yes, deletePreviousAsg: ProceedPreference.Yes) + notificationDestination: 'gob@bluth.com', + steps: [ + new WaitStep(durationMinutes: 10, description: "delay"), + new CreateAsgStep(), + new ResizeStep(capacity: 1, startUpTimeoutMinutes: 30), + new ResizeStep(capacity: 3, startUpTimeoutMinutes: 40), + new DisableAsgStep(targetAsg: AsgRoleInCluster.Previous), + new DeleteAsgStep(targetAsg: AsgRoleInCluster.Previous) + ] + ) when: workflowExecuter.deploy(userContext, deploymentOptions, lcInputs, asgInputs) then: - workflowOperations.logHistory == ['Waiting 10 minutes before starting deployment.'] + - createAsgLog + canaryScaleUpLog + fullCapacityScaleUpLog + [ + workflowOperations.logHistory == ['{"step":0}', 'Waiting 10 minutes before next step.', '{"step":1}'] + + createAsgLog + '{"step":2}' + canaryScaleUpLog + '{"step":3}' + fullCapacityScaleUpLog + [ + '{"step":4}', "Disabling ASG 'the_seaward-v002'.", "Waiting 90 seconds for clients to stop using instances.", + '{"step":5}', "Deleting ASG 'the_seaward-v002'.", "Deployment was successful." 
] @@ -107,11 +118,10 @@ class DeploymentWorkflowSpec extends Specification { } 0 * _ - then: 1 * mockActivities.resizeAsg(userContext, 'the_seaward-v003', 1, 1, 1) + then: 1 * mockActivities.resizeAsg(userContext, 'the_seaward-v003', 1, 1, 4) then: 1 * mockActivities.reasonAsgIsNotOperational(userContext, 'the_seaward-v003', 1) >> '' - then: 1 * mockActivities.resizeAsg(userContext, 'the_seaward-v003', 2, 3, 4) + then: 1 * mockActivities.resizeAsg(userContext, 'the_seaward-v003', 1, 3, 4) then: 1 * mockActivities.reasonAsgIsNotOperational(userContext, 'the_seaward-v003', 3) >> '' - then: 1 * mockActivities.stopAsgAnalysis("ASG analysis for 'the_seaward' cluster.") then: 1 * mockActivities.disableAsg(userContext, 'the_seaward-v002') then: 1 * mockActivities.deleteAsg(userContext, 'the_seaward-v002') then: 1 * mockActivities.sendNotification(_, 'gob@bluth.com', 'the_seaward', @@ -121,18 +131,22 @@ class DeploymentWorkflowSpec extends Specification { def 'should remind judge to decide at the end of judgment period'() { workflowOperations.addFiredTimerNames(['delay', 'judgmentTimeout']) DeploymentWorkflowOptions deploymentOptions = new DeploymentWorkflowOptions(clusterName: 'the_seaward', - notificationDestination: 'gob@bluth.com', delayDurationMinutes: 10, doCanary: true, - canaryCapacity: 1, canaryStartUpTimeoutMinutes: 30, canaryJudgmentPeriodMinutes: 60, - desiredCapacityStartUpTimeoutMinutes: 40, desiredCapacityJudgmentPeriodMinutes: 120, - fullTrafficJudgmentPeriodMinutes: 240, scaleUp: ProceedPreference.Ask, - disablePreviousAsg: ProceedPreference.No) + notificationDestination: 'gob@bluth.com', + steps: [ + new WaitStep(durationMinutes: 10, description: "delay"), + new CreateAsgStep(), + new ResizeStep(capacity: 1, startUpTimeoutMinutes: 30), + new JudgmentStep(durationMinutes: 60), + new ResizeStep(capacity: 3, startUpTimeoutMinutes: 40) + ] + ) when: workflowExecuter.deploy(userContext, deploymentOptions, lcInputs, asgInputs) then: - workflowOperations.logHistory == ['Waiting 10 minutes before starting deployment.'] + - createAsgLog + canaryScaleUpLog + canaryJudgeLog + + workflowOperations.logHistory == ['{"step":0}', 'Waiting 10 minutes before next step.', '{"step":1}'] + + createAsgLog + '{"step":2}' + canaryScaleUpLog + '{"step":3}' + canaryJudgeLog + "Deployment was rolled back. Judge decided ASG 'the_seaward-v003' was not viable." 
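/*
 * Illustrative sketch, not part of the patch: the '{"step":N}' entries asserted in the
 * logHistory expectations above are the markers produced by Deployment.constructStepJson and
 * read back by Deployment.parseStepIndex, as exercised in DeploymentSpec later in this diff.
 * The helper below is an assumed, minimal regrouping of a raw log by those markers; the real
 * Deployment.logForSteps implementation may differ.
 */
List<List<String>> groupLogByStep(List<String> log) {
    List<List<String>> logForSteps = []
    log.each { String line ->
        if (line.startsWith('{"step":')) {
            logForSteps << []                 // a step marker opens a new per-step bucket
        } else if (logForSteps) {
            logForSteps.last() << line        // other messages belong to the current step
        }
    }
    logForSteps
}

// Mirrors the data used by DeploymentSpec's 'should organize log by steps' feature method.
assert groupLogByStep(['{"step":0}', 'on the first step', 'still finishing up step one',
        '{"step":1}', 'now working on the next one']) ==
        [['on the first step', 'still finishing up step one'], ['now working on the next one']]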
interaction { @@ -140,16 +154,17 @@ class DeploymentWorkflowSpec extends Specification { } 0 * _ - then: 1 * mockActivities.resizeAsg(userContext, 'the_seaward-v003', 1, 1, 1) + then: 1 * mockActivities.resizeAsg(userContext, 'the_seaward-v003', 1, 1, 4) then: 1 * mockActivities.reasonAsgIsNotOperational(userContext, 'the_seaward-v003', 1) >> '' + then: 1 * mockActivities.startAsgAnalysis('the_seaward', 'gob@bluth.com') >> new ScheduledAsgAnalysis( + "ASG analysis for 'the_seaward' cluster.", new DateTime()) then: 1 * mockActivities.askIfDeploymentShouldProceed('gob@bluth.com', 'the_seaward-v003', - "ASG will now be evaluated for up to 60 minutes during the canary capacity judgment period.") >> false - then: - 1 * mockActivities.sendNotification(_, 'gob@bluth.com', 'the_seaward', - "Canary capacity judgment period for ASG 'the_seaward-v003' has ended.", + "ASG will now be evaluated for up to 60 minutes during the judgment period.") >> false + then: 1 * mockActivities.sendNotification(_, 'gob@bluth.com', 'the_seaward', + "Judgment period for ASG 'the_seaward-v003' has ended.", "Please make a decision to proceed or roll back.") - then: 1 * mockActivities.enableAsg(userContext, 'the_seaward-v002') - then: 1 * mockActivities.disableAsg(userContext, 'the_seaward-v003') + then: 1 * mockActivities.enableAsg(userContext, 'the_seaward-v002') >> true + then: 1 * mockActivities.disableAsg(userContext, 'the_seaward-v003') >> true then: 1 * mockActivities.stopAsgAnalysis("ASG analysis for 'the_seaward' cluster.") 1 * mockActivities.sendNotification(_, 'gob@bluth.com', 'the_seaward', "Deployment failed for ASG 'the_seaward-v003'.", @@ -159,18 +174,24 @@ class DeploymentWorkflowSpec extends Specification { def 'should execute deployment without canary or delay'() { workflowOperations.addFiredTimerNames(['waitAfterEurekaChange']) DeploymentWorkflowOptions deploymentOptions = new DeploymentWorkflowOptions(clusterName: 'the_seaward', - notificationDestination: 'gob@bluth.com', doCanary: false, desiredCapacityStartUpTimeoutMinutes: 40, - desiredCapacityJudgmentPeriodMinutes: 120, fullTrafficJudgmentPeriodMinutes: 240, - scaleUp: ProceedPreference.Yes, disablePreviousAsg: ProceedPreference.Yes, - deletePreviousAsg: ProceedPreference.Yes) + notificationDestination: 'gob@bluth.com', + steps: [ + new CreateAsgStep(), + new ResizeStep(capacity: 3, startUpTimeoutMinutes: 40), + new DisableAsgStep(targetAsg: AsgRoleInCluster.Previous), + new DeleteAsgStep(targetAsg: AsgRoleInCluster.Previous) + ] + ) when: workflowExecuter.deploy(userContext, deploymentOptions, lcInputs, asgInputs) then: - workflowOperations.logHistory == createAsgLog + fullCapacityScaleUpLog + [ + workflowOperations.logHistory == ['{"step":0}'] + createAsgLog + '{"step":1}' + fullCapacityScaleUpLog + [ + '{"step":2}', "Disabling ASG 'the_seaward-v002'.", "Waiting 90 seconds for clients to stop using instances.", + '{"step":3}', "Deleting ASG 'the_seaward-v002'.", "Deployment was successful." 
] @@ -179,9 +200,8 @@ class DeploymentWorkflowSpec extends Specification { } 0 * _ - then: 1 * mockActivities.resizeAsg(userContext, 'the_seaward-v003', 2, 3, 4) + then: 1 * mockActivities.resizeAsg(userContext, 'the_seaward-v003', 1, 3, 4) then: 1 * mockActivities.reasonAsgIsNotOperational(userContext, 'the_seaward-v003', 3) >> '' - then: 1 * mockActivities.stopAsgAnalysis("ASG analysis for 'the_seaward' cluster.") then: 1 * mockActivities.disableAsg(userContext, 'the_seaward-v002') then: 1 * mockActivities.deleteAsg(userContext, 'the_seaward-v002') then: 1 * mockActivities.sendNotification(_, 'gob@bluth.com', 'the_seaward', @@ -190,16 +210,18 @@ class DeploymentWorkflowSpec extends Specification { def 'should execute canary without scaling up'() { DeploymentWorkflowOptions deploymentOptions = new DeploymentWorkflowOptions(clusterName: 'the_seaward', - notificationDestination: 'gob@bluth.com', doCanary: true, - canaryCapacity: 1, canaryStartUpTimeoutMinutes: 30, canaryJudgmentPeriodMinutes: 60, - scaleUp: ProceedPreference.No) + notificationDestination: 'gob@bluth.com', + steps: [ + new CreateAsgStep(), + new ResizeStep(capacity: 1, startUpTimeoutMinutes: 30) + ] + ) when: workflowExecuter.deploy(userContext, deploymentOptions, lcInputs, asgInputs) then: - workflowOperations.logHistory == createAsgLog + canaryScaleUpLog + [ - "ASG 'the_seaward-v002' was not disabled. The new ASG is not taking full traffic.", + workflowOperations.logHistory == ['{"step":0}'] + createAsgLog + '{"step":1}' + canaryScaleUpLog + [ "Deployment was successful." ] interaction { @@ -207,37 +229,38 @@ class DeploymentWorkflowSpec extends Specification { } 0 * _ - then: 1 * mockActivities.resizeAsg(userContext, 'the_seaward-v003', 1, 1, 1) + then: 1 * mockActivities.resizeAsg(userContext, 'the_seaward-v003', 1, 1, 4) then: 1 * mockActivities.reasonAsgIsNotOperational(userContext, 'the_seaward-v003', 1) >> '' - then: 1 * mockActivities.stopAsgAnalysis("ASG analysis for 'the_seaward' cluster.") then: 1 * mockActivities.sendNotification(_, 'gob@bluth.com', 'the_seaward', "Deployment succeeded for ASG 'the_seaward-v003'.", "Deployment was successful.") } def 'should display error and rollback deployment if there is an error checking health'() { DeploymentWorkflowOptions deploymentOptions = new DeploymentWorkflowOptions(clusterName: 'the_seaward', - notificationDestination: 'gob@bluth.com', doCanary: true, - canaryCapacity: 1, canaryStartUpTimeoutMinutes: 30, canaryJudgmentPeriodMinutes: 60, - scaleUp: ProceedPreference.No) + notificationDestination: 'gob@bluth.com', + steps: [ + new CreateAsgStep(), + new ResizeStep(capacity: 1, startUpTimeoutMinutes: 30) + ] + ) when: workflowExecuter.deploy(userContext, deploymentOptions, lcInputs, asgInputs) then: - workflowOperations.logHistory == createAsgLog + canaryScaleUpLog + + workflowOperations.logHistory == ['{"step":0}'] + createAsgLog + '{"step":1}' + canaryScaleUpLog + "Deployment was rolled back due to error: java.lang.IllegalStateException: Something really went wrong!" 
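/*
 * Illustrative sketch, not part of the patch: the updated mock expectations in this spec now
 * assume resizeAsg(userContext, asgName, min, desired, max) keeps the min (1) and max (4) from
 * asgInputs and takes only the desired capacity from the ResizeStep, which is why the old
 * expectations (1, 1, 1) and (2, 3, 4) become (1, 1, 4) and (1, 3, 4). The helper name and shape
 * below are assumptions used purely to state that convention.
 */
List<Integer> assumedResizeArguments(int configuredMin, int configuredMax, int stepCapacity) {
    [configuredMin, stepCapacity, configuredMax]   // min, desired, max as passed to resizeAsg
}

assert assumedResizeArguments(1, 4, 1) == [1, 1, 4]   // canary-sized ResizeStep(capacity: 1)
assert assumedResizeArguments(1, 4, 3) == [1, 3, 4]   // full-capacity ResizeStep(capacity: 3)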
interaction { createAsgInteractions() } 0 * _ - then: 1 * mockActivities.resizeAsg(userContext, 'the_seaward-v003', 1, 1, 1) + then: 1 * mockActivities.resizeAsg(userContext, 'the_seaward-v003', 1, 1, 4) then: 1 * mockActivities.reasonAsgIsNotOperational(userContext, 'the_seaward-v003', 1) >> { throw new IllegalStateException('Something really went wrong!') } - then: 1 * mockActivities.enableAsg(userContext, 'the_seaward-v002') - then: 1 * mockActivities.disableAsg(userContext, 'the_seaward-v003') - then: 1 * mockActivities.stopAsgAnalysis("ASG analysis for 'the_seaward' cluster.") + then: 1 * mockActivities.enableAsg(userContext, 'the_seaward-v002') >> true + then: 1 * mockActivities.disableAsg(userContext, 'the_seaward-v003') >> true then: 1 * mockActivities.sendNotification(_, 'gob@bluth.com', 'the_seaward', "Deployment failed for ASG 'the_seaward-v003'.", "Deployment was rolled back due to error: java.lang.IllegalStateException: Something really went wrong!" @@ -246,16 +269,18 @@ class DeploymentWorkflowSpec extends Specification { def 'should retry health check if not ready yet.'() { DeploymentWorkflowOptions deploymentOptions = new DeploymentWorkflowOptions(clusterName: 'the_seaward', - notificationDestination: 'gob@bluth.com', doCanary: false, - scaleUp: ProceedPreference.Yes, disablePreviousAsg: ProceedPreference.No, - desiredCapacityStartUpTimeoutMinutes: 40) + notificationDestination: 'gob@bluth.com', + steps: [ + new CreateAsgStep(), + new ResizeStep(capacity: 3, startUpTimeoutMinutes: 40) + ] + ) when: workflowExecuter.deploy(userContext, deploymentOptions, lcInputs, asgInputs) then: - workflowOperations.logHistory == createAsgLog + fullCapacityScaleUpLog + [ - "ASG 'the_seaward-v002' was not disabled. The new ASG is not taking full traffic.", + workflowOperations.logHistory == ['{"step":0}'] + createAsgLog + '{"step":1}' + fullCapacityScaleUpLog + [ "Deployment was successful." ] @@ -264,26 +289,28 @@ class DeploymentWorkflowSpec extends Specification { } 0 * _ - then: 1 * mockActivities.resizeAsg(userContext, 'the_seaward-v003', 2, 3, 4) + then: 1 * mockActivities.resizeAsg(userContext, 'the_seaward-v003', 1, 3, 4) then: 1 * mockActivities.reasonAsgIsNotOperational(userContext, 'the_seaward-v003', 3) >> 'Not healthy Yet' then: 1 * mockActivities.reasonAsgIsNotOperational(userContext, 'the_seaward-v003', 3) >> '' - then: 1 * mockActivities.stopAsgAnalysis("ASG analysis for 'the_seaward' cluster.") then: 1 * mockActivities.sendNotification(_, 'gob@bluth.com', 'the_seaward', "Deployment succeeded for ASG 'the_seaward-v003'.", "Deployment was successful.") } - def 'should rollback for canary start up time out'() { + def 'should rollback for start up time out'() { workflowOperations.addFiredTimerNames(['startupTimeout']) DeploymentWorkflowOptions deploymentOptions = new DeploymentWorkflowOptions(clusterName: 'the_seaward', - notificationDestination: 'gob@bluth.com', doCanary: true, - canaryCapacity: 1, canaryStartUpTimeoutMinutes: 30, canaryJudgmentPeriodMinutes: 60, - scaleUp: ProceedPreference.No) + notificationDestination: 'gob@bluth.com', + steps: [ + new CreateAsgStep(), + new ResizeStep(capacity: 1, startUpTimeoutMinutes: 30) + ] + ) when: workflowExecuter.deploy(userContext, deploymentOptions, lcInputs, asgInputs) then: - workflowOperations.logHistory == createAsgLog + canaryScaleUpLog + + workflowOperations.logHistory == ['{"step":0}'] + createAsgLog + '{"step":1}' + canaryScaleUpLog + "Deployment was rolled back. 
ASG 'the_seaward-v003' was not at capacity after 30 minutes." interaction { @@ -291,11 +318,10 @@ class DeploymentWorkflowSpec extends Specification { } 0 * _ - then: 1 * mockActivities.resizeAsg(userContext, 'the_seaward-v003', 1, 1, 1) + then: 1 * mockActivities.resizeAsg(userContext, 'the_seaward-v003', 1, 1, 4) then: mockActivities.reasonAsgIsNotOperational(userContext, 'the_seaward-v003', 1) >> 'Not operational yet.' - then: 1 * mockActivities.enableAsg(userContext, 'the_seaward-v002') - then: 1 * mockActivities.disableAsg(userContext, 'the_seaward-v003') - then: 1 * mockActivities.stopAsgAnalysis("ASG analysis for 'the_seaward' cluster.") + then: 1 * mockActivities.enableAsg(userContext, 'the_seaward-v002') >> true + then: 1 * mockActivities.disableAsg(userContext, 'the_seaward-v003') >> true then: 1 * mockActivities.sendNotification(_, 'gob@bluth.com', 'the_seaward', "Deployment failed for ASG 'the_seaward-v003'.", "Deployment was rolled back. ASG 'the_seaward-v003' was not at capacity after 30 minutes.") @@ -304,14 +330,18 @@ class DeploymentWorkflowSpec extends Specification { def 'should rollback for desired capacity start up time out'() { workflowOperations.addFiredTimerNames(['startupTimeout']) DeploymentWorkflowOptions deploymentOptions = new DeploymentWorkflowOptions(clusterName: 'the_seaward', - notificationDestination: 'gob@bluth.com', desiredCapacityStartUpTimeoutMinutes: 40, - scaleUp: ProceedPreference.Yes) + notificationDestination: 'gob@bluth.com', + steps: [ + new CreateAsgStep(), + new ResizeStep(capacity: 3, startUpTimeoutMinutes: 40) + ] + ) when: workflowExecuter.deploy(userContext, deploymentOptions, lcInputs, asgInputs) then: - workflowOperations.logHistory == createAsgLog + fullCapacityScaleUpLog + + workflowOperations.logHistory == ['{"step":0}'] + createAsgLog + '{"step":1}' + fullCapacityScaleUpLog + "Deployment was rolled back. ASG 'the_seaward-v003' was not at capacity after 40 minutes." interaction { @@ -319,11 +349,10 @@ class DeploymentWorkflowSpec extends Specification { } 0 * _ - then: 1 * mockActivities.resizeAsg(userContext, 'the_seaward-v003', 2, 3, 4) + then: 1 * mockActivities.resizeAsg(userContext, 'the_seaward-v003', 1, 3, 4) then: (1.._) * mockActivities.reasonAsgIsNotOperational(userContext, 'the_seaward-v003', 3) >> 'Not healthy Yet' - then: 1 * mockActivities.enableAsg(userContext, 'the_seaward-v002') - then: 1 * mockActivities.disableAsg(userContext, 'the_seaward-v003') - then: 1 * mockActivities.stopAsgAnalysis("ASG analysis for 'the_seaward' cluster.") + then: 1 * mockActivities.enableAsg(userContext, 'the_seaward-v002') >> true + then: 1 * mockActivities.disableAsg(userContext, 'the_seaward-v003') >> true then: 1 * mockActivities.sendNotification(_, 'gob@bluth.com', 'the_seaward', "Deployment failed for ASG 'the_seaward-v003'.", "Deployment was rolled back. 
ASG 'the_seaward-v003' was not at capacity after 40 minutes.") @@ -331,15 +360,21 @@ class DeploymentWorkflowSpec extends Specification { def 'should rollback for canary decision to not proceed'() { DeploymentWorkflowOptions deploymentOptions = new DeploymentWorkflowOptions(clusterName: 'the_seaward', - notificationDestination: 'gob@bluth.com', doCanary: true, - canaryCapacity: 1, canaryStartUpTimeoutMinutes: 30, canaryJudgmentPeriodMinutes: 60, - scaleUp: ProceedPreference.Ask) + notificationDestination: 'gob@bluth.com', + steps: [ + new CreateAsgStep(), + new ResizeStep(capacity: 1, startUpTimeoutMinutes: 30), + new JudgmentStep(durationMinutes: 60), + new ResizeStep(capacity: 3, startUpTimeoutMinutes: 40) + ] + ) when: workflowExecuter.deploy(userContext, deploymentOptions, lcInputs, asgInputs) then: - workflowOperations.logHistory == createAsgLog + canaryScaleUpLog + canaryJudgeLog + + workflowOperations.logHistory == ['{"step":0}'] + createAsgLog + '{"step":1}' + canaryScaleUpLog + + '{"step":2}' + canaryJudgeLog + "Deployment was rolled back. Judge decided ASG 'the_seaward-v003' was not viable." interaction { @@ -347,12 +382,14 @@ class DeploymentWorkflowSpec extends Specification { } 0 * _ - then: 1 * mockActivities.resizeAsg(userContext, 'the_seaward-v003', 1, 1, 1) + then: 1 * mockActivities.resizeAsg(userContext, 'the_seaward-v003', 1, 1, 4) then: 1 * mockActivities.reasonAsgIsNotOperational(userContext, 'the_seaward-v003', 1) >> '' + then: 1 * mockActivities.startAsgAnalysis('the_seaward', 'gob@bluth.com') >> new ScheduledAsgAnalysis( + "ASG analysis for 'the_seaward' cluster.", new DateTime()) then: 1 * mockActivities.askIfDeploymentShouldProceed('gob@bluth.com', 'the_seaward-v003', - "ASG will now be evaluated for up to 60 minutes during the canary capacity judgment period.") >> false - then: 1 * mockActivities.enableAsg(userContext, 'the_seaward-v002') - then: 1 * mockActivities.disableAsg(userContext, 'the_seaward-v003') + "ASG will now be evaluated for up to 60 minutes during the judgment period.") >> false + then: 1 * mockActivities.enableAsg(userContext, 'the_seaward-v002') >> true + then: 1 * mockActivities.disableAsg(userContext, 'the_seaward-v003') >> true then: 1 * mockActivities.stopAsgAnalysis("ASG analysis for 'the_seaward' cluster.") then: 1 * mockActivities.sendNotification(_, 'gob@bluth.com', 'the_seaward', "Deployment failed for ASG 'the_seaward-v003'.", @@ -361,18 +398,21 @@ class DeploymentWorkflowSpec extends Specification { def 'should continue deployment for canary decision to proceed'() { DeploymentWorkflowOptions deploymentOptions = new DeploymentWorkflowOptions(clusterName: 'the_seaward', - notificationDestination: 'gob@bluth.com', doCanary: true, - canaryCapacity: 1, canaryStartUpTimeoutMinutes: 30, canaryJudgmentPeriodMinutes: 60, - desiredCapacityStartUpTimeoutMinutes: 40, - scaleUp: ProceedPreference.Ask, disablePreviousAsg: ProceedPreference.No) + notificationDestination: 'gob@bluth.com', + steps: [ + new CreateAsgStep(), + new ResizeStep(capacity: 1, startUpTimeoutMinutes: 30), + new JudgmentStep(durationMinutes: 60), + new ResizeStep(capacity: 3, startUpTimeoutMinutes: 40) + ] + ) when: workflowExecuter.deploy(userContext, deploymentOptions, lcInputs, asgInputs) then: - workflowOperations.logHistory == createAsgLog + canaryScaleUpLog + canaryJudgeLog + - fullCapacityScaleUpLog + [ - "ASG 'the_seaward-v002' was not disabled. 
The new ASG is not taking full traffic.", + workflowOperations.logHistory == ['{"step":0}'] + createAsgLog + '{"step":1}' + canaryScaleUpLog + + '{"step":2}' + canaryJudgeLog + '{"step":3}' + fullCapacityScaleUpLog + [ "Deployment was successful." ] @@ -381,30 +421,38 @@ class DeploymentWorkflowSpec extends Specification { } 0 * _ - then: 1 * mockActivities.resizeAsg(userContext, 'the_seaward-v003', 1, 1, 1) + then: 1 * mockActivities.resizeAsg(userContext, 'the_seaward-v003', 1, 1, 4) then: 1 * mockActivities.reasonAsgIsNotOperational(userContext, 'the_seaward-v003', 1) >> '' + then: 1 * mockActivities.startAsgAnalysis('the_seaward', 'gob@bluth.com') >> new ScheduledAsgAnalysis( + "ASG analysis for 'the_seaward' cluster.", new DateTime()) then: 1 * mockActivities.askIfDeploymentShouldProceed('gob@bluth.com', 'the_seaward-v003', - "ASG will now be evaluated for up to 60 minutes during the canary capacity judgment period.") >> true - then: 1 * mockActivities.resizeAsg(userContext, 'the_seaward-v003', 2, 3, 4) - then: 1 * mockActivities.reasonAsgIsNotOperational(userContext, 'the_seaward-v003', 3) >> '' + "ASG will now be evaluated for up to 60 minutes during the judgment period.") >> true then: 1 * mockActivities.stopAsgAnalysis("ASG analysis for 'the_seaward' cluster.") + then: 1 * mockActivities.resizeAsg(userContext, 'the_seaward-v003', 1, 3, 4) + then: 1 * mockActivities.reasonAsgIsNotOperational(userContext, 'the_seaward-v003', 3) >> '' then: 1 * mockActivities.sendNotification(_, 'gob@bluth.com', 'the_seaward', "Deployment succeeded for ASG 'the_seaward-v003'.", "Deployment was successful.") } def 'should rollback deployment for full capacity decision to not proceed'() { DeploymentWorkflowOptions deploymentOptions = new DeploymentWorkflowOptions(clusterName: 'the_seaward', - notificationDestination: 'gob@bluth.com', doCanary: false, - scaleUp: ProceedPreference.Yes, disablePreviousAsg: ProceedPreference.Ask, - deletePreviousAsg: ProceedPreference.Yes, desiredCapacityStartUpTimeoutMinutes: 40, - desiredCapacityJudgmentPeriodMinutes: 120) + notificationDestination: 'gob@bluth.com', + steps: [ + new CreateAsgStep(), + new ResizeStep(capacity: 3, startUpTimeoutMinutes: 40), + new JudgmentStep(durationMinutes: 120), + new DisableAsgStep(targetAsg: AsgRoleInCluster.Previous), + new DeleteAsgStep(targetAsg: AsgRoleInCluster.Previous) + ] + ) when: workflowExecuter.deploy(userContext, deploymentOptions, lcInputs, asgInputs) then: - workflowOperations.logHistory == createAsgLog + fullCapacityScaleUpLog + [ - "ASG will now be evaluated for up to 120 minutes during the full capacity judgment period.", + workflowOperations.logHistory == ['{"step":0}'] + createAsgLog + '{"step":1}' + fullCapacityScaleUpLog + [ + '{"step":2}', + "ASG will now be evaluated for up to 120 minutes during the judgment period.", "Deployment was rolled back. Judge decided ASG 'the_seaward-v003' was not viable." 
] @@ -413,12 +461,14 @@ class DeploymentWorkflowSpec extends Specification { } 0 * _ - then: 1 * mockActivities.resizeAsg(userContext, 'the_seaward-v003', 2, 3, 4) + then: 1 * mockActivities.resizeAsg(userContext, 'the_seaward-v003', 1, 3, 4) then: 1 * mockActivities.reasonAsgIsNotOperational(userContext, 'the_seaward-v003', 3) >> '' + then: 1 * mockActivities.startAsgAnalysis('the_seaward', 'gob@bluth.com') >> new ScheduledAsgAnalysis( + "ASG analysis for 'the_seaward' cluster.", new DateTime()) then: 1 * mockActivities.askIfDeploymentShouldProceed('gob@bluth.com', 'the_seaward-v003', - "ASG will now be evaluated for up to 120 minutes during the full capacity judgment period.") >> false - then: 1 * mockActivities.enableAsg(userContext, 'the_seaward-v002') - then: 1 * mockActivities.disableAsg(userContext, 'the_seaward-v003') + "ASG will now be evaluated for up to 120 minutes during the judgment period.") >> false + then: 1 * mockActivities.enableAsg(userContext, 'the_seaward-v002') >> true + then: 1 * mockActivities.disableAsg(userContext, 'the_seaward-v003') >> true then: 1 * mockActivities.stopAsgAnalysis("ASG analysis for 'the_seaward' cluster.") then: 1 * mockActivities.sendNotification(_, 'gob@bluth.com', 'the_seaward', "Deployment failed for ASG 'the_seaward-v003'.", @@ -428,19 +478,27 @@ class DeploymentWorkflowSpec extends Specification { def 'should continue with full capacity decision to proceed'() { workflowOperations.addFiredTimerNames(['waitAfterEurekaChange']) DeploymentWorkflowOptions deploymentOptions = new DeploymentWorkflowOptions(clusterName: 'the_seaward', - notificationDestination: 'gob@bluth.com', doCanary: false, - scaleUp: ProceedPreference.Yes, disablePreviousAsg: ProceedPreference.Ask, - deletePreviousAsg: ProceedPreference.Yes, desiredCapacityStartUpTimeoutMinutes: 40, - desiredCapacityJudgmentPeriodMinutes: 120) + notificationDestination: 'gob@bluth.com', + steps: [ + new CreateAsgStep(), + new ResizeStep(capacity: 3, startUpTimeoutMinutes: 40), + new JudgmentStep(durationMinutes: 120), + new DisableAsgStep(targetAsg: AsgRoleInCluster.Previous), + new DeleteAsgStep(targetAsg: AsgRoleInCluster.Previous) + ] + ) when: workflowExecuter.deploy(userContext, deploymentOptions, lcInputs, asgInputs) then: - workflowOperations.logHistory == createAsgLog + fullCapacityScaleUpLog + [ - "ASG will now be evaluated for up to 120 minutes during the full capacity judgment period.", + workflowOperations.logHistory == ['{"step":0}'] + createAsgLog + '{"step":1}' + fullCapacityScaleUpLog + [ + '{"step":2}', + "ASG will now be evaluated for up to 120 minutes during the judgment period.", + '{"step":3}', "Disabling ASG 'the_seaward-v002'.", "Waiting 90 seconds for clients to stop using instances.", + '{"step":4}', "Deleting ASG 'the_seaward-v002'.", "Deployment was successful." 
] @@ -450,10 +508,12 @@ class DeploymentWorkflowSpec extends Specification { } 0 * _ - then: 1 * mockActivities.resizeAsg(userContext, 'the_seaward-v003', 2, 3, 4) + then: 1 * mockActivities.resizeAsg(userContext, 'the_seaward-v003', 1, 3, 4) then: 1 * mockActivities.reasonAsgIsNotOperational(userContext, 'the_seaward-v003', 3) >> '' + then: 1 * mockActivities.startAsgAnalysis('the_seaward', 'gob@bluth.com') >> new ScheduledAsgAnalysis( + "ASG analysis for 'the_seaward' cluster.", new DateTime()) then: 1 * mockActivities.askIfDeploymentShouldProceed('gob@bluth.com', 'the_seaward-v003', - "ASG will now be evaluated for up to 120 minutes during the full capacity judgment period.") >> true + "ASG will now be evaluated for up to 120 minutes during the judgment period.") >> true then: 1 * mockActivities.stopAsgAnalysis("ASG analysis for 'the_seaward' cluster.") then: 1 * mockActivities.disableAsg(userContext, 'the_seaward-v002') then: 1 * mockActivities.deleteAsg(userContext, 'the_seaward-v002') @@ -464,15 +524,20 @@ class DeploymentWorkflowSpec extends Specification { def 'should not delete previous ASG if specified not to'() { workflowOperations.addFiredTimerNames(['waitAfterEurekaChange']) DeploymentWorkflowOptions deploymentOptions = new DeploymentWorkflowOptions(clusterName: 'the_seaward', - notificationDestination: 'gob@bluth.com', doCanary: false, - scaleUp: ProceedPreference.Yes, disablePreviousAsg: ProceedPreference.Yes, - deletePreviousAsg: ProceedPreference.No, desiredCapacityStartUpTimeoutMinutes: 40, ) + notificationDestination: 'gob@bluth.com', + steps: [ + new CreateAsgStep(), + new ResizeStep(capacity: 3, startUpTimeoutMinutes: 40), + new DisableAsgStep(targetAsg: AsgRoleInCluster.Previous) + ] + ) when: workflowExecuter.deploy(userContext, deploymentOptions, lcInputs, asgInputs) then: - workflowOperations.logHistory == createAsgLog + fullCapacityScaleUpLog + [ + workflowOperations.logHistory == ['{"step":0}'] + createAsgLog + '{"step":1}' + fullCapacityScaleUpLog + [ + '{"step":2}', "Disabling ASG 'the_seaward-v002'.", "Waiting 90 seconds for clients to stop using instances.", "Deployment was successful." 
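/*
 * Illustrative sketch, not part of the patch: the flag-style options removed throughout this spec
 * (doCanary, scaleUp, disablePreviousAsg, deletePreviousAsg, ...) are replaced by an ordered
 * steps list, and StartDeploymentRequestSpec below pins down the JSON each typed step is expected
 * to serialize to, e.g. {"type":"Resize","targetAsg":"Next","capacity":3,"startUpTimeoutMinutes":40}.
 * The hand-rolled renderer below only illustrates that shape; the real serialization relies on
 * Jackson (ObjectMapper) with a "type" discriminator on the step classes.
 */
String stepJson(Map<String, Object> step) {
    '{' + step.collect { k, v -> "\"${k}\":" + (v instanceof Number ? v : "\"${v}\"") }.join(',') + '}'
}

assert stepJson([type: 'Wait', durationMinutes: 5, description: 'delay']) ==
        '{"type":"Wait","durationMinutes":5,"description":"delay"}'
assert stepJson([type: 'DisableAsg', targetAsg: 'Previous']) ==
        '{"type":"DisableAsg","targetAsg":"Previous"}'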
@@ -483,9 +548,8 @@ class DeploymentWorkflowSpec extends Specification { } 0 * _ - then: 1 * mockActivities.resizeAsg(userContext, 'the_seaward-v003', 2, 3, 4) + then: 1 * mockActivities.resizeAsg(userContext, 'the_seaward-v003', 1, 3, 4) then: 1 * mockActivities.reasonAsgIsNotOperational(userContext, 'the_seaward-v003', 3) >> '' - then: 1 * mockActivities.stopAsgAnalysis("ASG analysis for 'the_seaward' cluster.") then: 1 * mockActivities.disableAsg(userContext, 'the_seaward-v002') then: 1 * mockActivities.sendNotification(_, 'gob@bluth.com', 'the_seaward', "Deployment succeeded for ASG 'the_seaward-v003'.", "Deployment was successful.") @@ -494,19 +558,26 @@ class DeploymentWorkflowSpec extends Specification { def 'should rollback deployment for full traffic decision to not proceed'() { workflowOperations.addFiredTimerNames(['waitAfterEurekaChange']) DeploymentWorkflowOptions deploymentOptions = new DeploymentWorkflowOptions(clusterName: 'the_seaward', - notificationDestination: 'gob@bluth.com', doCanary: false, - scaleUp: ProceedPreference.Yes, disablePreviousAsg: ProceedPreference.Yes, - deletePreviousAsg: ProceedPreference.Ask, desiredCapacityStartUpTimeoutMinutes: 40, - fullTrafficJudgmentPeriodMinutes: 240) + notificationDestination: 'gob@bluth.com', + steps: [ + new CreateAsgStep(), + new ResizeStep(capacity: 3, startUpTimeoutMinutes: 40), + new DisableAsgStep(targetAsg: AsgRoleInCluster.Previous), + new JudgmentStep(durationMinutes: 240), + new DeleteAsgStep(targetAsg: AsgRoleInCluster.Previous) + ] + ) when: workflowExecuter.deploy(userContext, deploymentOptions, lcInputs, asgInputs) then: - workflowOperations.logHistory == createAsgLog + fullCapacityScaleUpLog + [ + workflowOperations.logHistory == ['{"step":0}'] + createAsgLog + '{"step":1}' + fullCapacityScaleUpLog + [ + '{"step":2}', "Disabling ASG 'the_seaward-v002'.", "Waiting 90 seconds for clients to stop using instances.", - "ASG will now be evaluated for up to 240 minutes during the full traffic judgment period.", + '{"step":3}', + "ASG will now be evaluated for up to 240 minutes during the judgment period.", "Deployment was rolled back. Judge decided ASG 'the_seaward-v003' was not viable." 
] @@ -515,16 +586,53 @@ class DeploymentWorkflowSpec extends Specification { } 0 * _ - then: 1 * mockActivities.resizeAsg(userContext, 'the_seaward-v003', 2, 3, 4) + then: 1 * mockActivities.resizeAsg(userContext, 'the_seaward-v003', 1, 3, 4) then: 1 * mockActivities.reasonAsgIsNotOperational(userContext, 'the_seaward-v003', 3) >> '' - then: 1 * mockActivities.stopAsgAnalysis("ASG analysis for 'the_seaward' cluster.") then: 1 * mockActivities.disableAsg(userContext, 'the_seaward-v002') + then: 1 * mockActivities.startAsgAnalysis('the_seaward', 'gob@bluth.com') >> new ScheduledAsgAnalysis( + "ASG analysis for 'the_seaward' cluster.", new DateTime()) then: 1 * mockActivities.askIfDeploymentShouldProceed('gob@bluth.com', 'the_seaward-v003', - "ASG will now be evaluated for up to 240 minutes during the full traffic judgment period.") >> false - then: 1 * mockActivities.enableAsg(userContext, 'the_seaward-v002') - then: 1 * mockActivities.disableAsg(userContext, 'the_seaward-v003') + "ASG will now be evaluated for up to 240 minutes during the judgment period.") >> false + then: 1 * mockActivities.enableAsg(userContext, 'the_seaward-v002') >> true + then: 1 * mockActivities.disableAsg(userContext, 'the_seaward-v003') >> true + then: 1 * mockActivities.stopAsgAnalysis("ASG analysis for 'the_seaward' cluster.") then: 1 * mockActivities.sendNotification(_, 'gob@bluth.com', 'the_seaward', "Deployment failed for ASG 'the_seaward-v003'.", "Deployment was rolled back. Judge decided ASG 'the_seaward-v003' was not viable.") } + + def 'should not rollback if previous ASG has disappeared'() { + DeploymentWorkflowOptions deploymentOptions = new DeploymentWorkflowOptions(clusterName: 'the_seaward', + notificationDestination: 'gob@bluth.com', + steps: [ + new CreateAsgStep(), + new ResizeStep(capacity: 1, startUpTimeoutMinutes: 30) + ] + ) + + when: + workflowExecuter.deploy(userContext, deploymentOptions, lcInputs, asgInputs) + + then: + workflowOperations.logHistory == ['{"step":0}'] + createAsgLog + '{"step":1}' + canaryScaleUpLog + + "Previous ASG 'the_seaward-v002' could not be enabled." + + "Deployment was rolled back due to error: java.lang.IllegalStateException: Something really went wrong!" + interaction { + createAsgInteractions() + } + 0 * _ + + then: 1 * mockActivities.resizeAsg(userContext, 'the_seaward-v003', 1, 1, 4) + then: 1 * mockActivities.reasonAsgIsNotOperational(userContext, 'the_seaward-v003', 1) >> { + throw new IllegalStateException('Something really went wrong!') + } + then: 1 * mockActivities.enableAsg(userContext, 'the_seaward-v002') >> { + throw new AmazonServiceException('AutoScalingGroup name not found - no such group: the_seaward-v002') + } + then: 0 * mockActivities.disableAsg(userContext, 'the_seaward-v003') >> true + then: 1 * mockActivities.sendNotification(_, 'gob@bluth.com', 'the_seaward', + "Deployment failed for ASG 'the_seaward-v003'.", + "Deployment was rolled back due to error: java.lang.IllegalStateException: Something really went wrong!" 
+ ) + } } diff --git a/test/unit/com/netflix/asgard/deployment/StartDeploymentRequestSpec.groovy b/test/unit/com/netflix/asgard/deployment/StartDeploymentRequestSpec.groovy index ac70b021..409fd73c 100644 --- a/test/unit/com/netflix/asgard/deployment/StartDeploymentRequestSpec.groovy +++ b/test/unit/com/netflix/asgard/deployment/StartDeploymentRequestSpec.groovy @@ -16,6 +16,13 @@ package com.netflix.asgard.deployment import com.fasterxml.jackson.databind.ObjectMapper +import com.netflix.asgard.deployment.steps.CreateAsgStep +import com.netflix.asgard.deployment.steps.DeleteAsgStep +import com.netflix.asgard.deployment.steps.DisableAsgStep +import com.netflix.asgard.deployment.steps.JudgmentStep +import com.netflix.asgard.deployment.steps.ResizeStep +import com.netflix.asgard.deployment.steps.WaitStep +import com.netflix.asgard.model.AsgRoleInCluster import com.netflix.asgard.model.AutoScalingGroupBeanOptions import com.netflix.asgard.model.AutoScalingProcessType import com.netflix.asgard.model.LaunchConfigurationBeanOptions @@ -28,17 +35,16 @@ class StartDeploymentRequestSpec extends Specification { new DeploymentWorkflowOptions( clusterName: "helloworld", notificationDestination:"jdoe@netflix.com", - delayDurationMinutes: 5, - doCanary: true, - canaryCapacity: 1, - canaryStartUpTimeoutMinutes: 30, - canaryJudgmentPeriodMinutes: 60, - scaleUp: "Yes", - desiredCapacityStartUpTimeoutMinutes: 40, - desiredCapacityJudgmentPeriodMinutes: 120, - disablePreviousAsg: "Ask", - fullTrafficJudgmentPeriodMinutes: 240, - deletePreviousAsg: "No"), + steps: [ + new WaitStep(durationMinutes: 5, description: "delay"), + new CreateAsgStep(), + new ResizeStep(capacity: 1, targetAsg: AsgRoleInCluster.Next, startUpTimeoutMinutes: 30), + new ResizeStep(capacity: 3, targetAsg: AsgRoleInCluster.Next, startUpTimeoutMinutes: 40), + new JudgmentStep(durationMinutes: 120), + new DisableAsgStep(targetAsg: AsgRoleInCluster.Previous), + new DeleteAsgStep(targetAsg: AsgRoleInCluster.Previous) + ] + ), new LaunchConfigurationBeanOptions( imageId: "ami-12345678", keyName: "nf-test-keypair-a", @@ -66,12 +72,16 @@ class StartDeploymentRequestSpec extends Specification { suspendedProcesses: [AutoScalingProcessType.AddToLoadBalancer]) ) - String json = '{"deploymentOptions":{"clusterName":"helloworld",' + - '"notificationDestination":"jdoe@netflix.com","delayDurationMinutes":5,"doCanary":true,' + - '"canaryCapacity":1,"canaryStartUpTimeoutMinutes":30,"canaryJudgmentPeriodMinutes":60,' + - '"scaleUp":"Yes","desiredCapacityStartUpTimeoutMinutes":40,' + - '"desiredCapacityJudgmentPeriodMinutes":120,"disablePreviousAsg":"Ask",' + - '"fullTrafficJudgmentPeriodMinutes":240,"deletePreviousAsg":"No"},' + + String json = '{"deploymentOptions":{"clusterName":"helloworld","notificationDestination":"jdoe@netflix.com",' + + '"steps":[' + + '{"type":"Wait","durationMinutes":5,"description":"delay"},' + + '{"type":"CreateAsg"},' + + '{"type":"Resize","targetAsg":"Next","capacity":1,"startUpTimeoutMinutes":30},' + + '{"type":"Resize","targetAsg":"Next","capacity":3,"startUpTimeoutMinutes":40},' + + '{"type":"Judgment","durationMinutes":120},' + + '{"type":"DisableAsg","targetAsg":"Previous"},' + + '{"type":"DeleteAsg","targetAsg":"Previous"}' + + ']},' + '"lcOptions":{"launchConfigurationName":null,"imageId":"ami-12345678",' + '"keyName":"nf-test-keypair-a","securityGroups":["sg-12345678"],' + @@ -105,7 +115,7 @@ class StartDeploymentRequestSpec extends Specification { startDeploymentRequest.asgOptions.maxSize = 1 expect: 
startDeploymentRequest.validationErrors == [ - "Resize ASG capacity '2' is greater than the ASG's maximum instance bound '1'." + "Resize ASG capacity '3' is greater than the ASG's maximum instance bound '1'." ] } } diff --git a/test/unit/com/netflix/asgard/model/DeploymentSpec.groovy b/test/unit/com/netflix/asgard/model/DeploymentSpec.groovy index 79d5a6c5..c4f8d2c8 100644 --- a/test/unit/com/netflix/asgard/model/DeploymentSpec.groovy +++ b/test/unit/com/netflix/asgard/model/DeploymentSpec.groovy @@ -50,4 +50,36 @@ class DeploymentSpec extends Specification { new Date(0) | new Date(1000) | '1s' new Date(0) | new Date(9999999) | '2h 46m 39s' } + + void 'should construct step JSON'() { + expect: + Deployment.constructStepJson(7) == '{"step":7}' + } + + void 'should parse step JSON'() { + expect: + Deployment.parseStepIndex('{"step":7}') == 7 + } + + void 'should organize log by steps'() { + Deployment deployment = new Deployment(null, null, null, null, null, null, null, null, null, [ + '{"step":0}', + 'on the first step', + 'still finishing up step one', + '{"step":1}', + 'now working on the next one', + + ]) + + expect: + deployment.logForSteps == [ + [ + 'on the first step', + 'still finishing up step one', + ], + [ + 'now working on the next one' + ] + ] + } } diff --git a/test/unit/com/netflix/asgard/DefaultAdvancedUserDataProviderSpec.groovy b/test/unit/com/netflix/asgard/userdata/DefaultAdvancedUserDataProviderSpec.groovy similarity index 93% rename from test/unit/com/netflix/asgard/DefaultAdvancedUserDataProviderSpec.groovy rename to test/unit/com/netflix/asgard/userdata/DefaultAdvancedUserDataProviderSpec.groovy index 934257f9..dee1f497 100644 --- a/test/unit/com/netflix/asgard/DefaultAdvancedUserDataProviderSpec.groovy +++ b/test/unit/com/netflix/asgard/userdata/DefaultAdvancedUserDataProviderSpec.groovy @@ -13,8 +13,11 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package com.netflix.asgard +package com.netflix.asgard.userdata +import com.netflix.asgard.PluginService +import com.netflix.asgard.Region +import com.netflix.asgard.UserContext import com.netflix.asgard.model.AutoScalingGroupBeanOptions import com.netflix.asgard.model.LaunchConfigurationBeanOptions import com.netflix.asgard.model.LaunchContext diff --git a/test/unit/com/netflix/asgard/DefaultUserDataProviderSpec.groovy b/test/unit/com/netflix/asgard/userdata/DefaultUserDataProviderSpec.groovy similarity index 62% rename from test/unit/com/netflix/asgard/DefaultUserDataProviderSpec.groovy rename to test/unit/com/netflix/asgard/userdata/DefaultUserDataProviderSpec.groovy index 62255f96..49c3a90c 100644 --- a/test/unit/com/netflix/asgard/DefaultUserDataProviderSpec.groovy +++ b/test/unit/com/netflix/asgard/userdata/DefaultUserDataProviderSpec.groovy @@ -13,8 +13,12 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ -package com.netflix.asgard +package com.netflix.asgard.userdata +import com.netflix.asgard.ApplicationService +import com.netflix.asgard.ConfigService +import com.netflix.asgard.Region +import com.netflix.asgard.UserContext import javax.xml.bind.DatatypeConverter import spock.lang.Specification @@ -38,41 +42,41 @@ class DefaultUserDataProviderSpec extends Specification { def 'should generate user data in the default format'() { + String expected = 'export ENVIRONMENT=\n' + + 'export MONITOR_BUCKET=helloworld\n' + + 'export APP=helloworld\n' + + 'export APP_GROUP=\n' + + 'export STACK=example\n' + + 'export CLUSTER=helloworld-example\n' + + 'export AUTO_SCALE_GROUP=helloworld-example-v345\n' + + 'export LAUNCH_CONFIG=helloworld-example-v345-1234567890\n' + + 'export EC2_REGION=sa-east-1\n' + when: String userDataEncoded = provider.buildUserDataForVariables(userContext, 'helloworld', 'helloworld-example-v345', 'helloworld-example-v345-1234567890') then: - decode(userDataEncoded) == '''\ - export ENVIRONMENT= - export MONITOR_BUCKET=helloworld - export APP=helloworld - export APP_GROUP= - export STACK=example - export CLUSTER=helloworld-example - export AUTO_SCALE_GROUP=helloworld-example-v345 - export LAUNCH_CONFIG=helloworld-example-v345-1234567890 - export EC2_REGION=sa-east-1 - '''.stripIndent() + decode(userDataEncoded) == expected } def 'should generate user data with blanks for null values'() { + String expected = 'export ENVIRONMENT=\n' + + 'export MONITOR_BUCKET=helloworld\n' + + 'export APP=helloworld\n' + + 'export APP_GROUP=\n' + + 'export STACK=\n' + + 'export CLUSTER=\n' + + 'export AUTO_SCALE_GROUP=\n' + + 'export LAUNCH_CONFIG=\n' + + 'export EC2_REGION=sa-east-1\n' + when: String userDataEncoded = provider.buildUserDataForVariables(userContext, 'helloworld', null, null) then: - decode(userDataEncoded) == '''\ - export ENVIRONMENT= - export MONITOR_BUCKET=helloworld - export APP=helloworld - export APP_GROUP= - export STACK= - export CLUSTER= - export AUTO_SCALE_GROUP= - export LAUNCH_CONFIG= - export EC2_REGION=sa-east-1 - '''.stripIndent() + decode(userDataEncoded) == expected } private String decode(String encoded) { diff --git a/test/unit/com/netflix/asgard/NetflixAdvancedUserDataProviderSpec.groovy b/test/unit/com/netflix/asgard/userdata/NetflixAdvancedUserDataProviderSpec.groovy similarity index 69% rename from test/unit/com/netflix/asgard/NetflixAdvancedUserDataProviderSpec.groovy rename to test/unit/com/netflix/asgard/userdata/NetflixAdvancedUserDataProviderSpec.groovy index d95736d1..62e6922f 100644 --- a/test/unit/com/netflix/asgard/NetflixAdvancedUserDataProviderSpec.groovy +++ b/test/unit/com/netflix/asgard/userdata/NetflixAdvancedUserDataProviderSpec.groovy @@ -13,10 +13,17 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ -package com.netflix.asgard +package com.netflix.asgard.userdata import com.amazonaws.services.ec2.model.Image import com.amazonaws.services.ec2.model.Tag +import com.netflix.asgard.AppRegistration +import com.netflix.asgard.ApplicationService +import com.netflix.asgard.ConfigService +import com.netflix.asgard.MonkeyPatcherService +import com.netflix.asgard.PluginService +import com.netflix.asgard.Region +import com.netflix.asgard.UserContext import com.netflix.asgard.model.AutoScalingGroupBeanOptions import com.netflix.asgard.model.LaunchConfigurationBeanOptions import com.netflix.asgard.model.LaunchContext @@ -63,15 +70,15 @@ class NetflixAdvancedUserDataProviderSpec extends Specification { } final static String helloStandardUserData = """\ - export NETFLIX_ENVIRONMENT=test - export NETFLIX_MONITOR_BUCKET=hello - export NETFLIX_APP=hello - export NETFLIX_APP_GROUP= - export NETFLIX_STACK=dev - export NETFLIX_CLUSTER=hello-dev - export NETFLIX_AUTO_SCALE_GROUP=hello-dev-v001 - export NETFLIX_LAUNCH_CONFIG=hello-dev-v001-1234567 - export EC2_REGION=us-west-2 + NETFLIX_ENVIRONMENT=test + NETFLIX_MONITOR_BUCKET=hello + NETFLIX_APP=hello + NETFLIX_APP_GROUP= + NETFLIX_STACK=dev + NETFLIX_CLUSTER=hello-dev + NETFLIX_AUTO_SCALE_GROUP=hello-dev-v001 + NETFLIX_LAUNCH_CONFIG=hello-dev-v001-1234567 + EC2_REGION=us-west-2 """.stripIndent() final static String helloCustomUserData = "No soup for you. region=us-west-2 app=hello asg=hello-dev-v001" @@ -121,15 +128,15 @@ class NetflixAdvancedUserDataProviderSpec extends Specification { then: userData == """\ - export NETFLIX_ENVIRONMENT=test - export NETFLIX_MONITOR_BUCKET=${monitorBucket ?: ''} - export NETFLIX_APP=hi - export NETFLIX_APP_GROUP=hi_group - export NETFLIX_STACK=dev - export NETFLIX_CLUSTER=hi-dev - export NETFLIX_AUTO_SCALE_GROUP=hi-dev-v001 - export NETFLIX_LAUNCH_CONFIG=hi-dev-v001-1234567 - export EC2_REGION=us-west-2 + NETFLIX_ENVIRONMENT=test + NETFLIX_MONITOR_BUCKET=${monitorBucket ?: ''} + NETFLIX_APP=hi + NETFLIX_APP_GROUP=hi_group + NETFLIX_STACK=dev + NETFLIX_CLUSTER=hi-dev + NETFLIX_AUTO_SCALE_GROUP=hi-dev-v001 + NETFLIX_LAUNCH_CONFIG=hi-dev-v001-1234567 + EC2_REGION=us-west-2 """.stripIndent() where: @@ -176,15 +183,15 @@ class NetflixAdvancedUserDataProviderSpec extends Specification { then: userData == """\ - export NETFLIX_ENVIRONMENT=test - export NETFLIX_MONITOR_BUCKET=${appEnvVar} - export NETFLIX_APP=${appEnvVar} - export NETFLIX_APP_GROUP= - export NETFLIX_STACK= - export NETFLIX_CLUSTER=${asg?.autoScalingGroupName ?: ''} - export NETFLIX_AUTO_SCALE_GROUP=${asg?.autoScalingGroupName ?: ''} - export NETFLIX_LAUNCH_CONFIG=robot-123456 - export EC2_REGION=us-west-2 + NETFLIX_ENVIRONMENT=test + NETFLIX_MONITOR_BUCKET=${appEnvVar} + NETFLIX_APP=${appEnvVar} + NETFLIX_APP_GROUP= + NETFLIX_STACK= + NETFLIX_CLUSTER=${asg?.autoScalingGroupName ?: ''} + NETFLIX_AUTO_SCALE_GROUP=${asg?.autoScalingGroupName ?: ''} + NETFLIX_LAUNCH_CONFIG=robot-123456 + EC2_REGION=us-west-2 """.stripIndent() where: @@ -208,6 +215,41 @@ class NetflixAdvancedUserDataProviderSpec extends Specification { 1 * userDataProvider.buildUserDataForVariables(userContext, '', '', '') } + void 'should use properties file format user data only if image and configuration indicate are set up for it'() { + + configService.usePropertyFileUserDataForWindowsImages >> propForWin + Image image = new Image(platform: platform, description: description) + + expect: + result == netflixAdvancedUserDataProvider.shouldUsePropertiesUserData(image) + + where: + result | 
propForWin | platform | description + false | false | null | null + false | false | '' | '' + false | true | '' | '' + false | false | '' | "blah blah blah, ancestor_version=nflx-base-1-12345-h24" + false | true | null | "blah blah blah, ancestor_version=nflx-base-1-12345-h24" + false | false | null | "blah blah blah, ancestor_version=nflx-base-1.0-12345-h24" + false | false | null | "blah blah blah, ancestor_version=nflx-base-1.3-12345-h24" + true | false | null | "blah blah blah, ancestor_version=nflx-base-2.0-12345-h24" + true | true | null | "blah blah blah, ancestor_version=nflx-base-2.0-12345-h24" + true | false | null | "blah blah blah, ancestor_version=nflx-base-3-12345-h24" + true | false | null | "blah blah blah, ancestor_version=nflx-base-10-12345-h24" + true | false | null | "blah blah blah, ancestor_version=nflx-base-10.0-12345-h24" + true | false | null | "blah blah blah, ancestor_version=nflx-base-10.1-12345-h24" + true | false | null | "blah blah blah, ancestor_version=nflx-base-11-12345-h24" + false | false | null | "blah blah blah" + false | false | 'windows' | "blah blah blah" + false | false | 'Windows' | "blah blah blah" + true | true | 'windows' | "blah blah blah" + true | true | 'windows' | "blah blah blah" + true | true | 'Windows' | "blah blah blah" + false | true | 'linux' | "blah blah blah" + true | true | 'windows' | "blah blah blah, ancestor_version=nflx-base-1-12345-h24" + true | true | 'windows' | "blah blah blah, ancestor_version=nflx-base-2-12345-h24" + } + private AutoScalingGroupBeanOptions asg(String name) { new AutoScalingGroupBeanOptions(autoScalingGroupName: name) } diff --git a/test/unit/com/netflix/asgard/userdata/PropertiesUserDataProviderSpec.groovy b/test/unit/com/netflix/asgard/userdata/PropertiesUserDataProviderSpec.groovy new file mode 100644 index 00000000..2ca54c5a --- /dev/null +++ b/test/unit/com/netflix/asgard/userdata/PropertiesUserDataProviderSpec.groovy @@ -0,0 +1,93 @@ +/* + * Copyright 2014 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.netflix.asgard.userdata + +import com.netflix.asgard.AppRegistration +import com.netflix.asgard.ApplicationService +import com.netflix.asgard.ConfigService +import com.netflix.asgard.UserContext +import javax.xml.bind.DatatypeConverter +import spock.lang.Specification + +/** + * Tests for PropertiesUserDataProvider. 
+ */ +class PropertiesUserDataProviderSpec extends Specification { + + AppRegistration app = new AppRegistration(group: 'common') + String clusterName = 'helloworld-example-c0asia-d0sony' + String asgName = 'helloworld-example-c0asia-d0sony-v033' + String launchConfigName = 'helloworld-example-c0asia-d0sony-v033-123456789' + UserContext userContext = UserContext.auto() + ConfigService configService = Mock(ConfigService) + ApplicationService applicationService = Mock(ApplicationService) + PropertiesUserDataProvider provider = new PropertiesUserDataProvider(configService: configService, + applicationService: applicationService) + + void setup() { + configService.userDataVarPrefix >> 'CLOUD_' + configService.accountName >> 'prod' + applicationService.getMonitorBucket(userContext, 'helloworld', clusterName) >> 'helloworld' + applicationService.getRegisteredApplication(userContext, 'helloworld') >> app + } + + void 'should create a map of properties based on cloud objects'() { + + when: + Map props = provider.mapProperties(userContext, 'helloworld', asgName, launchConfigName) + + then: + props == [ + CLOUD_APP: 'helloworld', + CLOUD_APP_GROUP: 'common', + CLOUD_AUTO_SCALE_GROUP: 'helloworld-example-c0asia-d0sony-v033', + CLOUD_CLUSTER: 'helloworld-example-c0asia-d0sony', + CLOUD_COUNTRIES: 'asia', + CLOUD_DEV_PHASE: 'sony', + CLOUD_ENVIRONMENT: 'prod', + CLOUD_LAUNCH_CONFIG: 'helloworld-example-c0asia-d0sony-v033-123456789', + CLOUD_MONITOR_BUCKET: 'helloworld', + CLOUD_STACK: 'example', + EC2_REGION: 'us-east-1' + ] + } + + def 'should generate user data in the format of a properties file'() { + + String expected = 'CLOUD_ENVIRONMENT=prod\n' + + 'CLOUD_MONITOR_BUCKET=helloworld\n' + + 'CLOUD_APP=helloworld\n' + + 'CLOUD_APP_GROUP=common\n' + + 'CLOUD_STACK=example\n' + + 'CLOUD_CLUSTER=helloworld-example-c0asia-d0sony\n' + + 'CLOUD_AUTO_SCALE_GROUP=helloworld-example-c0asia-d0sony-v033\n' + + 'CLOUD_LAUNCH_CONFIG=helloworld-example-c0asia-d0sony-v033-123456789\n' + + 'EC2_REGION=us-east-1\n' + + 'CLOUD_COUNTRIES=asia\n' + + 'CLOUD_DEV_PHASE=sony\n' + + when: + String userDataEncoded = provider.buildUserDataForVariables(userContext, 'helloworld', asgName, + launchConfigName) + + then: + decode(userDataEncoded) == expected + } + + private String decode(String encoded) { + new String(DatatypeConverter.parseBase64Binary(encoded)) + } +} diff --git a/web-app/css/main.css b/web-app/css/main.css index 7c0f31dd..d58bd63a 100644 --- a/web-app/css/main.css +++ b/web-app/css/main.css @@ -350,9 +350,7 @@ button.resize div, a.resize { background-image: url(../images/ .buttons button.elastic div { background-image: url(../images/tango/24/tools/select-lasso.png); } .buttons button.save div { background-image: url(../images/tango/24/actions/document-save.png); } .buttons button.deploy div { background-image: url(../images/tango/24/tools/deploy.png); } -.buttons a.deploy { background-image: url(../images/tango/24/tools/deploy.png); } -.buttons button.proceed div { background-image: url(../images/tango/24/actions/media-playback-start.png); } -.buttons button.rollback div { background-image: url(../images/tango/24/actions/media-seek-backward.png); } +.buttons a.deploy { background-image: url(../images/tango/24/tools/deploy.png); } .buttons button.schedule div { background-image: url(../images/tango/24/actions/appointment-new.png); } .buttons button.outOfService div { background-image: url(../images/tango/24/status/weather-clear-night.png); } .buttons button.inService div { background-image: 
url(../images/tango/24/status/weather-clear.png); } @@ -461,36 +459,58 @@ table.securityGroups { width: auto; } td.checkbox { text-align: center; } .deployment { margin-bottom: 800px; } +.deployment input[type="radio"] { margin: 3px !important; } .deployment div label { font-weight: normal } .deployment .well { margin: 5px; } .deployment .row { margin: 3px; } .deployment div h3 { margin-top: 0 } -.deployment .deploymentOptions div.step { margin: 5px; width: 600px; } -.deployment .deploymentOptions div.step h4 { padding: 0 35px; margin: 0; display: inline-block; background: no-repeat; height: 25px; } -.deployment .deploymentOptions div.step div.removeStep { padding: 8px; margin: 2px; float: right; display: inline-block; background: no-repeat url(../images/tango/16/places/user-trash.png); cursor: pointer;} - -.deployment .deploymentOptions div.step h4.createAsg { background-image: url(../images/tango/24/status/network-idle.png) } -.deployment .deploymentOptions div.step h4.judgment { background-image: url(../images/tango/24/actions/appointment-new.png); } -.deployment .deploymentOptions div.step h4.scaling { background-image: url(../images/tango/24/tools/resize.png); } -.deployment .deploymentOptions div.step h4.disablePreviousAsg { background-image: url(../images/tango/24/status/traffic-disable.png); } -.deployment .deploymentOptions div.step h4.cleanUp { background-image: url(../images/tango/24/actions/edit-clear.png); } - -.deployment .deploymentOptions div.stepDivider { padding: 0; margin: 0; width: 600px; text-align: center; } - -div.step.ng-hide-remove { - -webkit-animation: flipInX 1s; - -moz-animation: flipInX 1s; - -ms-animation: flipInX 1s; - animation: flipInX 1s; -} - -div.step.ng-hide-add { - display: block !important; - -webkit-animation: flipOutX 1s; - -moz-animation: flipOutX 1s; - -ms-animation: flipOutX 1s; - animation: flipOutX 1s; +.deploymentOptions div.step { margin: 5px 0; width: 600px; } +.deploymentOptions div.step input, .deploymentOptions div.step select { margin: 3px !important; } +.deploymentOptions div.step h4 { padding: 0 35px; margin: 0; display: inline-block; background: no-repeat; height: 25px; } +.deploymentOptions div.step .panel-heading { padding: 5px; } +.deploymentOptions div.step .panel-body { padding: 5px; line-height: 22px; } +.deploymentOptions div.step div.removeStep { padding: 8px; margin: 2px; float: right; display: inline-block; background: no-repeat url(../images/tango/16/places/user-trash.png); cursor: pointer; } +.deploymentOptions div.step div.waiting { padding: 8px; margin: 2px; float: right; display: inline-block; background: no-repeat url(../images/spinner.gif); } + +.deploymentOptions div.step h4.createAsg { background-image: url(../images/tango/24/status/network-idle.png) } +.deploymentOptions div.step h4.wait { background-image: url(../images/tango/24/actions/appointment-new.png); } +.deploymentOptions div.step h4.judgment { background-image: url(../images/tango/24/actions/appointment-new.png); } +.deploymentOptions div.step h4.scaling { background-image: url(../images/tango/24/tools/resize.png); } +.deploymentOptions div.step h4.disableAsg { background-image: url(../images/tango/24/status/traffic-disable.png); } +.deploymentOptions div.step h4.enableAsg { background-image: url(../images/tango/24/status/traffic-enable.png); } +.deploymentOptions div.step h4.cleanUp { background-image: url(../images/tango/24/actions/edit-clear.png); } +.deploymentOptions div.step h4.addStep { background-image: 
url(../images/tango/24/places/start-here.png); } + +.deployment .container .row { margin: 10px 0; } +.deployment .logMessage { margin: 0; padding: 0; line-height: 12px; } +.deployment .selectedValues div { margin: 0 4px 4px 0; padding: 3px; border: 1px solid #999; background-color: #fff; display: inline-block; } + +.steps button.btn { padding: 0 10px; margin: 0 2px; height: 22px; } +.steps button.insert { padding: 0 200px; } +.steps .buttonPanel { text-align: center; width: 600px; background: no-repeat; } +.steps .buttonPanel button { padding: 0 30px; background: no-repeat; background-position: 4px 2px; } +.steps .buttonPanel button.proceed { background-image: url(../images/tango/16/actions/media-playback-start.png); } +.steps .buttonPanel button.rollback { background-image: url(../images/tango/16/actions/media-seek-backward.png); } + +.animate-repeat.ng-move, +.animate-repeat.ng-enter, +.animate-repeat.ng-leave { + transition: all linear 0.5s; +} + +.animate-repeat.ng-leave.ng-leave-active, +.animate-repeat.ng-move, +.animate-repeat.ng-enter { + opacity:0; + max-height:0; +} + +.animate-repeat.ng-leave, +.animate-repeat.ng-move.ng-move-active, +.animate-repeat.ng-enter.ng-enter-active { + opacity:1; + max-height:100px; } /* GROUP REPLACING PUSH */
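
The data table in NetflixAdvancedUserDataProviderSpec above pins down when the new properties-file user data format is chosen: base AMIs with an ancestor_version of nflx-base major version 2 or higher qualify, and Windows images qualify only when the Windows-specific configuration flag is on. As a reading aid, here is a minimal Groovy sketch of a predicate consistent with those rows; the method name, parameters, and regex are illustrative assumptions, not the production NetflixAdvancedUserDataProvider code.

import java.util.regex.Matcher

// Sketch only: mirrors the expectations in the Spock data table above.
// An image qualifies for properties-file user data if the ancestor_version tag in its
// description names a base AMI with major version >= 2, or if it is a Windows image
// and the (assumed) config flag for Windows images is enabled.
boolean shouldUsePropertiesUserData(String platform, String description, boolean propertyFileForWindows) {
    Matcher matcher = (description ?: '') =~ /ancestor_version=nflx-base-(\d+)/
    if (matcher.find() && (matcher.group(1) as int) >= 2) {
        return true
    }
    propertyFileForWindows && 'windows'.equalsIgnoreCase(platform)
}

assert !shouldUsePropertiesUserData(null, 'blah blah blah, ancestor_version=nflx-base-1.3-12345-h24', false)
assert shouldUsePropertiesUserData(null, 'blah blah blah, ancestor_version=nflx-base-10.1-12345-h24', false)
assert shouldUsePropertiesUserData('Windows', 'blah blah blah', true)
assert !shouldUsePropertiesUserData('linux', 'blah blah blah', true)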
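
PropertiesUserDataProviderSpec above expects buildUserDataForVariables to return a Base64-encoded block of KEY=value lines, with no shell "export" prefix, matching the new platform-independent properties-file user data. The snippet below is a self-contained Groovy sketch of that encoding step only; the hard-coded variable map stands in for values the real provider derives from ConfigService, ApplicationService, and the cluster, ASG, and launch configuration names.

import javax.xml.bind.DatatypeConverter

// Sketch only: assemble properties-style user data and Base64-encode it, in the shape the
// spec's decode() helper expects. The map below is hard-coded for illustration.
Map<String, String> vars = [
        CLOUD_ENVIRONMENT     : 'prod',
        CLOUD_APP             : 'helloworld',
        CLOUD_CLUSTER         : 'helloworld-example-c0asia-d0sony',
        CLOUD_AUTO_SCALE_GROUP: 'helloworld-example-c0asia-d0sony-v033',
        EC2_REGION            : 'us-east-1'
]

String plainText = vars.collect { key, value -> "${key}=${value}" }.join('\n') + '\n'
String encoded = DatatypeConverter.printBase64Binary(plainText.getBytes('UTF-8'))

// Round-trip check in the same style as the spec's decode() helper.
assert new String(DatatypeConverter.parseBase64Binary(encoded), 'UTF-8').startsWith('CLOUD_ENVIRONMENT=prod\n')
println encoded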