Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

fix travis failures in staticcheck #1601

Open
wants to merge 1 commit into
base: master
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
15 changes: 10 additions & 5 deletions test/integration_test/action_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -15,6 +15,8 @@ import (
"github.com/stretchr/testify/require"
)

var horntail = false

func TestAction(t *testing.T) {

setupOnce(t)
Expand All @@ -23,7 +25,7 @@ func TestAction(t *testing.T) {
t.Run("testFailoverWithoutMigration", testFailoverWithoutMigration)
t.Run("testFailoverForMultipleNamespaces", testFailoverForMultipleNamespaces)
t.Run("testFailoverWithMultipleApplications", testFailoverWithMultipleApplications)
// t.Run("testFailoverForFailedPromoteVolume", testFailoverForFailedPromoteVolume)
t.Run("testFailoverForFailedPromoteVolume", testFailoverForFailedPromoteVolume)
}

func setupOnce(t *testing.T) {
Expand Down Expand Up @@ -52,7 +54,7 @@ func testFailoverBasic(t *testing.T) {
ctxs := scheduleAppAndWait(t, instanceIDs, appKey)

startAppsOnMigration := false
preMigrationCtxs, ctxs, _ := triggerMigrationMultiple(
preMigrationCtxs, _, _ := triggerMigrationMultiple(
t, ctxs, migrationName, namespaces, true, false, startAppsOnMigration)

validateMigrationOnSrc(t, migrationName, namespaces)
Expand Down Expand Up @@ -95,7 +97,7 @@ func testFailoverForMultipleNamespaces(t *testing.T) {
ctxs := scheduleAppAndWait(t, instanceIDs, appKey)

startAppsOnMigration := false
preMigrationCtxs, ctxs, _ := triggerMigrationMultiple(
preMigrationCtxs, _, _ := triggerMigrationMultiple(
t, ctxs, migrationName, namespaces, true, false, startAppsOnMigration)

validateMigrationOnSrc(t, migrationName, namespaces)
Expand All @@ -119,7 +121,7 @@ func testFailoverWithMultipleApplications(t *testing.T) {
addTasksAndWait(t, ctxs[0], additionalAppKeys)

startAppsOnMigration := false
preMigrationCtxs, ctxs, _ := triggerMigrationMultiple(
preMigrationCtxs, _, _ := triggerMigrationMultiple(
t, ctxs, migrationName, namespaces, true, false, startAppsOnMigration)

validateMigrationOnSrc(t, migrationName, namespaces)
Expand Down Expand Up @@ -153,6 +155,9 @@ func deactivateClusterDomainAndTriggerFailover(
// It depends on a change that will prevent promote call from going
// through if PX is down on the nearsync node for a volume
func testFailoverForFailedPromoteVolume(t *testing.T) {
if !horntail {
return
}
appKey := "mysql-nearsync"
instanceIDs := []string{"failover-failed-promote-volume"}
storageClass := "px-sc"
Expand Down Expand Up @@ -190,7 +195,7 @@ func testFailoverForFailedPromoteVolume(t *testing.T) {
funcRestartNode := func() {
mapNodeIDToNode := node.GetNodesByVoDriverNodeID()
logrus.Infof("mapNodeIDToNode: %v", mapNodeIDToNode)
nodeObj, _ := mapNodeIDToNode[nearSyncTargetMid]
nodeObj := mapNodeIDToNode[nearSyncTargetMid]
logrus.Infof("node: %v", nodeObj)

_, err = nodeDriver.RunCommand(
Expand Down
2 changes: 1 addition & 1 deletion test/integration_test/common_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -1735,7 +1735,7 @@ func addRunToMilestone(testrailID int, testResult *string) (int, error) {
if testrailID == 0 {
return 0, fmt.Errorf("invalid testcase ID: %v", testrailID)
}
runID, err = strconv.Atoi(testrailutils.JobRunID)
_, err = strconv.Atoi(testrailutils.JobRunID)
if err != nil {
return 0, fmt.Errorf("invalid testrail run ID: %v", testrailutils.JobRunID)
}
Expand Down
6 changes: 0 additions & 6 deletions test/integration_test/kubevirt_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -17,10 +17,6 @@ import (
kubevirtv1 "kubevirt.io/api/core/v1"
)

var templatePVCSpecs = map[string]string{
"fedora": "kubevirt-templates",
}

const (
importerPodPrefix = "importer"
importerPodStartTimeout = 2 * time.Minute
Expand All @@ -33,8 +29,6 @@ const (
kubevirtDatadiskNamespace = "openshift-virtualization-datadisk-templates"
kubevirtCDIStorageConditionAnnotation = "cdi.kubevirt.io/storage.condition.running.reason"
kubevirtCDIStoragePodPhaseAnnotation = "cdi.kubevirt.io/storage.pod.phase"

volumeBindingImmediate = "kubevirt-templates"
)

func TestKubevirt(t *testing.T) {
Expand Down
6 changes: 6 additions & 0 deletions test/integration_test/migration_features_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -103,6 +103,7 @@ func migrationStashStrategy(t *testing.T, appName string, appPath string) {
logrus.Infof("Starting migration %s/%s with startApplication false", appData.Ns, migNamePref+appName)
startApplications := false
mig, err := asyncdr.CreateMigration(migNamePref+appName, appData.Ns, clusterPairName, appData.Ns, &includeVolumesFlag, &includeResourcesFlag, &startApplications)
require.NoError(t, err, "Failed to create migration")
err = asyncdr.WaitForMigration([]*storkapi.Migration{mig})
require.NoError(t, err, "Error waiting for migration")
logrus.Infof("Migration %s/%s completed successfully ", appData.Ns, migNamePref+appName)
Expand Down Expand Up @@ -305,6 +306,7 @@ func testMigrationStashStrategyWithStartApplication(t *testing.T) {
logrus.Infof("Starting migration %s/%s with startApplication true", appData.Ns, migNamePref+appName)
startApplications := true
mig, err := asyncdr.CreateMigration(migNamePref+appName, appData.Ns, clusterPairName, appData.Ns, &includeVolumesFlag, &includeResourcesFlag, &startApplications)
require.NoError(t, err, "Failed to create migration")
err = asyncdr.WaitForMigration([]*storkapi.Migration{mig})
require.NoError(t, err, "Error waiting for migration")
logrus.Infof("Migration %s/%s completed successfully ", appData.Ns, migNamePref+appName)
Expand Down Expand Up @@ -404,6 +406,7 @@ func testMultipleTimesMigrationsWithStashStrategy(t *testing.T) {
firstMigrationName := fmt.Sprintf("%s%s-%d", migNamePref, appName, 1)
logrus.Infof("Starting migration %s/%s with startApplication false, iteration number: 1", appData.Ns, firstMigrationName)
mig1, err := asyncdr.CreateMigration(firstMigrationName, appData.Ns, clusterPairName, appData.Ns, &includeVolumesFlag, &includeResourcesFlag, &startApplications)
require.NoError(t, err, "Failed to create migration")
err = asyncdr.WaitForMigration([]*storkapi.Migration{mig1})
require.NoError(t, err, "Error waiting for migration")
logrus.Infof("Migration %s/%s completed successfully ", appData.Ns, mig1.Name)
Expand All @@ -416,6 +419,7 @@ func testMultipleTimesMigrationsWithStashStrategy(t *testing.T) {
secondMigrationName := fmt.Sprintf("%s%s-%d", migNamePref, appName, 2)
logrus.Infof("Starting migration %s/%s with startApplication false, iteration number: 2", appData.Ns, secondMigrationName)
mig2, err := asyncdr.CreateMigration(secondMigrationName, appData.Ns, clusterPairName, appData.Ns, &includeVolumesFlag, &includeResourcesFlag, &startApplications)
require.NoError(t, err, "Failed to create migration")
err = asyncdr.WaitForMigration([]*storkapi.Migration{mig2})
require.NoError(t, err, "Error waiting for migration")
logrus.Infof("Migration %s/%s completed successfully ", appData.Ns, mig2.Name)
Expand Down Expand Up @@ -544,6 +548,7 @@ func testFailbackWithStashStrategy(t *testing.T) {
logrus.Infof("Starting migration %s/%s with startApplication false", appData.Ns, migNamePref+appName)
startApplications := false
mig, err := asyncdr.CreateMigration(migNamePref+appName, migrationNamespace, clusterPairName, appData.Ns, &includeVolumesFlag, &includeResourcesFlag, &startApplications)
require.NoError(t, err, "Failed to create migration")
err = asyncdr.WaitForMigration([]*storkapi.Migration{mig})
require.NoError(t, err, "Error waiting for migration")
logrus.Infof("Migration %s/%s completed successfully ", appData.Ns, migNamePref+appName)
Expand Down Expand Up @@ -600,6 +605,7 @@ func testFailbackWithStashStrategy(t *testing.T) {
revMigrationName := fmt.Sprintf("%s%s-%s", migNamePref, appName, "reverse")
logrus.Infof("Starting reverse migration %s/%s with startApplication false", appData.Ns, revMigrationName)
revmig, err := asyncdr.CreateMigration(revMigrationName, migrationNamespace, clusterPairName, appData.Ns, &includeVolumesFlag, &includeResourcesFlag, &startApplications)
require.NoError(t, err, "Failed to create migration")
err = asyncdr.WaitForMigration([]*storkapi.Migration{revmig})
require.NoError(t, err, "Error waiting for migration")
logrus.Infof("Migration %s/%s completed successfully ", appData.Ns, revMigrationName)
Expand Down
35 changes: 17 additions & 18 deletions test/integration_test/migration_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -25,7 +25,6 @@ import (
meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"

"github.com/libopenstorage/stork/pkg/apis/stork/v1alpha1"
storkv1 "github.com/libopenstorage/stork/pkg/apis/stork/v1alpha1"
)

Expand Down Expand Up @@ -856,7 +855,7 @@ func validateMigration(t *testing.T, name, namespace string) {
require.NoError(t, err, "error getting migration schedule")
require.Len(t, migrationsMap, 1, "expected only one schedule type in migration map")

migrationStatus := migrationsMap[v1alpha1.SchedulePolicyTypeInterval][0]
migrationStatus := migrationsMap[storkv1.SchedulePolicyTypeInterval][0]
// Independently validate the migration
err = storkops.Instance().ValidateMigration(
migrationStatus.Name, namespace, defaultWaitTimeout, defaultWaitInterval)
Expand All @@ -868,7 +867,7 @@ func migrationDailyScheduleTest(t *testing.T) {
runID := testrailSetupForTest(testrailID, &testResult)
defer updateTestRail(&testResult, testrailID, runID)

migrationScheduleTest(t, v1alpha1.SchedulePolicyTypeDaily, "mysql-migration-schedule-daily", "", -1)
migrationScheduleTest(t, storkv1.SchedulePolicyTypeDaily, "mysql-migration-schedule-daily", "", -1)

// If we are here then the test has passed
testResult = testResultPass
Expand All @@ -880,7 +879,7 @@ func migrationWeeklyScheduleTest(t *testing.T) {
runID := testrailSetupForTest(testrailID, &testResult)
defer updateTestRail(&testResult, testrailID, runID)

migrationScheduleTest(t, v1alpha1.SchedulePolicyTypeWeekly, "mysql-migration-schedule-weekly", "Monday", -1)
migrationScheduleTest(t, storkv1.SchedulePolicyTypeWeekly, "mysql-migration-schedule-weekly", "Monday", -1)

// If we are here then the test has passed
testResult = testResultPass
Expand All @@ -892,7 +891,7 @@ func migrationMonthlyScheduleTest(t *testing.T) {
runID := testrailSetupForTest(testrailID, &testResult)
defer updateTestRail(&testResult, testrailID, runID)

migrationScheduleTest(t, v1alpha1.SchedulePolicyTypeMonthly, "mysql-migration-schedule-monthly", "", 11)
migrationScheduleTest(t, storkv1.SchedulePolicyTypeMonthly, "mysql-migration-schedule-monthly", "", 11)

// If we are here then the test has passed
testResult = testResultPass
Expand Down Expand Up @@ -967,7 +966,7 @@ func migrationScheduleInvalidTest(t *testing.T) {
// trigger time of 12:05PM. Ensure the SchedulePolicy specs use that time.
func migrationScheduleTest(
t *testing.T,
scheduleType v1alpha1.SchedulePolicyType,
scheduleType storkv1.SchedulePolicyType,
migrationScheduleName string,
scheduleDay string,
scheduleDate int) {
Expand Down Expand Up @@ -1083,11 +1082,11 @@ func migrationScheduleTest(

// **** TEST 4 bump time by (1 day / 1 week / 1 month) + 5 minutes. Should cause one new migration
switch scheduleType {
case v1alpha1.SchedulePolicyTypeDaily:
case storkv1.SchedulePolicyTypeDaily:
mockNow = nextTrigger.AddDate(0, 0, 1)
case v1alpha1.SchedulePolicyTypeWeekly:
case storkv1.SchedulePolicyTypeWeekly:
mockNow = nextTrigger.AddDate(0, 0, 7)
case v1alpha1.SchedulePolicyTypeMonthly:
case storkv1.SchedulePolicyTypeMonthly:
mockNow = nextTrigger.AddDate(0, 1, 0)
default:
t.Fatalf("this testcase only supports daily, weekly and monthly intervals")
Expand Down Expand Up @@ -1171,7 +1170,7 @@ func migrationScaleTest(t *testing.T) {
func triggerMigrationScaleTest(t *testing.T, migrationKey, migrationAppKey string, includeResourcesFlag, includeVolumesFlag, startApplicationsFlag bool) {
var appCtxs []*scheduler.Context
var ctxs []*scheduler.Context
var allMigrations []*v1alpha1.Migration
var allMigrations []*storkv1.Migration
var err error
instanceID := migrationKey
appKey := migrationAppKey
Expand Down Expand Up @@ -1525,14 +1524,14 @@ func createMigration(
includeResources *bool,
includeVolumes *bool,
startApplications *bool,
) (*v1alpha1.Migration, error) {
) (*storkv1.Migration, error) {

migration := &v1alpha1.Migration{
migration := &storkv1.Migration{
ObjectMeta: meta_v1.ObjectMeta{
Name: name,
Namespace: namespace,
},
Spec: v1alpha1.MigrationSpec{
Spec: storkv1.MigrationSpec{
ClusterPair: clusterPair,
IncludeResources: includeResources,
IncludeVolumes: includeVolumes,
Expand All @@ -1551,7 +1550,7 @@ func createMigration(
return mig, err
}

func deleteMigrations(migrations []*v1alpha1.Migration) error {
func deleteMigrations(migrations []*storkv1.Migration) error {
for _, mig := range migrations {
err := storkops.Instance().DeleteMigration(mig.Name, mig.Namespace)
if err != nil {
Expand All @@ -1561,15 +1560,15 @@ func deleteMigrations(migrations []*v1alpha1.Migration) error {
return nil
}

func WaitForMigration(migrationList []*v1alpha1.Migration) error {
func WaitForMigration(migrationList []*storkv1.Migration) error {
checkMigrations := func() (interface{}, bool, error) {
isComplete := true
for _, m := range migrationList {
mig, err := storkops.Instance().GetMigration(m.Name, m.Namespace)
if err != nil {
return "", false, err
}
if mig.Status.Status != v1alpha1.MigrationStatusSuccessful {
if mig.Status.Status != storkv1.MigrationStatusSuccessful {
logrus.Infof("Migration %s in namespace %s is pending", m.Name, m.Namespace)
isComplete = false
}
Expand Down Expand Up @@ -2391,7 +2390,7 @@ func serviceAndServiceAccountUpdate(t *testing.T) {
}
serviceAccountSrc.Secrets = append(serviceAccountSrc.Secrets, objRef)

serviceAccountSrc, err = core.Instance().UpdateServiceAccount(serviceAccountSrc)
_, err = core.Instance().UpdateServiceAccount(serviceAccountSrc)
require.NoError(t, err, "Error updating service account on source")

// After update do a get on service account to get values of secret references
Expand All @@ -2414,7 +2413,7 @@ func serviceAndServiceAccountUpdate(t *testing.T) {
updatedPorts = append(updatedPorts, mysqlService.Spec.Ports[i].Port)
}

mysqlService, err = core.Instance().UpdateService(mysqlService)
_, err = core.Instance().UpdateService(mysqlService)
require.NoError(t, err, "Error updating mysql service on source")

logrus.Infof("Waiting for next migration to trigger...")
Expand Down
4 changes: 2 additions & 2 deletions test/integration_test/operator_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -86,6 +86,7 @@ func validateAndDestroyCrMigration(t *testing.T, appName string, appPath string)
require.NoError(t, err, "Error setting source kubeconfig")

mig, err := asyncdr.CreateMigration(migNamePref+appName, appData.Ns, clusterPairName, appData.Ns, &includeVolumesFlag, &includeResourcesFlag, &startApplicationsFlag)
require.NoError(t, err, "Failed to create migration")
err = asyncdr.WaitForMigration([]*storkv1.Migration{mig})
require.NoError(t, err, "Error waiting for migration")

Expand Down Expand Up @@ -132,8 +133,7 @@ func getClusterConfigPath(cmName string) (string, error) {
}
config := cm.Data["kubeconfig"]
if len(config) == 0 {
configErr := fmt.Sprintf("Error reading kubeconfig")
return "", fmt.Errorf(configErr)
return "", fmt.Errorf("Error reading kubeconfig")
}
filePath := fmt.Sprintf("%s/%s", kubeconfigDirectory, cmName)
log.Infof("Save kubeconfig to %s", filePath)
Expand Down