diff --git a/.gitignore b/.gitignore new file mode 100644 index 000000000..0855a0d6a --- /dev/null +++ b/.gitignore @@ -0,0 +1,15 @@ +*~ +ciao-cli/ciao-cli +ciao-controller/ciao-controller +ciao-launcher/ciao-launcher +ciao-launcher/tests/ciao-launcher-server/ciao-launcher-server +ciao-launcher/tests/ciaolc/ciaolc +ciao-scheduler/ciao-scheduler +networking/cnci_agent/cnci_agent +networking/cnci_agent/test_cnci_server/test_cnci_server +networking/docker/plugin/plugin +networking/libsnnet/tests/cncicli/cncicli +networking/libsnnet/tests/cncli/cncli +networking/libsnnet/tests/snnetcli/snnetcli +ssntp/tools/tools +test-cases/test-cases diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 100644 index 000000000..c0b65077c --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1,44 @@ +# Contributing to Ciao + +Ciao is an open source project licensed under the [Apache v2 License](https://opensource.org/licenses/Apache-2.0) + +## Coding Style + +Ciao uses the golang coding style; go fmt is your friend. + +## Certificate of Origin + +In order to get a clear contribution chain of trust we use the [signed-off-by language](https://01.org/community/signed-process) +used by the Linux kernel project. + +## Patch format + +Besides the signed-off-by footer, we expect each patch to comply with the following format: + +``` + : Change summary + + More detailed explanation of your changes: Why and how. + Wrap it to 72 characters. + See [here](http://chris.beams.io/posts/git-commit/) + for some more good advice. + + Signed-off-by: +``` + +For example: + +``` + ssntp: Implement role checking + + SSNTP roles are contained within the SSNTP certificates + as key extended attributes. On both the server and client + sides we are verifying that the claimed roles through the + SSNTP connection protocol match the certificates. + + Signed-off-by: Samuel Ortiz +``` + +## Pull requests + +We accept github pull requests. 
\ No newline at end of file diff --git a/COPYING b/COPYING new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/COPYING @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. 
Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative 
Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/README.md b/README.md new file mode 100644 index 000000000..20396a39c --- /dev/null +++ b/README.md @@ -0,0 +1,22 @@ +Ciao Project + +Ciao is the "Cloud Integrated Advanced Orchestrator". Its goal is +to provide an easy to deploy, secure, scalable cloud orchestration +system which handles virtual machines, containers, and bare metal apps +agnostically as generic workloads. Implemented in the Go language, it +separates logic into "controller", "scheduler" and "launcher" components +which communicate over the "Simple and Secure Node Transfer Protocol +(SSNTP)". + +Controller is responsible for policy choices around tenant workloads. + +Scheduler implements push scheduling, finding a first fit on cluster +compute nodes for a controller approved workload instance. 
+ +Launcher abstracts the specific launching details for the different +workload types (eg: virtual machine, container, bare metal). Launcher +reports compute node statistics to the scheduler and controller. It also +reports per-instance statistics up to controller. + +An additional set of componentry provides network connectivity for workload +instances and ensures tenant isolation. diff --git a/ciao-cli/README.md b/ciao-cli/README.md new file mode 100644 index 000000000..617cd7922 --- /dev/null +++ b/ciao-cli/README.md @@ -0,0 +1,5 @@ +# ciao-cli + +Command-line interface for the Cloud Integrated Advanced Orchestrator +(CIAO). All CIAO components communicate with each other via +[SSNTP](https://github.com/01org/ciao/blob/master/ssntp/README.md). \ No newline at end of file diff --git a/ciao-cli/ciao-cli.go b/ciao-cli/ciao-cli.go new file mode 100644 index 000000000..4a7d49076 --- /dev/null +++ b/ciao-cli/ciao-cli.go @@ -0,0 +1,836 @@ +// +// Copyright (c) 2016 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +package main + +import ( + "bytes" + "crypto/tls" + "encoding/json" + "errors" + "flag" + "fmt" + "io" + "io/ioutil" + "net/http" + "os" + "strings" + "time" + + "github.com/davecgh/go-spew/spew" + "github.com/golang/glog" + "github.com/mitchellh/mapstructure" + "github.com/rackspace/gophercloud" + "github.com/rackspace/gophercloud/openstack" + "github.com/rackspace/gophercloud/openstack/identity/v3/tokens" + + "github.com/01org/ciao/payloads" +) + +var scopedToken string +var tenantID string + +const openstackComputePort = 8774 +const openstackComputeVersion = "v2.1" + +type action uint8 + +const ( + computeActionStart action = iota + computeActionStop +) + +type scheme string + +const ( + computeHTTP scheme = "http" + computeHTTPS scheme = "https" +) + +var computeScheme = computeHTTPS + +func (s *scheme) String() string { + switch *s { + case computeHTTP: + return "http" + case computeHTTPS: + return "https" + } + + return "" +} + +func (s *scheme) Set(value string) error { + for _, r := range strings.Split(value, ",") { + if r == "http" { + *s = computeHTTP + return nil + } else if r == "https" { + *s = computeHTTPS + return nil + } else { + return errors.New("Unknown scheme") + } + } + + return nil +} + +func debugf(format string, args ...interface{}) { + glog.V(2).Infof("ciao-cli DEBUG: "+format, args...) +} + +func infof(format string, args ...interface{}) { + glog.V(1).Infof("ciao-cli INFO: "+format, args...) +} + +func warningf(format string, args ...interface{}) { + glog.Warningf("ciao-cli WARNING: "+format, args...) +} + +func errorf(format string, args ...interface{}) { + glog.Errorf("ciao-cli ERROR: "+format, args...) +} + +func fatalf(format string, args ...interface{}) { + glog.Fatalf("ciao-cli FATAL: "+format, args...) 
+ os.Exit(1) +} + +var ( + listInstances = flag.Bool("list-instances", false, "List all instances for a tenant or for a compute node") + listQuotas = flag.Bool("list-quotas", false, "List quotas status for a tenant") + listResources = flag.Bool("list-resources", false, "List consumed resources for a tenant for the past 15mn") + listWorkloads = flag.Bool("list-workloads", false, "List all workloads") + listTenants = flag.Bool("list-tenants", false, "List all tenants") + listComputeNodes = flag.Bool("list-cns", false, "List all compute nodes") + listCNCIs = flag.Bool("list-cncis", false, "List all CNCIs") + listLength = flag.Int("list-length", 0, "Maximum number of items in the reponse") + dumpCNCI = flag.Bool("dump-cnci", false, "Dump a CNCI details") + dumpToken = flag.Bool("dump-token", false, "Dump keystone tokens") + dumpTenantID = flag.Bool("dump-tenant-id", false, "Dump tenant UUID") + clusterStatus = flag.Bool("cluster-status", false, "List all compute nodes") + launchInstances = flag.Bool("launch-instances", false, "Launch Ciao instances") + deleteInstance = flag.Bool("delete-instance", false, "Delete a Ciao instance") + stopInstance = flag.Bool("stop-instance", false, "Stop a Ciao instance") + restartInstance = flag.Bool("restart-instance", false, "Restart a Ciao instance") + workload = flag.String("workload", "", "Workload UUID") + instances = flag.Int("instances", 1, "Number of instances to create") + instance = flag.String("instance", "", "Instance UUID") + instanceMarker = flag.String("instance-marker", "", "Show instance list starting from the next instance after instance-marker") + tenant = flag.String("tenant", "", "Tenant UUID") + scope = flag.String("scope", "service", "Scope tenant name") + computeNode = flag.String("cn", "", "Compute node UUID") + cnci = flag.String("cnci", "", "CNCI UUID") + controllerURL = flag.String("controller", "localhost", "Controller URL") + computePort = flag.Int("computeport", openstackComputePort, "Openstack Compute 
API port") + identityURL = flag.String("identity", "", "Keystone URL") + identityUser = flag.String("username", "nova", "Openstack Service Username") + identityPassword = flag.String("password", "nova", "Openstack Service Username") +) + +type Project struct { + ID string `mapstructure:"id"` + Name string `mapstructure:"name"` +} + +type getResult struct { + tokens.GetResult +} + +func (r getResult) ExtractProject() (string, error) { + if r.Err != nil { + return "", r.Err + } + + var response struct { + Token struct { + ValidProject Project `mapstructure:"project"` + } `mapstructure:"token"` + } + + err := mapstructure.Decode(r.Body, &response) + if err != nil { + return "", err + } + + return response.Token.ValidProject.ID, nil +} + +func getScopedToken(username string, password string, projectScope string) (string, string, error) { + opt := gophercloud.AuthOptions{ + IdentityEndpoint: *identityURL + "/v3/", + Username: username, + Password: password, + DomainID: "default", + AllowReauth: true, + } + + provider, err := openstack.AuthenticatedClient(opt) + if err != nil { + errorf("Could not get AuthenticatedClient %s\n", err) + return "", "", nil + } + + client := openstack.NewIdentityV3(provider) + if client == nil { + errorf("something went wrong") + return "", "", nil + } + + scope := tokens.Scope{ + ProjectName: projectScope, + DomainName: "default", + } + token, err := tokens.Create(client, opt, &scope).Extract() + if err != nil { + errorf("Could not extract token %s\n", err) + return "", "", nil + } + + r := tokens.Get(client, token.ID) + result := getResult{r} + tenantID, err := result.ExtractProject() + if err != nil { + errorf("Could not extract tenant ID %s\n", err) + return "", "", nil + } + + debugf("Token: %s\n", spew.Sdump(result.Body)) + + if *dumpToken == true { + spew.Dump(result.Body) + } + + infof("Got token %s for (%s, %s, %s)\n", token.ID, username, password, projectScope) + + return token.ID, tenantID, nil +} + +type queryValue struct { + 
name, value string +} + +func buildComputeURL(format string, args ...interface{}) string { + prefix := fmt.Sprintf("%s://%s:%d/%s/", computeScheme, *controllerURL, *computePort, openstackComputeVersion) + return fmt.Sprintf(prefix+format, args...) +} + +func sendComputeRequest(method string, url string, values []queryValue, body io.Reader) (*http.Response, error) { + req, err := http.NewRequest(method, os.ExpandEnv(url), body) + if err != nil { + return nil, err + } + + infof("Sending %s %s\n", method, url) + + if values != nil { + v := req.URL.Query() + + for _, value := range values { + infof("Adding URL query %s=%s\n", value.name, value.value) + v.Add(value.name, value.value) + } + + req.URL.RawQuery = v.Encode() + } + + if scopedToken != "" { + req.Header.Add("X-Auth-Token", scopedToken) + } + + if body != nil { + req.Header.Set("Content-Type", "application/json") + req.Header.Set("Accept", "application/json") + } + + tlsConfig := &tls.Config{} + if computeScheme == computeHTTPS { + warningf("Skipping TLS verification for %s scheme\n", computeScheme) + tlsConfig = &tls.Config{InsecureSkipVerify: true} + } + + transport := &http.Transport{ + TLSClientConfig: tlsConfig, + } + + client := &http.Client{Transport: transport} + resp, err := client.Do(req) + if err != nil { + errorf("Could not send HTTP request %s\n", err) + return nil, err + } + + infof("Got HTTP response (status %s)\n", resp.Status) + + if resp.StatusCode >= http.StatusBadRequest { + respBody, err := ioutil.ReadAll(resp.Body) + if err != nil { + errorf("Could not read the HTTP response %s\n", err) + spew.Dump(resp.Body) + return resp, err + } + + return resp, fmt.Errorf("HTTP Error [%d] for [%s %s]: %s", resp.StatusCode, method, url, respBody) + } + + return resp, err +} + +func unmarshalComputeResponse(resp *http.Response, v interface{}) error { + defer resp.Body.Close() + + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + errorf("Could not read the HTTP response %s\n", err) + return err 
+ } + + debugf("Response body %v\n", spew.Sdump(body)) + + err = json.Unmarshal(body, v) + if err != nil { + errorf("Could not unmarshal the HTTP response %s\n", err) + return err + } + + return nil +} + +func listAllInstances(tenant string, workload string, marker string, limit int) { + var servers payloads.ComputeServers + var url string + + if tenant != "" { + url = buildComputeURL("%s/servers/detail", tenant) + } else if workload != "" { + url = buildComputeURL("flavors/%s/servers/detail", workload) + } + + var values []queryValue + if limit > 0 { + values = append(values, queryValue{ + name: "limit", + value: fmt.Sprintf("%d", limit), + }) + } + + if marker != "" { + values = append(values, queryValue{ + name: "marker", + value: marker, + }) + } + + resp, err := sendComputeRequest("GET", url, values, nil) + if err != nil { + fatalf(err.Error()) + } + + err = unmarshalComputeResponse(resp, &servers) + if err != nil { + fatalf(err.Error()) + } + + for i, server := range servers.Servers { + fmt.Printf("Instance #%d\n", i+1) + fmt.Printf("\tUUID: %s\n", server.ID) + fmt.Printf("\tStatus: %s\n", server.Status) + fmt.Printf("\tPrivate IP: %s\n", server.Addresses.Private[0].Addr) + fmt.Printf("\tMAC Address: %s\n", server.Addresses.Private[0].OSEXTIPSMACMacAddr) + fmt.Printf("\tCN UUID: %s\n", server.HostID) + fmt.Printf("\tImage UUID: %s\n", server.Image.ID) + fmt.Printf("\tTenant UUID: %s\n", server.TenantID) + if server.SSHIP != "" { + fmt.Printf("\tSSH IP: %s\n", server.SSHIP) + fmt.Printf("\tSSH Port: %d\n", server.SSHPort) + } + } +} + +func limitToString(limit int) string { + if limit == -1 { + return "Unlimited" + } + + return fmt.Sprintf("%d", limit) +} + +func listTenantQuotas(tenant string) { + var resources payloads.CiaoTenantResources + url := buildComputeURL("%s/quotas", tenant) + + resp, err := sendComputeRequest("GET", url, nil, nil) + if err != nil { + fatalf(err.Error()) + } + + err = unmarshalComputeResponse(resp, &resources) + if err != nil { + 
fatalf(err.Error()) + } + + fmt.Printf("Quotas for tenant %s:\n", resources.ID) + fmt.Printf("\tInstances: %d | %s\n", resources.InstanceUsage, limitToString(resources.InstanceLimit)) + fmt.Printf("\tCPUs: %d | %s\n", resources.VCPUUsage, limitToString(resources.VCPULimit)) + fmt.Printf("\tMemory: %d | %s\n", resources.MemUsage, limitToString(resources.MemLimit)) + fmt.Printf("\tDisk: %d | %s\n", resources.DiskUsage, limitToString(resources.DiskLimit)) +} + +func listTenantResources(tenant string) { + var usage payloads.CiaoUsageHistory + url := buildComputeURL("%s/resources", tenant) + + now := time.Now() + values := []queryValue{ + { + name: "start_date", + value: now.Add(-15 * time.Minute).Format(time.RFC3339), + }, + { + name: "end_date", + value: now.Format(time.RFC3339), + }, + } + + resp, err := sendComputeRequest("GET", url, values, nil) + if err != nil { + fatalf(err.Error()) + } + + err = unmarshalComputeResponse(resp, &usage) + if err != nil { + fatalf(err.Error()) + } + + fmt.Printf("Usage for tenant %s:\n", tenant) + for _, u := range usage.Usages { + fmt.Printf("\t%v: [%d CPUs] [%d MB memory] [%d MB disk]\n", u.Timestamp, u.VCPU, u.Memory, u.Disk) + } +} + +func workloadDetail(tenant string, workload string) string { + var flavor payloads.ComputeFlavorDetails + + url := buildComputeURL("%s/flavors/%s", tenant, workload) + + resp, err := sendComputeRequest("GET", url, nil, nil) + if err != nil { + fatalf(err.Error()) + } + + err = unmarshalComputeResponse(resp, &flavor) + if err != nil { + fatalf(err.Error()) + } + + return fmt.Sprintf("\tName: %s\n\tUUID:%s\n\tImage UUID: %s\n\tCPUs: %d\n\tMemory: %d MB\n", + flavor.Flavor.Name, flavor.Flavor.ID, flavor.Flavor.Disk, flavor.Flavor.Vcpus, flavor.Flavor.RAM) +} + +func listTenantWorkloads(tenant string) { + var flavors payloads.ComputeFlavors + if tenant == "" { + tenant = "faketenant" + } + + url := buildComputeURL("%s/flavors", tenant) + + resp, err := sendComputeRequest("GET", url, nil, nil) + if err 
!= nil { + fatalf(err.Error()) + } + + err = unmarshalComputeResponse(resp, &flavors) + if err != nil { + fatalf(err.Error()) + } + + for i, flavor := range flavors.Flavors { + fmt.Printf("Workload %d\n", i+1) + fmt.Printf(workloadDetail(tenant, flavor.ID)) + } +} + +func listAllTenants() { + var tenants payloads.CiaoComputeTenants + + url := buildComputeURL("tenants") + + resp, err := sendComputeRequest("GET", url, nil, nil) + if err != nil { + fatalf(err.Error()) + } + + err = unmarshalComputeResponse(resp, &tenants) + if err != nil { + fatalf(err.Error()) + } + + for i, tenant := range tenants.Tenants { + fmt.Printf("Tenant %d\n", i+1) + fmt.Printf("\tUUID: %s\n", tenant.ID) + fmt.Printf("\tName: %s\n", tenant.Name) + } +} + +func listAllComputeNodes() { + var nodes payloads.CiaoComputeNodes + + url := buildComputeURL("nodes") + + resp, err := sendComputeRequest("GET", url, nil, nil) + if err != nil { + fatalf(err.Error()) + } + + err = unmarshalComputeResponse(resp, &nodes) + if err != nil { + fatalf(err.Error()) + } + + for i, node := range nodes.Nodes { + fmt.Printf("Compute Node %d\n", i+1) + fmt.Printf("\tUUID: %s\n", node.ID) + fmt.Printf("\tStatus: %s\n", node.Status) + fmt.Printf("\tLoad: %d\n", node.Load) + fmt.Printf("\tAvailable/Total memory: %d/%d MB\n", node.MemAvailable, node.MemTotal) + fmt.Printf("\tAvailable/Total disk: %d/%d MB\n", node.DiskAvailable, node.DiskTotal) + fmt.Printf("\tTotal Instances: %d\n", node.TotalInstances) + fmt.Printf("\t\tRunning Instances: %d\n", node.TotalRunningInstances) + fmt.Printf("\t\tPending Instances: %d\n", node.TotalPendingInstances) + fmt.Printf("\t\tPaused Instances: %d\n", node.TotalPausedInstances) + } +} + +func listAllCNCIs() { + var cncis payloads.CiaoCNCIs + + url := buildComputeURL("cncis") + + resp, err := sendComputeRequest("GET", url, nil, nil) + if err != nil { + fatalf(err.Error()) + } + + err = unmarshalComputeResponse(resp, &cncis) + if err != nil { + fatalf(err.Error()) + } + + for i, cnci := 
range cncis.CNCIs { + fmt.Printf("CNCI %d\n", i+1) + fmt.Printf("\tCNCI UUID: %s\n", cnci.ID) + fmt.Printf("\tTenant UUID: %s\n", cnci.TenantID) + fmt.Printf("\tIPv4: %s\n", cnci.IPv4) + fmt.Printf("\tSubnets:\n") + for _, subnet := range cnci.Subnets { + fmt.Printf("\t\t%s\n", subnet.Subnet) + } + } +} + +func dumpCNCIDetails(cnciID string) { + var cnci payloads.CiaoCNCI + + url := buildComputeURL("cncis/%s/detail", cnciID) + + resp, err := sendComputeRequest("GET", url, nil, nil) + if err != nil { + fatalf(err.Error()) + } + + err = unmarshalComputeResponse(resp, &cnci) + if err != nil { + fatalf(err.Error()) + } + + fmt.Printf("\tCNCI UUID: %s\n", cnci.ID) + fmt.Printf("\tTenant UUID: %s\n", cnci.TenantID) + fmt.Printf("\tIPv4: %s\n", cnci.IPv4) + fmt.Printf("\tSubnets:\n") + for _, subnet := range cnci.Subnets { + fmt.Printf("\t\t%s\n", subnet.Subnet) + } +} + +func createTenantInstance(tenant string, workload string, instances int) { + var server payloads.ComputeCreateServer + var servers payloads.ComputeServers + + server.Server.Workload = workload + server.Server.MaxInstances = instances + server.Server.MinInstances = 1 + + serverBytes, err := json.Marshal(server) + if err != nil { + fatalf(err.Error()) + } + body := bytes.NewReader(serverBytes) + + url := buildComputeURL("%s/servers", tenant) + + resp, err := sendComputeRequest("POST", url, nil, body) + if err != nil { + fatalf(err.Error()) + } + + if resp.StatusCode != http.StatusAccepted { + fatalf("Instance creation failed: %s", resp.Status) + } + + err = unmarshalComputeResponse(resp, &servers) + if err != nil { + fatalf(err.Error()) + } + + for _, server := range servers.Servers { + fmt.Printf("Created new instance: %s\n", server.ID) + } +} + +func deleteTenantInstance(tenant string, instance string) { + url := buildComputeURL("%s/servers/%s", tenant, instance) + + resp, err := sendComputeRequest("DELETE", url, nil, nil) + if err != nil { + fatalf(err.Error()) + + } + + defer resp.Body.Close() + + if 
resp.StatusCode != http.StatusAccepted { + fatalf("Instance deletion failed: %s", resp.Status) + } + + fmt.Printf("Deleted instance: %s\n", instance) +} + +func listNodeInstances(node string) { + var servers payloads.CiaoServersStats + url := buildComputeURL("nodes/%s/servers/detail", node) + + resp, err := sendComputeRequest("GET", url, nil, nil) + if err != nil { + fatalf(err.Error()) + } + + err = unmarshalComputeResponse(resp, &servers) + if err != nil { + fatalf(err.Error()) + } + + for i, server := range servers.Servers { + fmt.Printf("Instance #%d\n", i+1) + fmt.Printf("\tUUID: %s\n", server.ID) + fmt.Printf("\tStatus: %s\n", server.Status) + fmt.Printf("\tTenant UUID: %s\n", server.TenantID) + fmt.Printf("\tIPv4: %s\n", server.IPv4) + fmt.Printf("\tCPUs used: %d\n", server.VCPUUsage) + fmt.Printf("\tMemory used: %d MB\n", server.MemUsage) + fmt.Printf("\tDisk used: %d MB\n", server.DiskUsage) + } +} + +func dumpClusterStatus() { + var status payloads.CiaoClusterStatus + url := buildComputeURL("nodes/summary") + + resp, err := sendComputeRequest("GET", url, nil, nil) + if err != nil { + fatalf(err.Error()) + } + + err = unmarshalComputeResponse(resp, &status) + if err != nil { + fatalf(err.Error()) + } + + fmt.Printf("Total Nodes %d\n", status.Status.TotalNodes) + fmt.Printf("\tReady %d\n", status.Status.TotalNodesReady) + fmt.Printf("\tFull %d\n", status.Status.TotalNodesFull) + fmt.Printf("\tOffline %d\n", status.Status.TotalNodesOffline) + fmt.Printf("\tMaintenance %d\n", status.Status.TotalNodesMaintenance) +} + +const osStart = "os-start" +const osStop = "os-stop" + +func startStopInstance(tenant, instance string, action action) { + var actionBytes []byte + + switch action { + case computeActionStart: + actionBytes = []byte(osStart) + case computeActionStop: + actionBytes = []byte(osStop) + default: + fatalf("Unsupported action %d\n", action) + } + + body := bytes.NewReader(actionBytes) + + url := buildComputeURL("%s/servers/%s/action", tenant, 
instance) + + resp, err := sendComputeRequest("POST", url, nil, body) + if err != nil { + fatalf(err.Error()) + } + + if resp.StatusCode != http.StatusAccepted { + fatalf("Instance action failed: %s", resp.Status) + } + + switch action { + case computeActionStart: + fmt.Printf("Instance %s restarted\n", instance) + case computeActionStop: + fmt.Printf("Instance %s stopped\n", instance) + } +} + +func main() { + flag.Var(&computeScheme, "scheme", "Compute API URL scheme (http or https)") + flag.Parse() + + if *identityURL != "" { + if len(*identityUser) == 0 { + fatalf("Missing required -user parameter") + } + + if len(*identityPassword) == 0 { + fatalf("Missing required -user parameter") + } + + if len(*scope) == 0 { + fatalf("Missing required -scope parameter") + } + + t, id, err := getScopedToken(*identityUser, *identityPassword, *scope) + if err != nil { + fatalf(err.Error()) + } + + scopedToken = t + tenantID = id + } + + if *dumpTenantID == true { + fmt.Printf("Tenant UUID: %s\n", tenantID) + } + + if *listInstances == true { + if len(*tenant) != 0 { + listAllInstances(*tenant, "", *instanceMarker, *listLength) + } else if len(*computeNode) != 0 { + listNodeInstances(*computeNode) + } else if len(*workload) != 0 { + listAllInstances("", *workload, *instanceMarker, *listLength) + } else { + fatalf("Missing required -tenant or -cn or -workload parameters") + } + } + + if *listQuotas == true { + if len(*tenant) == 0 { + fatalf("Missing required -tenant parameter") + } + + listTenantQuotas(*tenant) + } + + if *listResources == true { + if len(*tenant) == 0 { + fatalf("Missing required -tenant parameter") + } + + listTenantResources(*tenant) + } + + if *listWorkloads == true { + if len(*tenant) == 0 { + fatalf("Missing required -tenant parameter") + } + + listTenantWorkloads(*tenant) + } + + if *listTenants == true { + listAllTenants() + } + + if *listComputeNodes == true { + listAllComputeNodes() + } + + if *listCNCIs == true { + listAllCNCIs() + } + + if 
*clusterStatus == true { + dumpClusterStatus() + } + + if *launchInstances == true { + if len(*tenant) == 0 { + fatalf("Missing required -tenant parameter") + } + + if len(*workload) == 0 { + fatalf("Missing required -workload parameter") + } + + createTenantInstance(*tenant, *workload, *instances) + } + + if *deleteInstance == true { + if len(*tenant) == 0 { + fatalf("Missing required -tenant parameter") + } + + if len(*instance) == 0 { + fatalf("Missing required -instance parameter") + } + + deleteTenantInstance(*tenant, *instance) + } + + if *dumpCNCI == true { + if len(*cnci) == 0 { + fatalf("Missing required -cnci parameter") + } + + dumpCNCIDetails(*cnci) + } + + if *stopInstance == true || *restartInstance == true { + if len(*tenant) == 0 { + fatalf("Missing required -tenant parameter") + } + + if len(*instance) == 0 { + fatalf("Missing required -instance parameter") + } + + action := computeActionStart + if *stopInstance == true { + action = computeActionStop + } + + startStopInstance(*tenant, *instance, action) + } +} diff --git a/ciao-controller/TODO b/ciao-controller/TODO new file mode 100644 index 000000000..7adc194c6 --- /dev/null +++ b/ciao-controller/TODO @@ -0,0 +1,16 @@ +Fix sql error handling +write test cases to check coherency of cache and database +fix dbLock to be a RWMutex +clean up all the comments +either get rid of the table abstraction completely or figure out + how to use it better + +figure out why we can't reauthenticate with gophercloud after our token +expires + +cleanup TODO +----------- +move template files into subdirectory +move db init (csv and yaml) files into subdirectory +make it so you don't need to add usage - just do add instance. 
+get rid of GetInstanceInfo - make datastore GetInstance() diff --git a/ciao-controller/client.go b/ciao-controller/client.go new file mode 100644 index 000000000..3337794f1 --- /dev/null +++ b/ciao-controller/client.go @@ -0,0 +1,229 @@ +/* +// Copyright (c) 2016 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +*/ + +package main + +import ( + "github.com/01org/ciao/payloads" + "github.com/01org/ciao/ssntp" + "github.com/golang/glog" + "gopkg.in/yaml.v2" + "time" +) + +type ssntpClient struct { + context *controller + ssntp ssntp.Client + name string +} + +func (client *ssntpClient) ConnectNotify() { + glog.Info(client.name, " connected") +} + +func (client *ssntpClient) DisconnectNotify() { + glog.Info(client.name, " disconnected") +} + +func (client *ssntpClient) StatusNotify(status ssntp.Status, frame *ssntp.Frame) { + glog.Info("STATUS for ", client.name) +} + +func (client *ssntpClient) CommandNotify(command ssntp.Command, frame *ssntp.Frame) { + var stats payloads.Stat + payload := frame.Payload + + glog.Info("COMMAND ", command, " for ", client.name) + + if command == ssntp.STATS { + stats.Init() + err := yaml.Unmarshal(payload, &stats) + if err != nil { + glog.Warning("error unmarshalling temp stat") + return + } + client.context.ds.HandleStats(stats) + } + glog.V(1).Info(string(payload)) +} + +func (client *ssntpClient) EventNotify(event ssntp.Event, frame *ssntp.Frame) { + payload := frame.Payload + + glog.Info("EVENT ", event, 
" for ", client.name) + switch event { + case ssntp.InstanceDeleted: + var event payloads.EventInstanceDeleted + err := yaml.Unmarshal(payload, &event) + if err != nil { + glog.Warning("Error unmarshalling InstanceDeleted") + return + } + client.context.ds.DeleteInstance(event.InstanceDeleted.InstanceUUID) + case ssntp.ConcentratorInstanceAdded: + var event payloads.EventConcentratorInstanceAdded + err := yaml.Unmarshal(payload, &event) + if err != nil { + glog.Warning(err) + return + } + newCNCI := event.CNCIAdded + client.context.ds.AddCNCIIP(newCNCI.ConcentratorMAC, newCNCI.ConcentratorIP) + case ssntp.TraceReport: + var trace payloads.Trace + err := yaml.Unmarshal(payload, &trace) + if err != nil { + glog.Warning("error unmarshalling TraceReport") + return + } + client.context.ds.HandleTraceReport(trace) + } + glog.V(1).Info(string(payload)) +} + +func (client *ssntpClient) ErrorNotify(err ssntp.Error, frame *ssntp.Frame) { + payload := frame.Payload + + glog.Info("ERROR (", err, ") for ", client.name) + switch err { + case ssntp.StartFailure: + var failure payloads.ErrorStartFailure + err := yaml.Unmarshal(payload, &failure) + if err != nil { + glog.Warning("Error unmarshalling StartFailure") + return + } + client.context.ds.StartFailure(failure.InstanceUUID, failure.Reason) + case ssntp.StopFailure: + var failure payloads.ErrorStopFailure + err := yaml.Unmarshal(payload, &failure) + if err != nil { + glog.Warning("Error unmarshalling StopFailure") + return + } + client.context.ds.StopFailure(failure.InstanceUUID, failure.Reason) + case ssntp.RestartFailure: + var failure payloads.ErrorRestartFailure + err := yaml.Unmarshal(payload, &failure) + if err != nil { + glog.Warning("Error unmarshalling RestartFailure") + return + } + client.context.ds.RestartFailure(failure.InstanceUUID, failure.Reason) + } + glog.V(1).Info(string(payload)) +} + +func newSSNTPClient(context *controller, config *ssntp.Config) (client *ssntpClient, err error) { + client = 
&ssntpClient{name: "ciao Controller", context: context} + + err = client.ssntp.Dial(config, client) + return +} + +func (client *ssntpClient) StartTracedWorkload(config string, startTime time.Time, label string) (err error) { + glog.V(1).Info("START TRACED config:") + glog.V(1).Info(config) + traceConfig := &ssntp.TraceConfig{PathTrace: true, Start: startTime, Label: []byte(label)} + _, err = client.ssntp.SendTracedCommand(ssntp.START, []byte(config), traceConfig) + return +} + +func (client *ssntpClient) StartWorkload(config string) (err error) { + glog.V(1).Info("START config:") + glog.V(1).Info(config) + _, err = client.ssntp.SendCommand(ssntp.START, []byte(config)) + return +} + +func (client *ssntpClient) DeleteInstance(instanceID string, nodeID string) (err error) { + stopCmd := payloads.StopCmd{ + InstanceUUID: instanceID, + WorkloadAgentUUID: nodeID, + } + payload := payloads.Delete{ + Delete: stopCmd, + } + y, err := yaml.Marshal(payload) + if err != nil { + return err + } + glog.Info("DELETE instance_id: ", instanceID, "node_id ", nodeID) + glog.V(1).Info(string(y)) + _, err = client.ssntp.SendCommand(ssntp.DELETE, y) + return +} + +func (client *ssntpClient) StopInstance(instanceID string, nodeID string) (err error) { + stopCmd := payloads.StopCmd{ + InstanceUUID: instanceID, + WorkloadAgentUUID: nodeID, + } + payload := payloads.Stop{ + Stop: stopCmd, + } + y, err := yaml.Marshal(payload) + if err != nil { + return err + } + glog.Info("STOP instance_id: ", instanceID, "node_id ", nodeID) + glog.V(1).Info(string(y)) + _, err = client.ssntp.SendCommand(ssntp.STOP, y) + return +} + +func (client *ssntpClient) RestartInstance(instanceID string, nodeID string) (err error) { + restartCmd := payloads.RestartCmd{ + InstanceUUID: instanceID, + WorkloadAgentUUID: nodeID, + } + + payload := payloads.Restart{ + Restart: restartCmd, + } + + y, err := yaml.Marshal(payload) + if err != nil { + return err + } + glog.Info("RESTART instance: ", instanceID) + 
glog.V(1).Info(string(y)) + _, err = client.ssntp.SendCommand(ssntp.RESTART, y) + return +} + +func (client *ssntpClient) EvacuateNode(nodeID string) (err error) { + evacuateCmd := payloads.EvacuateCmd{ + WorkloadAgentUUID: nodeID, + } + + payload := payloads.Evacuate{ + Evacuate: evacuateCmd, + } + + y, err := yaml.Marshal(payload) + if err != nil { + return err + } + glog.Info("EVACUATE node: ", nodeID) + glog.V(1).Info(string(y)) + _, err = client.ssntp.SendCommand(ssntp.EVACUATE, y) + return +} + +func (client *ssntpClient) Disconnect() { + client.ssntp.Close() +} diff --git a/ciao-controller/command.go b/ciao-controller/command.go new file mode 100644 index 000000000..f7b64d8e0 --- /dev/null +++ b/ciao-controller/command.go @@ -0,0 +1,206 @@ +/* +// Copyright (c) 2016 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +*/ + +package main + +import ( + "errors" + "fmt" + "github.com/01org/ciao/ciao-controller/types" + "github.com/golang/glog" + "time" +) + +func (c *controller) evacuateNode(nodeID string) error { + // should I bother to see if nodeID is valid? + go c.client.EvacuateNode(nodeID) + return nil +} + +func (c *controller) restartInstance(instanceID string) error { + // should I bother to see if instanceID is valid? + // get node id. 
If there is no node id we can't send a restart + nodeID, state, err := c.ds.GetInstanceInfo(instanceID) + if err != nil { + return err + } + + if nodeID == "" { + return errors.New("Instance Not Assigned to Node") + } + + if state != "exited" { + return errors.New("You may only restart paused instances") + } + + go c.client.RestartInstance(instanceID, nodeID) + return nil +} + +func (c *controller) stopInstance(instanceID string) error { + // get node id. If there is no node id we can't send a delete + nodeID, state, err := c.ds.GetInstanceInfo(instanceID) + if err != nil { + return err + } + + if nodeID == "" { + return errors.New("Instance Not Assigned to Node") + } + + if state == "pending" { + return errors.New("You may not stop a pending instance") + } + + go c.client.StopInstance(instanceID, nodeID) + return nil +} + +func (c *controller) deleteInstance(instanceID string) error { + // get node id. If there is no node id we can't send a delete + nodeID, _, err := c.ds.GetInstanceInfo(instanceID) + if err != nil { + return err + } + + if nodeID == "" { + return errors.New("Instance Not Assigned to Node") + } + + go c.client.DeleteInstance(instanceID, nodeID) + return nil +} + +func (c *controller) startWorkload(workloadID string, tenantID string, instances int, trace bool, label string) ([]*types.Instance, error) { + var e error + + if instances == 0 { + return nil, errors.New("Missing number of instances to start") + } + + wl, err := c.ds.GetWorkload(workloadID) + if err != nil { + return nil, err + } + + if !isCNCIWorkload(wl) { + tenant, err := c.ds.GetTenant(tenantID) + if err != nil { + return nil, err + } + + if tenant == nil { + if *noNetwork { + _, err := c.ds.AddTenant(tenantID) + if err != nil { + return nil, err + } + } else { + + err = c.addTenant(tenantID) + if err != nil { + return nil, err + } + } + } else if tenant.CNCIIP == "" { + if !*noNetwork { + _ = c.addTenant(tenantID) + tenant, err = c.ds.GetTenant(tenantID) + if tenant.CNCIIP == "" { + 
return nil, errors.New("Unable to Launch Tenant CNCI") + } + } + } + } + + var newInstances []*types.Instance + + for i := 0; i < instances; i++ { + startTime := time.Now() + instance, err := newInstance(c, tenantID, wl) + if err != nil { + glog.V(2).Info("error newInstance") + e = err + continue + } + instance.startTime = startTime + + ok, err := instance.Allowed() + if ok { + err = instance.Add() + if err != nil { + glog.V(2).Info("error adding instance") + instance.Clean() + e = err + continue + } + + newInstances = append(newInstances, &instance.Instance) + if trace == false { + go c.client.StartWorkload(instance.newConfig.config) + } else { + go c.client.StartTracedWorkload(instance.newConfig.config, instance.startTime, label) + } + } else { + instance.Clean() + if err != nil { + e = err + continue + } else { + // stop if we are over limits + return nil, errors.New("Over Tenant Limits") + } + } + } + + return newInstances, e +} + +func (c *controller) launchCNCI(tenantID string) (err error) { + workloadID, err := c.ds.GetCNCIWorkloadID() + if err != nil { + return err + } + + ch := make(chan bool) + + c.ds.AddTenantChan(ch, tenantID) + + _, err = c.startWorkload(workloadID, tenantID, 1, false, "") + if err != nil { + return err + } + + success := <-ch + + if success { + return nil + } + msg := fmt.Sprintf("Failed to Launch CNCI for %s", tenantID) + return errors.New(msg) +} + +func (c *controller) addTenant(id string) error { + // create new entry in datastore + _, err := c.ds.AddTenant(id) + if err != nil { + return err + } + + // start up a CNCI. 
this will block till the + // CNCI started event is returned + return c.launchCNCI(id) +} diff --git a/ciao-controller/compute.go b/ciao-controller/compute.go new file mode 100644 index 000000000..b44a1a090 --- /dev/null +++ b/ciao-controller/compute.go @@ -0,0 +1,1071 @@ +/* +// Copyright (c) 2016 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +*/ + +package main + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "log" + "net/http" + "net/http/httputil" + "strconv" + "strings" + "time" + + "github.com/01org/ciao/ciao-controller/types" + "github.com/01org/ciao/payloads" + "github.com/01org/ciao/ssntp" + "github.com/golang/glog" + "github.com/gorilla/mux" +) + +const openstackComputeAPIPort = 8774 + +type action uint8 + +const ( + computeActionStart action = iota + computeActionStop +) + +type pagerFilterType uint8 + +const ( + none pagerFilterType = 0 + workloadFilter = 0x1 + statusFilter = 0x2 +) + +type pager interface { + filter(filterType pagerFilterType, filter string, item interface{}) bool + nextPage(filterType pagerFilterType, filter string, r *http.Request) ([]byte, error) +} + +type serverPager struct { + context *controller + instances []*types.Instance +} + +func dumpRequestBody(r *http.Request, body bool) { + if glog.V(2) { + dump, err := httputil.DumpRequest(r, body) + if err != nil { + glog.Errorf("HTTP request dump error %s", err) + } + + glog.Infof("HTTP request [%q]", dump) + } +} + +func dumpRequest(r *http.Request) 
{ + dumpRequestBody(r, false) +} + +func serverQueryParse(r *http.Request) (int, string) { + values := r.URL.Query() + limit := 0 + marker := "" + if values["limit"] != nil { + l, err := strconv.ParseInt(values["limit"][0], 10, 32) + if err != nil { + limit = 0 + } else { + limit = (int)(l) + } + } + + if values["marker"] != nil { + marker = values["marker"][0] + } + + return limit, marker +} + +func (pager *serverPager) getInstances(filterType pagerFilterType, filter string, instances []*types.Instance, limit int) ([]byte, error) { + var servers payloads.ComputeServers + pageLength := 0 + + for _, instance := range instances { + if filterType != none && pager.filter(filterType, filter, instance) { + continue + } + + server, err := instanceToServer(pager.context, instance) + if err != nil { + return nil, err + } + + servers.Servers = append(servers.Servers, server) + pageLength++ + if limit > 0 && pageLength >= limit { + break + } + + } + + b, err := json.Marshal(servers) + if err != nil { + return nil, err + } + + return b, nil +} + +func (pager *serverPager) filter(filterType pagerFilterType, filter string, instance *types.Instance) bool { + switch filterType { + case workloadFilter: + if instance.WorkloadId != filter { + return true + } + } + + return false +} + +func (pager *serverPager) nextPage(filterType pagerFilterType, filter string, r *http.Request) ([]byte, error) { + limit, lastSeen := serverQueryParse(r) + + if lastSeen == "" { + if limit != 0 { + return pager.getInstances(filterType, filter, pager.instances, limit) + } + + return pager.getInstances(filterType, filter, pager.instances, 0) + } + + for i, instance := range pager.instances { + if instance.Id == lastSeen { + if i >= len(pager.instances)-1 { + return pager.getInstances(filterType, filter, nil, limit) + } + + return pager.getInstances(filterType, filter, pager.instances[i+1:], limit) + } + } + + return nil, fmt.Errorf("Item %s not found", lastSeen) +} + +func tenantToken(context *controller, 
r *http.Request, tenant string) bool { + var validServices = []struct { + serviceType string + serviceName string + }{ + { + serviceType: "compute", + serviceName: "ciao", + }, + { + serviceType: "compute", + serviceName: "nova", + }, + } + token := r.Header["X-Auth-Token"] + if token == nil { + return false + } + + /* TODO Caching or PKI */ + for _, s := range validServices { + if context.id.validateService(token[0], tenant, s.serviceType, s.serviceName) == true { + return true + } + + } + + for _, s := range validServices { + if context.id.validateService(token[0], tenant, s.serviceType, "") == true { + return true + } + + } + + return false +} + +func adminToken(context *controller, r *http.Request) bool { + var validAdmins = []struct { + project string + role string + }{ + { + project: "service", + role: "admin", + }, + { + project: "admin", + role: "admin", + }, + } + + token := r.Header["X-Auth-Token"] + if token == nil { + return false + } + + /* TODO Caching or PKI */ + for _, a := range validAdmins { + if context.id.validateProjectRole(token[0], a.project, a.role) == true { + return true + } + } + + vars := mux.Vars(r) + tenant := vars["tenant"] + glog.V(2).Infof("Invalid token for [%s]", tenant) + return false +} + +func validateToken(context *controller, r *http.Request) bool { + vars := mux.Vars(r) + tenant := vars["tenant"] + + glog.V(2).Infof("Token validation for [%s]", tenant) + + // We do not want to unconditionally check for an admin token, this is inefficient. + // We check for an admin token iff: + // - We do not have a tenant variable + // - We do have one but it does not match the token + + /* If we don't have a tenant parameter, are we admin ? */ + if tenant == "" { + return adminToken(context, r) + } + + /* If we have a tenant parameter that does not match the token are we admin ? 
*/ + if tenantToken(context, r, tenant) == false { + return adminToken(context, r) + } + + return true +} + +func instanceToServer(context *controller, instance *types.Instance) (payloads.Server, error) { + workload, err := context.ds.GetWorkload(instance.WorkloadId) + if err != nil { + return payloads.Server{}, err + } + + imageID := workload.ImageID + + server := payloads.Server{ + HostID: instance.NodeId, + ID: instance.Id, + TenantID: instance.TenantId, + Flavor: payloads.Flavor{ + ID: instance.WorkloadId, + }, + Image: payloads.Image{ + ID: imageID, + }, + Status: instance.State, + Addresses: payloads.Addresses{ + []payloads.PrivateAddresses{ + { + Addr: instance.IPAddress, + OSEXTIPSMACMacAddr: instance.MACAddress, + }, + }, + }, + SSHIP: instance.SSHIP, + SSHPort: instance.SSHPort, + } + + return server, nil +} + +func showServerDetails(w http.ResponseWriter, r *http.Request, context *controller) { + vars := mux.Vars(r) + tenant := vars["tenant"] + instanceID := vars["server"] + var server payloads.ComputeServer + + dumpRequest(r) + + if validateToken(context, r) == false { + http.Error(w, "Invalid token", http.StatusInternalServerError) + return + } + + instance, err := context.ds.GetInstanceFromTenant(tenant, instanceID) + if err != nil { + http.Error(w, "Instance not available", http.StatusInternalServerError) + return + } + + server.Server, err = instanceToServer(context, instance) + if err != nil { + http.Error(w, "Instance not available", http.StatusInternalServerError) + return + } + + b, err := json.Marshal(server) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + w.Header().Set("Content-Type", "application/json") + w.Write(b) +} + +func deleteServer(w http.ResponseWriter, r *http.Request, context *controller) { + vars := mux.Vars(r) + tenant := vars["tenant"] + instance := vars["server"] + + dumpRequest(r) + + if validateToken(context, r) == false { + http.Error(w, "Invalid token", 
http.StatusInternalServerError) + return + } + + /* First check that the instance belongs to this tenant */ + _, err := context.ds.GetInstanceFromTenant(tenant, instance) + if err != nil { + http.Error(w, "Instance not available", http.StatusInternalServerError) + return + } + + err = context.deleteInstance(instance) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + w.WriteHeader(http.StatusAccepted) +} + +func listFlavors(w http.ResponseWriter, r *http.Request, context *controller) { + var flavors payloads.ComputeFlavors + + dumpRequest(r) + + if validateToken(context, r) == false { + http.Error(w, "Invalid token", http.StatusInternalServerError) + return + } + + workloads, err := context.ds.GetWorkloads() + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + for _, workload := range workloads { + flavors.Flavors = append(flavors.Flavors, + struct { + ID string `json:"id"` + Links []payloads.Link `json:"links"` + Name string `json:"name"` + }{ + ID: workload.Id, + Name: workload.Description, + }, + ) + } + + b, err := json.Marshal(flavors) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + w.Header().Set("Content-Type", "application/json") + w.Write(b) +} + +const ( + instances int = 1 + vcpu = 2 + memory = 3 + disk = 4 +) + +func listTenantQuotas(w http.ResponseWriter, r *http.Request, context *controller) { + vars := mux.Vars(r) + tenant := vars["tenant"] + var tenantResource payloads.CiaoTenantResources + + dumpRequest(r) + + if validateToken(context, r) == false { + http.Error(w, "Invalid token", http.StatusInternalServerError) + return + } + + t, err := context.ds.GetTenant(tenant) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + if t == nil { + if *noNetwork { + _, err := context.ds.AddTenant(tenant) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + 
return + } + } else { + err = context.addTenant(tenant) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + } + + t, err = context.ds.GetTenant(tenant) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + } + + resources := t.Resources + + tenantResource.ID = t.Id + + for _, resource := range resources { + switch resource.Rtype { + case instances: + tenantResource.InstanceLimit = resource.Limit + tenantResource.InstanceUsage = resource.Usage + + case vcpu: + tenantResource.VCPULimit = resource.Limit + tenantResource.VCPUUsage = resource.Usage + + case memory: + tenantResource.MemLimit = resource.Limit + tenantResource.MemUsage = resource.Usage + + case disk: + tenantResource.DiskLimit = resource.Limit + tenantResource.DiskUsage = resource.Usage + } + } + + b, err := json.Marshal(tenantResource) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + w.Header().Set("Content-Type", "application/json") + w.Write(b) +} + +func tenantQueryParse(r *http.Request) (time.Time, time.Time, error) { + values := r.URL.Query() + var startTime, endTime time.Time + + if values["start_date"] == nil || values["end_date"] == nil { + return startTime, endTime, fmt.Errorf("Missing date") + } + + startTime, err := time.Parse(time.RFC3339, values["start_date"][0]) + if err != nil { + return startTime, endTime, err + } + + endTime, err = time.Parse(time.RFC3339, values["end_date"][0]) + if err != nil { + return startTime, endTime, err + } + + return startTime, endTime, nil +} + +func listTenantResources(w http.ResponseWriter, r *http.Request, context *controller) { + vars := mux.Vars(r) + tenant := vars["tenant"] + var usage payloads.CiaoUsageHistory + + dumpRequest(r) + + if validateToken(context, r) == false { + http.Error(w, "Invalid token", http.StatusInternalServerError) + return + } + + start, end, err := tenantQueryParse(r) + if err != nil { + http.Error(w, 
err.Error(), http.StatusInternalServerError) + return + } + + glog.V(2).Infof("Start %v\n", start) + glog.V(2).Infof("End %v\n", end) + + usage.Usages, err = context.ds.GetTenantUsage(tenant, start, end) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + b, err := json.Marshal(usage) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + w.Header().Set("Content-Type", "application/json") + w.Write(b) +} + +func showFlavorDetails(w http.ResponseWriter, r *http.Request, context *controller) { + vars := mux.Vars(r) + workloadID := vars["flavor"] + var flavor payloads.ComputeFlavorDetails + + dumpRequest(r) + + if validateToken(context, r) == false { + http.Error(w, "Invalid token", http.StatusInternalServerError) + return + } + + workload, err := context.ds.GetWorkload(workloadID) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + defaults := workload.Defaults + if len(defaults) == 0 { + http.Error(w, "Workload resources not set", http.StatusInternalServerError) + return + } + + var details payloads.FlavorDetails + + details.OsFlavorAccessIsPublic = true + details.ID = workloadID + details.Disk = workload.ImageID + details.Name = workload.Description + + for r := range defaults { + switch defaults[r].Type { + case payloads.VCPUs: + details.Vcpus = defaults[r].Value + case payloads.MemMB: + details.RAM = defaults[r].Value + } + } + + flavor.Flavor = details + + b, err := json.Marshal(flavor) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + w.Header().Set("Content-Type", "application/json") + w.Write(b) +} + +func listServerDetails(w http.ResponseWriter, r *http.Request, context *controller) { + vars := mux.Vars(r) + tenant := vars["tenant"] + workload := vars["flavor"] + var instances []*types.Instance + var err error + + dumpRequest(r) + + if validateToken(context, r) == false { + 
http.Error(w, "Invalid token", http.StatusInternalServerError) + return + } + + if tenant != "" { + instances, err = context.ds.GetAllInstancesFromTenant(tenant) + } else { + instances, err = context.ds.GetAllInstances() + } + + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + pager := serverPager{ + context: context, + instances: instances, + } + + filterType := none + filter := "" + if workload != "" { + filterType = workloadFilter + filter = workload + } + + b, err := pager.nextPage(filterType, filter, r) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + w.Header().Set("Content-Type", "application/json") + w.Write(b) +} + +func createServer(w http.ResponseWriter, r *http.Request, context *controller) { + vars := mux.Vars(r) + tenant := vars["tenant"] + var server payloads.ComputeCreateServer + var servers payloads.ComputeServers + + dumpRequestBody(r, true) + + if validateToken(context, r) == false { + http.Error(w, "Invalid token", http.StatusInternalServerError) + return + } + + defer r.Body.Close() + + body, err := ioutil.ReadAll(r.Body) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + err = json.Unmarshal(body, &server) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + nInstances := 1 + + if server.Server.MaxInstances != 0 { + nInstances = server.Server.MaxInstances + } else if server.Server.MinInstances != 0 { + nInstances = server.Server.MinInstances + } + + instances, err := context.startWorkload(server.Server.Workload, tenant, nInstances, false, "") + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + for _, instance := range instances { + server, err := instanceToServer(context, instance) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + servers.Servers = append(servers.Servers, server) 
+ } + + b, err := json.Marshal(servers) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusAccepted) + w.Write(b) +} + +func serverAction(w http.ResponseWriter, r *http.Request, context *controller) { + vars := mux.Vars(r) + tenant := vars["tenant"] + instance := vars["server"] + var action action + + dumpRequestBody(r, true) + + if validateToken(context, r) == false { + http.Error(w, "Invalid token", http.StatusInternalServerError) + return + } + + /* First check that the instance belongs to this tenant */ + _, err := context.ds.GetInstanceFromTenant(tenant, instance) + if err != nil { + http.Error(w, "Instance not available", http.StatusInternalServerError) + return + } + + defer r.Body.Close() + + body, err := ioutil.ReadAll(r.Body) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + bodyString := string(body) + + if strings.Contains(bodyString, "os-start") { + action = computeActionStart + } else if strings.Contains(bodyString, "os-stop") { + action = computeActionStop + } else { + http.Error(w, "Unsupported action", http.StatusServiceUnavailable) + return + } + + switch action { + case computeActionStart: + err = context.restartInstance(instance) + case computeActionStop: + err = context.stopInstance(instance) + } + + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + w.WriteHeader(http.StatusAccepted) +} + +func listTenants(w http.ResponseWriter, r *http.Request, context *controller) { + var computeTenants payloads.CiaoComputeTenants + + dumpRequest(r) + + if validateToken(context, r) == false { + http.Error(w, "Unauthorized token", http.StatusInternalServerError) + return + } + + tenants, err := context.ds.GetAllTenants() + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + for _, tenant := range tenants { + 
computeTenants.Tenants = append(computeTenants.Tenants, + struct { + ID string `json:"id"` + Name string `json:"name"` + }{ + ID: tenant.Id, + Name: tenant.Name, + }, + ) + } + + b, err := json.Marshal(computeTenants) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + w.Header().Set("Content-Type", "application/json") + w.Write(b) +} + +func listNodes(w http.ResponseWriter, r *http.Request, context *controller) { + dumpRequest(r) + + if validateToken(context, r) == false { + http.Error(w, "Invalid token", http.StatusInternalServerError) + return + } + + computeNodes := context.ds.GetNodeLastStats() + + nodeSummary, err := context.ds.GetNodeSummary() + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + for _, node := range nodeSummary { + for i := range computeNodes.Nodes { + if computeNodes.Nodes[i].ID != node.NodeId { + continue + } + + computeNodes.Nodes[i].TotalInstances = node.TotalInstances + computeNodes.Nodes[i].TotalRunningInstances = node.TotalRunningInstances + computeNodes.Nodes[i].TotalPendingInstances = node.TotalPendingInstances + computeNodes.Nodes[i].TotalPausedInstances = node.TotalPausedInstances + } + } + + b, err := json.Marshal(computeNodes) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + w.Header().Set("Content-Type", "application/json") + w.Write(b) +} + +func nodesSummary(w http.ResponseWriter, r *http.Request, context *controller) { + var nodesStatus payloads.CiaoClusterStatus + + dumpRequest(r) + + if validateToken(context, r) == false { + http.Error(w, "Invalid token", http.StatusInternalServerError) + return + } + + computeNodes := context.ds.GetNodeLastStats() + + glog.V(2).Infof("nodesSummary %d nodes", len(computeNodes.Nodes)) + + nodesStatus.Status.TotalNodes = len(computeNodes.Nodes) + for _, node := range computeNodes.Nodes { + if node.Status == ssntp.READY.String() { + 
nodesStatus.Status.TotalNodesReady++ + } else if node.Status == ssntp.FULL.String() { + nodesStatus.Status.TotalNodesFull++ + } else if node.Status == ssntp.OFFLINE.String() { + nodesStatus.Status.TotalNodesOffline++ + } else if node.Status == ssntp.MAINTENANCE.String() { + nodesStatus.Status.TotalNodesMaintenance++ + } + } + + b, err := json.Marshal(nodesStatus) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + w.Header().Set("Content-Type", "application/json") + w.Write(b) +} + +func listNodeServers(w http.ResponseWriter, r *http.Request, context *controller) { + vars := mux.Vars(r) + nodeID := vars["node"] + + dumpRequest(r) + + if validateToken(context, r) == false { + http.Error(w, "Invalid token", http.StatusInternalServerError) + return + } + + serversStats := context.ds.GetInstanceLastStats(nodeID) + + instances, err := context.ds.GetAllInstancesByNode(nodeID) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + for _, instance := range instances { + for i := range serversStats.Servers { + if serversStats.Servers[i].ID != instance.Id { + continue + } + + serversStats.Servers[i].TenantID = instance.TenantId + serversStats.Servers[i].IPv4 = instance.IPAddress + } + } + + b, err := json.Marshal(serversStats) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + w.Header().Set("Content-Type", "application/json") + w.Write(b) +} + +func listCNCIs(w http.ResponseWriter, r *http.Request, context *controller) { + var ciaoCNCIs payloads.CiaoCNCIs + + dumpRequest(r) + + if validateToken(context, r) == false { + http.Error(w, "Invalid token", http.StatusInternalServerError) + return + } + + cncis, err := context.ds.GetTenantCNCISummary("") + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + var subnets []payloads.CiaoCNCISubnet + + for _, cnci := range cncis { + if cnci.InstanceID == "" { + 
continue + } + + for _, subnet := range cnci.Subnets { + subnets = append(subnets, + payloads.CiaoCNCISubnet{ + Subnet: subnet, + }, + ) + } + + ciaoCNCIs.CNCIs = append(ciaoCNCIs.CNCIs, + payloads.CiaoCNCI{ + ID: cnci.InstanceID, + TenantID: cnci.TenantID, + IPv4: cnci.IPAddress, + Subnets: subnets, + }, + ) + } + + b, err := json.Marshal(ciaoCNCIs) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + w.Header().Set("Content-Type", "application/json") + w.Write(b) +} + +func listCNCIDetails(w http.ResponseWriter, r *http.Request, context *controller) { + vars := mux.Vars(r) + cnciID := vars["cnci"] + var ciaoCNCI payloads.CiaoCNCI + + dumpRequest(r) + + if validateToken(context, r) == false { + http.Error(w, "Invalid token", http.StatusInternalServerError) + return + } + + cncis, err := context.ds.GetTenantCNCISummary(cnciID) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + if len(cncis) > 0 { + var subnets []payloads.CiaoCNCISubnet + cnci := cncis[0] + + for _, subnet := range cnci.Subnets { + subnets = append(subnets, + payloads.CiaoCNCISubnet{ + Subnet: subnet, + }, + ) + } + + ciaoCNCI = payloads.CiaoCNCI{ + ID: cnci.InstanceID, + TenantID: cnci.TenantID, + IPv4: cnci.IPAddress, + Subnets: subnets, + } + } + + b, err := json.Marshal(ciaoCNCI) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + w.Header().Set("Content-Type", "application/json") + w.Write(b) +} + +func createComputeAPI(context *controller) { + r := mux.NewRouter() + + r.HandleFunc("/v2.1/{tenant}/servers", func(w http.ResponseWriter, r *http.Request) { + createServer(w, r, context) + }).Methods("POST") + + r.HandleFunc("/v2.1/{tenant}/servers/detail", func(w http.ResponseWriter, r *http.Request) { + listServerDetails(w, r, context) + }).Methods("GET") + + r.HandleFunc("/v2.1/{tenant}/servers/{server}", func(w http.ResponseWriter, r *http.Request) { + 
showServerDetails(w, r, context) + }).Methods("GET") + + r.HandleFunc("/v2.1/{tenant}/servers/{server}", func(w http.ResponseWriter, r *http.Request) { + deleteServer(w, r, context) + }).Methods("DELETE") + + r.HandleFunc("/v2.1/{tenant}/servers/{server}/action", func(w http.ResponseWriter, r *http.Request) { + serverAction(w, r, context) + }).Methods("POST") + + r.HandleFunc("/v2.1/{tenant}/flavors", func(w http.ResponseWriter, r *http.Request) { + listFlavors(w, r, context) + }).Methods("GET") + + r.HandleFunc("/v2.1/{tenant}/flavors/{flavor}", func(w http.ResponseWriter, r *http.Request) { + showFlavorDetails(w, r, context) + }).Methods("GET") + + r.HandleFunc("/v2.1/{tenant}/resources", func(w http.ResponseWriter, r *http.Request) { + listTenantQuotas(w, r, context) + //listTenantResources(w, r, context) + }).Methods("GET") + + r.HandleFunc("/v2.1/{tenant}/quotas", func(w http.ResponseWriter, r *http.Request) { + listTenantQuotas(w, r, context) + }).Methods("GET") + + /* Avoid conflict with {tenant}/servers/detail */ + r.HandleFunc("/v2.1/nodes/{node}/servers/detail", func(w http.ResponseWriter, r *http.Request) { + listNodeServers(w, r, context) + }).Methods("GET") + + r.HandleFunc("/v2.1/flavors/{flavor}/servers/detail", func(w http.ResponseWriter, r *http.Request) { + listServerDetails(w, r, context) + }).Methods("GET") + + r.HandleFunc("/v2.1/tenants", func(w http.ResponseWriter, r *http.Request) { + listTenants(w, r, context) + }).Methods("GET") + + r.HandleFunc("/v2.1/nodes", func(w http.ResponseWriter, r *http.Request) { + listNodes(w, r, context) + }).Methods("GET") + + r.HandleFunc("/v2.1/nodes/summary", func(w http.ResponseWriter, r *http.Request) { + nodesSummary(w, r, context) + }).Methods("GET") + + r.HandleFunc("/v2.1/cncis", func(w http.ResponseWriter, r *http.Request) { + listCNCIs(w, r, context) + }).Methods("GET") + + r.HandleFunc("/v2.1/cncis/{cnci}/detail", func(w http.ResponseWriter, r *http.Request) { + listCNCIDetails(w, r, context) + 
}).Methods("GET") + + service := fmt.Sprintf(":%d", *computeAPIPort) + log.Fatal(http.ListenAndServeTLS(service, *httpsCAcert, *httpsKey, r)) +} diff --git a/ciao-controller/controller_test.go b/ciao-controller/controller_test.go new file mode 100644 index 000000000..599701344 --- /dev/null +++ b/ciao-controller/controller_test.go @@ -0,0 +1,1492 @@ +/* +// Copyright (c) 2016 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +*/ + +package main + +import ( + "flag" + "fmt" + datastore "github.com/01org/ciao/ciao-controller/internal/datastore" + "github.com/01org/ciao/ciao-controller/types" + "github.com/01org/ciao/payloads" + "github.com/01org/ciao/ssntp" + "github.com/docker/distribution/uuid" + "gopkg.in/yaml.v2" + "math/rand" + "net" + "os" + "strconv" + "sync" + "testing" + "time" +) + +type ssntpTestServer struct { + ssntp ssntp.Server + clients []string + cmdChans map[ssntp.Command]chan cmdResult + cmdChansLock *sync.Mutex + + netClients map[string]bool + netClientsLock *sync.RWMutex +} + +type cmdResult struct { + instanceUUID string + err error + nodeUUID string + tenantUUID string + cnci bool +} + +func (server *ssntpTestServer) addCmdChan(cmd ssntp.Command, c chan cmdResult) { + server.cmdChansLock.Lock() + server.cmdChans[cmd] = c + server.cmdChansLock.Unlock() +} + +func (server *ssntpTestServer) ConnectNotify(uuid string, role uint32) { + switch role { + case ssntp.AGENT: + server.clients = append(server.clients, uuid) + + case 
ssntp.NETAGENT: + server.netClientsLock.Lock() + server.netClients[uuid] = true + server.netClientsLock.Unlock() + } + +} + +func (server *ssntpTestServer) DisconnectNotify(uuid string) { + for index := range server.clients { + if server.clients[index] == uuid { + server.clients = append(server.clients[:index], server.clients[index+1:]...) + return + } + } + + server.netClientsLock.Lock() + delete(server.netClients, uuid) + server.netClientsLock.Unlock() +} + +func (server *ssntpTestServer) StatusNotify(uuid string, status ssntp.Status, frame *ssntp.Frame) { +} + +func (server *ssntpTestServer) CommandNotify(uuid string, command ssntp.Command, frame *ssntp.Frame) { + var result cmdResult + var nn bool + + payload := frame.Payload + + server.cmdChansLock.Lock() + c, ok := server.cmdChans[command] + server.cmdChansLock.Unlock() + + switch command { + case ssntp.START: + var startCmd payloads.Start + + err := yaml.Unmarshal(payload, &startCmd) + + if err == nil { + resources := startCmd.Start.RequestedResources + + for i := range resources { + if resources[i].Type == payloads.NetworkNode { + nn = true + break + } + } + + if nn { + server.netClientsLock.RLock() + for key := range server.netClients { + server.ssntp.SendCommand(key, command, frame.Payload) + break + } + server.netClientsLock.RUnlock() + } else if len(server.clients) > 0 { + index := rand.Intn(len(server.clients)) + server.ssntp.SendCommand(server.clients[index], command, frame.Payload) + } + } + + if ok { + if err != nil { + result.err = err + } else { + result.instanceUUID = startCmd.Start.InstanceUUID + result.tenantUUID = startCmd.Start.TenantUUID + result.cnci = nn + } + + } + + case ssntp.DELETE: + if ok { + var delCmd payloads.Delete + + err := yaml.Unmarshal(payload, &delCmd) + if err != nil { + result.err = err + } else { + result.instanceUUID = delCmd.Delete.InstanceUUID + } + } + + case ssntp.STOP: + if ok { + var stopCmd payloads.Stop + + err := yaml.Unmarshal(payload, &stopCmd) + if err != 
nil { + result.err = err + } else { + result.instanceUUID = stopCmd.Stop.InstanceUUID + server.ssntp.SendCommand(stopCmd.Stop.WorkloadAgentUUID, command, frame.Payload) + } + } + + case ssntp.RESTART: + if ok { + var restartCmd payloads.Restart + + err := yaml.Unmarshal(payload, &restartCmd) + if err != nil { + result.err = err + } else { + result.instanceUUID = restartCmd.Restart.InstanceUUID + server.ssntp.SendCommand(restartCmd.Restart.WorkloadAgentUUID, command, frame.Payload) + } + } + + case ssntp.EVACUATE: + if ok { + var evacCmd payloads.Evacuate + + err := yaml.Unmarshal(payload, &evacCmd) + if err != nil { + result.err = err + } else { + result.nodeUUID = evacCmd.Evacuate.WorkloadAgentUUID + } + } + } + + if ok { + server.cmdChansLock.Lock() + delete(server.cmdChans, command) + server.cmdChansLock.Unlock() + + c <- result + + close(c) + } +} + +func (server *ssntpTestServer) EventNotify(uuid string, event ssntp.Event, frame *ssntp.Frame) { +} + +func (server *ssntpTestServer) ErrorNotify(uuid string, error ssntp.Error, frame *ssntp.Frame) { +} + +type ssntpTestClient struct { + ssntp ssntp.Client + name string + instances []payloads.InstanceStat + ticker *time.Ticker + uuid string + role ssntp.Role + startFail bool + startFailReason payloads.StartFailureReason + stopFail bool + stopFailReason payloads.StopFailureReason + restartFail bool + restartFailReason payloads.RestartFailureReason +} + +func (client *ssntpTestClient) ConnectNotify() { +} + +func (client *ssntpTestClient) DisconnectNotify() { +} + +func (client *ssntpTestClient) StatusNotify(status ssntp.Status, frame *ssntp.Frame) { +} + +func (client *ssntpTestClient) CommandNotify(command ssntp.Command, frame *ssntp.Frame) { + var start payloads.Start + payload := frame.Payload + + switch command { + case ssntp.START: + err := yaml.Unmarshal(payload, &start) + if err != nil { + return + } + + if client.role == ssntp.NETAGENT { + networking := start.Start.Networking + + 
client.sendConcentratorAddedEvent(start.Start.InstanceUUID, start.Start.TenantUUID, networking.VnicMAC) + return + } + + if !client.startFail { + istat := payloads.InstanceStat{ + InstanceUUID: start.Start.InstanceUUID, + State: payloads.Running, + MemoryUsageMB: 0, + DiskUsageMB: 0, + CPUUsage: 0, + } + + client.instances = append(client.instances, istat) + } else { + client.sendStartFailure(start.Start.InstanceUUID, client.startFailReason) + } + + case ssntp.STOP: + var stopCmd payloads.Stop + + err := yaml.Unmarshal(payload, &stopCmd) + if err != nil { + return + } + + if !client.stopFail { + for i := range client.instances { + istat := client.instances[i] + if istat.InstanceUUID == stopCmd.Stop.InstanceUUID { + client.instances[i].State = payloads.Exited + } + } + } else { + client.sendStopFailure(stopCmd.Stop.InstanceUUID, client.stopFailReason) + } + + case ssntp.RESTART: + var restartCmd payloads.Restart + + err := yaml.Unmarshal(payload, &restartCmd) + if err != nil { + return + } + + if !client.restartFail { + for i := range client.instances { + istat := client.instances[i] + if istat.InstanceUUID == restartCmd.Restart.InstanceUUID { + client.instances[i].State = payloads.Running + } + } + } else { + client.sendRestartFailure(restartCmd.Restart.InstanceUUID, client.restartFailReason) + } + } +} + +func (client *ssntpTestClient) EventNotify(event ssntp.Event, frame *ssntp.Frame) { +} + +func (client *ssntpTestClient) ErrorNotify(error ssntp.Error, frame *ssntp.Frame) { +} + +func newTestClient(num int, role ssntp.Role) *ssntpTestClient { + client := &ssntpTestClient{ + name: "Test " + role.String() + strconv.Itoa(num), + uuid: uuid.Generate().String(), + role: role, + } + + config := &ssntp.Config{ + Role: uint32(role), + CAcert: *caCert, + Cert: *cert, + Log: ssntp.Log, + UUID: client.uuid, + } + + if client.ssntp.Dial(config, client) != nil { + return nil + } + + return client +} + +func (client *ssntpTestClient) sendStats() { + stat := payloads.Stat{ + 
NodeUUID: client.uuid, + MemTotalMB: 256, + MemAvailableMB: 256, + DiskTotalMB: 1024, + DiskAvailableMB: 1024, + Load: 20, + CpusOnline: 4, + NodeHostName: client.name, + Instances: client.instances, + } + + y, err := yaml.Marshal(stat) + if err != nil { + return + } + + _, err = client.ssntp.SendCommand(ssntp.STATS, y) + if err != nil { + fmt.Println(err) + } +} + +func (client *ssntpTestClient) sendDeleteEvent(uuid string) { + evt := payloads.InstanceDeletedEvent{ + InstanceUUID: uuid, + } + + event := payloads.EventInstanceDeleted{ + InstanceDeleted: evt, + } + + y, err := yaml.Marshal(event) + if err != nil { + return + } + + _, err = client.ssntp.SendEvent(ssntp.InstanceDeleted, y) + if err != nil { + fmt.Println(err) + } + +} + +func (client *ssntpTestClient) sendConcentratorAddedEvent(instanceUUID string, tenantUUID string, vnicMAC string) { + evt := payloads.ConcentratorInstanceAddedEvent{ + InstanceUUID: instanceUUID, + TenantUUID: tenantUUID, + ConcentratorIP: "192.168.0.1", + ConcentratorMAC: vnicMAC, + } + + event := payloads.EventConcentratorInstanceAdded{ + CNCIAdded: evt, + } + + y, err := yaml.Marshal(event) + if err != nil { + return + } + + _, err = client.ssntp.SendEvent(ssntp.ConcentratorInstanceAdded, y) + if err != nil { + fmt.Println(err) + } +} + +func (client *ssntpTestClient) sendStartFailure(instanceUUID string, reason payloads.StartFailureReason) { + e := payloads.ErrorStartFailure{ + InstanceUUID: instanceUUID, + Reason: reason, + } + + y, err := yaml.Marshal(e) + if err != nil { + return + } + + _, err = client.ssntp.SendError(ssntp.StartFailure, y) + if err != nil { + fmt.Println(err) + } +} + +func (client *ssntpTestClient) sendStopFailure(instanceUUID string, reason payloads.StopFailureReason) { + e := payloads.ErrorStopFailure{ + InstanceUUID: instanceUUID, + Reason: reason, + } + + y, err := yaml.Marshal(e) + if err != nil { + return + } + + _, err = client.ssntp.SendError(ssntp.StopFailure, y) + if err != nil { + fmt.Println(err) 
+ } +} + +func (client *ssntpTestClient) sendRestartFailure(instanceUUID string, reason payloads.RestartFailureReason) { + e := payloads.ErrorRestartFailure{ + InstanceUUID: instanceUUID, + Reason: reason, + } + + y, err := yaml.Marshal(e) + if err != nil { + return + } + + _, err = client.ssntp.SendError(ssntp.RestartFailure, y) + if err != nil { + fmt.Println(err) + } +} + +func startTestServer(server *ssntpTestServer) { + server.cmdChans = make(map[ssntp.Command]chan cmdResult) + server.cmdChansLock = &sync.Mutex{} + + server.netClients = make(map[string]bool) + server.netClientsLock = &sync.RWMutex{} + + serverConfig := ssntp.Config{ + Role: ssntp.SERVER, + CAcert: *caCert, + Cert: *cert, + Log: ssntp.Log, + ForwardRules: []ssntp.FrameForwardRule{ + { + Operand: ssntp.STATS, + Dest: ssntp.Controller, + }, + { + Operand: ssntp.InstanceDeleted, + Dest: ssntp.Controller, + }, + { + Operand: ssntp.ConcentratorInstanceAdded, + Dest: ssntp.Controller, + }, + { + Operand: ssntp.StartFailure, + Dest: ssntp.Controller, + }, + { + Operand: ssntp.StopFailure, + Dest: ssntp.Controller, + }, + { + Operand: ssntp.RestartFailure, + Dest: ssntp.Controller, + }, + }, + } + + go server.ssntp.Serve(&serverConfig, server) + return +} + +func addTestTenant() (tenant *types.Tenant, err error) { + /* add a new tenant */ + tuuid := uuid.Generate() + tenant, err = context.ds.AddTenant(tuuid.String()) + if err != nil { + return + } + + // Add fake CNCI + err = context.ds.AddTenantCNCI(tuuid.String(), uuid.Generate().String(), tenant.CNCIMAC) + if err != nil { + return + } + err = context.ds.AddCNCIIP(tenant.CNCIMAC, "192.168.0.1") + if err != nil { + return + } + return +} + +func BenchmarkStartSingleWorkload(b *testing.B) { + var err error + + /* add a new tenant */ + tuuid := uuid.Generate() + tenant, err := context.ds.AddTenant(tuuid.String()) + if err != nil { + b.Error(err) + } + + // Add fake CNCI + err = context.ds.AddTenantCNCI(tuuid.String(), uuid.Generate().String(), 
tenant.CNCIMAC) + if err != nil { + b.Error(err) + } + err = context.ds.AddCNCIIP(tenant.CNCIMAC, "192.168.0.1") + if err != nil { + b.Error(err) + } + + // get workload ID + wls, err := context.ds.GetWorkloads() + if err != nil || len(wls) == 0 { + b.Fatal(err) + } + + b.ResetTimer() + for n := 0; n < b.N; n++ { + _, err = context.startWorkload(wls[0].Id, tuuid.String(), 1, false, "") + if err != nil { + b.Error(err) + } + } +} + +func BenchmarkStart1000Workload(b *testing.B) { + var err error + + /* add a new tenant */ + tuuid := uuid.Generate() + tenant, err := context.ds.AddTenant(tuuid.String()) + if err != nil { + b.Error(err) + } + + // Add fake CNCI + err = context.ds.AddTenantCNCI(tuuid.String(), uuid.Generate().String(), tenant.CNCIMAC) + if err != nil { + b.Error(err) + } + err = context.ds.AddCNCIIP(tenant.CNCIMAC, "192.168.0.1") + if err != nil { + b.Error(err) + } + + // get workload ID + wls, err := context.ds.GetWorkloads() + if err != nil || len(wls) == 0 { + b.Fatal(err) + } + + b.ResetTimer() + for n := 0; n < b.N; n++ { + _, err = context.startWorkload(wls[0].Id, tuuid.String(), 1000, false, "") + if err != nil { + b.Error(err) + } + } +} + +func BenchmarkNewConfig(b *testing.B) { + var err error + + tenant, err := addTestTenant() + if err != nil { + b.Error(err) + } + + // get workload ID + wls, err := context.ds.GetWorkloads() + if err != nil || len(wls) == 0 { + b.Fatal(err) + } + + id := uuid.Generate() + + b.ResetTimer() + for n := 0; n < b.N; n++ { + _, err := newConfig(context, wls[0], id.String(), tenant.Id) + if err != nil { + b.Error(err) + } + } +} + +func TestTenantWithinBounds(t *testing.T) { + var err error + + tenant, err := addTestTenant() + if err != nil { + t.Fatal(err) + } + + /* put tenant limit of 1 instance */ + err = context.ds.AddLimit(tenant.Id, 1, 1) + if err != nil { + t.Fatal(err) + } + + wls, err := context.ds.GetWorkloads() + if err != nil || len(wls) == 0 { + t.Fatal(err) + } + + _, err = 
context.startWorkload(wls[0].Id, tenant.Id, 1, false, "") + if err != nil { + t.Fatal(err) + } +} + +func TestTenantOutOfBounds(t *testing.T) { + var err error + + /* add a new tenant */ + tenant, err := addTestTenant() + if err != nil { + t.Error(err) + } + + /* put tenant limit of 1 instance */ + _ = context.ds.AddLimit(tenant.Id, 1, 1) + + wls, err := context.ds.GetWorkloads() + if err != nil || len(wls) == 0 { + t.Fatal(err) + } + + /* try to send 2 workload start commands */ + _, err = context.startWorkload(wls[0].Id, tenant.Id, 2, false, "") + if err == nil { + t.Errorf("Not tracking limits correctly") + } +} + +// TestNewTenantHardwareAddr +// Confirm that the mac addresses generated from a given +// IP address is as expected. +func TestNewTenantHardwareAddr(t *testing.T) { + ip := net.ParseIP("172.16.0.2") + expectedMAC := "02:00:ac:10:00:02" + hw := newTenantHardwareAddr(ip) + if hw.String() != expectedMAC { + t.Error("Expected: ", expectedMAC, " Received: ", hw.String()) + } +} + +func TestStartWorkload(t *testing.T) { + tenant, err := addTestTenant() + if err != nil { + t.Fatal(err) + } + + wls, err := context.ds.GetWorkloads() + if err != nil || len(wls) == 0 { + t.Fatal(err) + } + + c := make(chan cmdResult) + server.addCmdChan(ssntp.START, c) + + instances, err := context.startWorkload(wls[0].Id, tenant.Id, 1, false, "") + if err != nil { + t.Fatal(err) + } + + if len(instances) != 1 { + t.Error(err) + } + + select { + case result := <-c: + if result.err != nil { + t.Fatal("Error parsing command yaml") + } + + if result.instanceUUID != instances[0].Id { + t.Fatal("Did not get correct Instance ID") + } + + case <-time.After(5 * time.Second): + t.Fatal("Timeout waiting for START command") + } +} + +func TestStartWorkloadLaunchCNCI(t *testing.T) { + netClient := newTestClient(0, ssntp.NETAGENT) + + wls, err := context.ds.GetWorkloads() + if err != nil || len(wls) == 0 { + t.Fatal(err) + } + + c := make(chan cmdResult) + server.addCmdChan(ssntp.START, c) 
+ + id := uuid.Generate().String() + + var instances []*types.Instance + + go func() { + instances, err = context.startWorkload(wls[0].Id, id, 1, false, "") + if err != nil { + t.Fatal(err) + } + + if len(instances) != 1 { + t.Fatal(err) + } + }() + + select { + case result := <-c: + if result.err != nil { + t.Fatal("Error parsing command yaml") + } + + if result.tenantUUID != id { + t.Fatal("Did not get correct tenant ID") + } + + if !result.cnci { + t.Fatal("this is not a CNCI launch request") + } + + case <-time.After(5 * time.Second): + t.Fatal("Timeout waiting for START command for CNCI") + } + + c = make(chan cmdResult) + server.addCmdChan(ssntp.START, c) + + select { + case result := <-c: + if result.err != nil { + t.Fatal("Error parsing command yaml") + } + + if result.instanceUUID != instances[0].Id { + t.Fatal("Did not get correct Instance ID") + } + + case <-time.After(5 * time.Second): + t.Fatal("Timeout waiting for START command") + } + + time.Sleep(1 * time.Second) + + tenant, err := context.ds.GetTenant(id) + if err != nil { + t.Fatal(err) + } + + if tenant.CNCIIP == "" { + t.Fatal("CNCI Info not updated") + } + + netClient.ssntp.Close() +} + +// TBD: for the launch CNCI tests, I really need to create a fake +// network node and test that way. 
+ +func TestDeleteInstance(t *testing.T) { + tenant, err := addTestTenant() + if err != nil { + t.Fatal(err) + } + + wls, err := context.ds.GetWorkloads() + if err != nil || len(wls) == 0 { + t.Fatal(err) + } + + client := newTestClient(0, ssntp.AGENT) + + c := make(chan cmdResult) + server.addCmdChan(ssntp.START, c) + + instances, err := context.startWorkload(wls[0].Id, tenant.Id, 1, false, "") + if err != nil { + t.Fatal(err) + } + + if len(instances) != 1 { + t.Error(err) + } + + select { + case result := <-c: + if result.err != nil { + t.Fatal("Error parsing command yaml") + } + + if result.instanceUUID != instances[0].Id { + t.Fatal("Did not get correct Instance ID") + } + + case <-time.After(5 * time.Second): + t.Fatal("Timeout waiting for START command") + } + + time.Sleep(1 * time.Second) + + client.sendStats() + + c = make(chan cmdResult) + server.addCmdChan(ssntp.DELETE, c) + + time.Sleep(1 * time.Second) + + err = context.deleteInstance(instances[0].Id) + if err != nil { + t.Fatal(err) + } + + select { + case result := <-c: + if result.err != nil { + t.Fatal("Error parsing command yaml") + } + + if result.instanceUUID != instances[0].Id { + t.Fatal("Did not get correct Instance ID") + } + + case <-time.After(5 * time.Second): + t.Fatal("Timeout waiting for DELETE command") + } + + client.ssntp.Close() +} + +func TestStopInstance(t *testing.T) { + tenant, err := addTestTenant() + if err != nil { + t.Fatal(err) + } + + wls, err := context.ds.GetWorkloads() + if err != nil || len(wls) == 0 { + t.Fatal(err) + } + + client := newTestClient(0, ssntp.AGENT) + + c := make(chan cmdResult) + server.addCmdChan(ssntp.START, c) + + instances, err := context.startWorkload(wls[0].Id, tenant.Id, 1, false, "") + if err != nil { + t.Fatal(err) + } + + if len(instances) != 1 { + t.Error(err) + } + + select { + case result := <-c: + if result.err != nil { + t.Fatal("Error parsing command yaml") + } + + if result.instanceUUID != instances[0].Id { + t.Fatal("Did not get 
correct Instance ID") + } + + case <-time.After(5 * time.Second): + t.Fatal("Timeout waiting for START command") + } + + time.Sleep(1 * time.Second) + + client.sendStats() + + c = make(chan cmdResult) + server.addCmdChan(ssntp.STOP, c) + + time.Sleep(1 * time.Second) + + err = context.stopInstance(instances[0].Id) + if err != nil { + t.Fatal(err) + } + + select { + case result := <-c: + if result.err != nil { + t.Fatal("Error parsing command yaml") + } + + if result.instanceUUID != instances[0].Id { + t.Fatal("Did not get correct Instance ID") + } + + case <-time.After(5 * time.Second): + t.Fatal("Timeout waiting for STOP command") + } + + client.ssntp.Close() +} + +func TestRestartInstance(t *testing.T) { + tenant, err := addTestTenant() + if err != nil { + t.Fatal(err) + } + + wls, err := context.ds.GetWorkloads() + if err != nil || len(wls) == 0 { + t.Fatal(err) + } + + client := newTestClient(0, ssntp.AGENT) + + c := make(chan cmdResult) + server.addCmdChan(ssntp.START, c) + + instances, err := context.startWorkload(wls[0].Id, tenant.Id, 1, false, "") + if err != nil { + t.Fatal(err) + } + + if len(instances) != 1 { + t.Error(err) + } + + select { + case result := <-c: + if result.err != nil { + t.Fatal("Error parsing command yaml") + } + + if result.instanceUUID != instances[0].Id { + t.Fatal("Did not get correct Instance ID") + } + + case <-time.After(5 * time.Second): + t.Fatal("Timeout waiting for START command") + } + + time.Sleep(1 * time.Second) + + client.sendStats() + + c = make(chan cmdResult) + server.addCmdChan(ssntp.STOP, c) + + time.Sleep(1 * time.Second) + + err = context.stopInstance(instances[0].Id) + if err != nil { + t.Fatal(err) + } + + select { + case result := <-c: + if result.err != nil { + t.Fatal("Error parsing command yaml") + } + + if result.instanceUUID != instances[0].Id { + t.Fatal("Did not get correct Instance ID") + } + + case <-time.After(5 * time.Second): + t.Fatal("Timeout waiting for STOP command") + } + + // now attempt to 
restart + time.Sleep(1 * time.Second) + + client.sendStats() + + c = make(chan cmdResult) + server.addCmdChan(ssntp.RESTART, c) + + time.Sleep(1 * time.Second) + + err = context.restartInstance(instances[0].Id) + if err != nil { + t.Fatal(err) + } + + select { + case result := <-c: + if result.err != nil { + t.Fatal("Error parsing command yaml") + } + + if result.instanceUUID != instances[0].Id { + t.Fatal("Did not get correct Instance ID") + } + case <-time.After(5 * time.Second): + t.Fatal("Timeout waiting for RESTART command") + } + + client.ssntp.Close() +} + +func TestEvacuateNode(t *testing.T) { + client := newTestClient(0, ssntp.AGENT) + + c := make(chan cmdResult) + server.addCmdChan(ssntp.EVACUATE, c) + + // ok to not send workload first? + + err := context.evacuateNode(client.uuid) + if err != nil { + t.Error(err) + } + + select { + case result := <-c: + if result.err != nil { + t.Fatal("Error parsing command yaml") + } + + if result.nodeUUID != client.uuid { + t.Fatal("Did not get node ID") + } + + case <-time.After(5 * time.Second): + t.Fatal("Timeout waiting for EVACUATE command") + } + + client.ssntp.Close() +} + +func TestInstanceDeletedEvent(t *testing.T) { + tenant, err := addTestTenant() + if err != nil { + t.Fatal(err) + } + + wls, err := context.ds.GetWorkloads() + if err != nil || len(wls) == 0 { + t.Fatal(err) + } + + client := newTestClient(0, ssntp.AGENT) + + instances, err := context.startWorkload(wls[0].Id, tenant.Id, 1, false, "") + if err != nil { + t.Fatal(err) + } + + if len(instances) != 1 { + t.Error(err) + } + + time.Sleep(1 * time.Second) + + client.sendStats() + + time.Sleep(1 * time.Second) + + // right now I don't have this forwarded to the client + // so this step is probably not necessary + err = context.deleteInstance(instances[0].Id) + if err != nil { + t.Fatal(err) + } + + time.Sleep(1 * time.Second) + + client.sendDeleteEvent(instances[0].Id) + + time.Sleep(1 * time.Second) + + // try to get instance info + _, _, err = 
context.ds.GetInstanceInfo(instances[0].Id) + if err == nil { + t.Error("Instance not deleted") + } + + client.ssntp.Close() +} + +func TestLaunchCNCI(t *testing.T) { + netClient := newTestClient(0, ssntp.NETAGENT) + + c := make(chan cmdResult) + server.addCmdChan(ssntp.START, c) + + id := uuid.Generate().String() + + // this blocks till it get success or failure + go context.addTenant(id) + + select { + case result := <-c: + if result.err != nil { + t.Fatal("Error parsing command yaml") + } + + if result.tenantUUID != id { + t.Fatal("Did not get correct tenant ID") + } + + if !result.cnci { + t.Fatal("this is not a CNCI launch request") + } + + case <-time.After(5 * time.Second): + t.Fatal("Timeout waiting for START command") + } + + time.Sleep(2 * time.Second) + + tenant, err := context.ds.GetTenant(id) + if err != nil || tenant == nil { + t.Fatal(err) + } + + if tenant.CNCIIP == "" { + t.Fatal("CNCI Info not updated") + } + + netClient.ssntp.Close() +} + +func TestStartFailure(t *testing.T) { + tenant, err := addTestTenant() + if err != nil { + t.Fatal(err) + } + + wls, err := context.ds.GetWorkloads() + if err != nil || len(wls) == 0 { + t.Fatal(err) + } + + client := newTestClient(0, ssntp.AGENT) + client.startFail = true + client.startFailReason = payloads.FullCloud + + c := make(chan cmdResult) + server.addCmdChan(ssntp.START, c) + + instances, err := context.startWorkload(wls[0].Id, tenant.Id, 1, false, "") + if err != nil { + t.Fatal(err) + } + + if len(instances) != 1 { + t.Error(err) + } + + select { + case result := <-c: + if result.err != nil { + t.Fatal("Error parsing command yaml") + } + + if result.instanceUUID != instances[0].Id { + t.Fatal("Did not get correct Instance ID") + } + + case <-time.After(5 * time.Second): + t.Fatal("Timeout waiting for START command") + } + + // since we had a start failure, we should confirm that the + // instance is no longer pending in the database + client.ssntp.Close() +} + +func TestStopFailure(t *testing.T) { + 
context.ds.ClearLog() + + tenant, err := addTestTenant() + if err != nil { + t.Fatal(err) + } + + wls, err := context.ds.GetWorkloads() + if err != nil || len(wls) == 0 { + t.Fatal(err) + } + + client := newTestClient(0, ssntp.AGENT) + client.stopFail = true + client.stopFailReason = payloads.StopNoInstance + + c := make(chan cmdResult) + server.addCmdChan(ssntp.START, c) + + instances, err := context.startWorkload(wls[0].Id, tenant.Id, 1, false, "") + if err != nil { + t.Fatal(err) + } + + if len(instances) != 1 { + t.Error(err) + } + + select { + case result := <-c: + if result.err != nil { + t.Fatal("Error parsing command yaml") + } + + if result.instanceUUID != instances[0].Id { + t.Fatal("Did not get correct Instance ID") + } + + case <-time.After(5 * time.Second): + t.Fatal("Timeout waiting for START command") + } + + time.Sleep(1 * time.Second) + + client.sendStats() + + time.Sleep(1 * time.Second) + + c = make(chan cmdResult) + server.addCmdChan(ssntp.STOP, c) + + time.Sleep(1 * time.Second) + + err = context.stopInstance(instances[0].Id) + if err != nil { + t.Fatal(err) + } + + select { + case result := <-c: + if result.err != nil { + t.Fatal("Error parsing command yaml") + } + + if result.instanceUUID != instances[0].Id { + t.Fatal("Did not get correct Instance ID") + } + + case <-time.After(5 * time.Second): + t.Fatal("Timeout waiting for STOP command") + } + + time.Sleep(1 * time.Second) + + client.ssntp.Close() + + // the response to a stop failure is to log the failure + entries, err := context.ds.GetEventLog() + if err != nil { + t.Fatal(err) + } + + expectedMsg := fmt.Sprintf("Stop Failure %s: %s", instances[0].Id, client.stopFailReason.String()) + + for i := range entries { + if entries[i].Message == expectedMsg { + return + } + } + t.Error("Did not find failure message in Log") +} + +func TestRestartFailure(t *testing.T) { + context.ds.ClearLog() + + tenant, err := addTestTenant() + if err != nil { + t.Fatal(err) + } + + wls, err := 
context.ds.GetWorkloads() + if err != nil || len(wls) == 0 { + t.Fatal(err) + } + + client := newTestClient(0, ssntp.AGENT) + client.restartFail = true + client.restartFailReason = payloads.RestartLaunchFailure + + c := make(chan cmdResult) + server.addCmdChan(ssntp.START, c) + + instances, err := context.startWorkload(wls[0].Id, tenant.Id, 1, false, "") + if err != nil { + t.Fatal(err) + } + + if len(instances) != 1 { + t.Error(err) + } + + select { + case result := <-c: + if result.err != nil { + t.Fatal("Error parsing command yaml") + } + + if result.instanceUUID != instances[0].Id { + t.Fatal("Did not get correct Instance ID") + } + + case <-time.After(5 * time.Second): + t.Fatal("Timeout waiting for START command") + } + + time.Sleep(1 * time.Second) + + client.sendStats() + + time.Sleep(1 * time.Second) + + c = make(chan cmdResult) + server.addCmdChan(ssntp.STOP, c) + + err = context.stopInstance(instances[0].Id) + if err != nil { + t.Fatal(err) + } + + select { + case result := <-c: + if result.err != nil { + t.Fatal("Error parsing command yaml") + } + + if result.instanceUUID != instances[0].Id { + t.Fatal("Did not get correct Instance ID") + } + + case <-time.After(5 * time.Second): + t.Fatal("Timeout waiting for STOP command") + } + + time.Sleep(1 * time.Second) + + client.sendStats() + + time.Sleep(1 * time.Second) + + c = make(chan cmdResult) + server.addCmdChan(ssntp.RESTART, c) + + err = context.restartInstance(instances[0].Id) + if err != nil { + t.Fatal(err) + } + + select { + case result := <-c: + if result.err != nil { + t.Fatal("Error parsing command yaml") + } + + if result.instanceUUID != instances[0].Id { + t.Fatal("Did not get correct Instance ID") + } + case <-time.After(5 * time.Second): + t.Fatal("Timeout waiting for RESTART command") + } + + time.Sleep(1 * time.Second) + + client.ssntp.Close() + + // the response to a restart failure is to log the failure + entries, err := context.ds.GetEventLog() + if err != nil { + t.Fatal(err) + } + + 
expectedMsg := fmt.Sprintf("Restart Failure %s: %s", instances[0].Id, client.restartFailReason.String()) + + for i := range entries { + if entries[i].Message == expectedMsg { + return + } + } + t.Error("Did not find failure message in Log") +} + +func TestNoNetwork(t *testing.T) { + nn := true + + noNetwork = &nn + + id := uuid.Generate().String() + + wls, err := context.ds.GetWorkloads() + if err != nil || len(wls) == 0 { + t.Fatal(err) + } + + c := make(chan cmdResult) + server.addCmdChan(ssntp.START, c) + + instances, err := context.startWorkload(wls[0].Id, id, 1, false, "") + if err != nil { + t.Fatal(err) + } + + if len(instances) != 1 { + t.Error(err) + } + + select { + case result := <-c: + if result.err != nil { + t.Fatal("Error parsing command yaml") + } + + if result.instanceUUID != instances[0].Id { + t.Fatal("Did not get correct Instance ID") + } + + case <-time.After(5 * time.Second): + t.Fatal("Timeout waiting for START command") + } +} + +var testClients []*ssntpTestClient +var context *controller +var server ssntpTestServer + +func TestMain(m *testing.M) { + flag.Parse() + + // create fake ssntp server + startTestServer(&server) + defer server.ssntp.Stop() + + context = new(controller) + context.ds = new(datastore.Datastore) + err := context.ds.Connect("./ciao-controller-test.db", "./ciao-controller-test-tdb.db") + if err != nil { + os.Exit(1) + } + + err = context.ds.Init(*tablesInitPath, *workloadsPath) + if err != nil { + os.Exit(1) + } + + config := &ssntp.Config{ + URI: "localhost", + CAcert: *caCert, + Cert: *cert, + Role: ssntp.Controller, + } + + context.client, err = newSSNTPClient(context, config) + if err != nil { + os.Exit(1) + } + + code := m.Run() + + context.client.Disconnect() + context.ds.Disconnect() + + os.Remove("./ciao-controller-test.db") + os.Remove("./ciao-controller-test.db-shm") + os.Remove("./ciao-controller-test.db-wal") + os.Remove("./ciao-controller-test-tdb.db") + os.Remove("./ciao-controller-test-tdb.db-shm") + 
os.Remove("./ciao-controller-test-tdb.db-wal") + + os.Exit(code) +} diff --git a/ciao-controller/docker-ubuntu.yaml b/ciao-controller/docker-ubuntu.yaml new file mode 100644 index 000000000..a0d142610 --- /dev/null +++ b/ciao-controller/docker-ubuntu.yaml @@ -0,0 +1,5 @@ +--- +#cloud-config +runcmd: + - [ /usr/bin/python3, -m, http.server] +... diff --git a/ciao-controller/framestats.gtpl b/ciao-controller/framestats.gtpl new file mode 100644 index 000000000..720cb4d35 --- /dev/null +++ b/ciao-controller/framestats.gtpl @@ -0,0 +1,147 @@ + + + + + + + + +
+

Summary

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Number of Instances Launched
Total Elapsed Time to Launch All Instances
Average Elapsed Time Per Instance
Average Elapsed Time for Controller
Average Elapsed Time for Launcher
Average Elapsed Time for Scheduler
Controller Variance
Launcher Variance
Scheduler Variance
+
+
+
+

Component Elapsed Time per Frame

+
+
+ + diff --git a/ciao-controller/identity.go b/ciao-controller/identity.go new file mode 100644 index 000000000..b082d12c8 --- /dev/null +++ b/ciao-controller/identity.go @@ -0,0 +1,280 @@ +/* +// Copyright (c) 2016 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +*/ + +package main + +import ( + "errors" + "github.com/golang/glog" + "github.com/mitchellh/mapstructure" + "github.com/rackspace/gophercloud" + "github.com/rackspace/gophercloud/openstack" + v3tokens "github.com/rackspace/gophercloud/openstack/identity/v3/tokens" +) + +type identity struct { + scV3 *gophercloud.ServiceClient +} + +type identityConfig struct { + endpoint string + serviceUserName string + servicePassword string +} + +// Project holds project information extracted from the keystone response. +type Project struct { + ID string `mapstructure:"id"` + Name string `mapstructure:"name"` +} + +// RoleEntry contains the name of a role extracted from the keystone response. +type RoleEntry struct { + Name string `mapstructure:"name"` +} + +// Roles contains a list of role names extracted from the keystone response. +type Roles struct { + Entries []RoleEntry +} + +// Endpoint contains endpoint information extracted from the keystone response. 
+type Endpoint struct { + ID string `mapstructure:"id"` + Region string `mapstructure:"region"` + Interface string `mapstructure:"interface"` + URL string `mapstructure:"url"` +} + +// ServiceEntry contains information about a service extracted from the keystone response. +type ServiceEntry struct { + ID string `mapstructure:"id"` + Name string `mapstructure:"name"` + Type string `mapstructure:"type"` + Endpoints []Endpoint `mapstructure:"endpoints"` +} + +// Services is a list of ServiceEntry structs +// These structs contain information about the services keystone knows about. +type Services struct { + Entries []ServiceEntry +} + +type getResult struct { + v3tokens.GetResult +} + +// extractProject +// Ideally we would actually contribute this functionality +// back to the gophercloud project, but for now we extend +// their object to allow us to get project information out +// of the response from the GET token validation request. +func (r getResult) extractProject() (*Project, error) { + if r.Err != nil { + glog.V(2).Info(r.Err) + return nil, r.Err + } + + // can there be more than one project? You need to test. 
+ var response struct { + Token struct { + ValidProject Project `mapstructure:"project"` + } `mapstructure:"token"` + } + + err := mapstructure.Decode(r.Body, &response) + if err != nil { + glog.V(2).Info(err) + return nil, err + } + + return &Project{ + ID: response.Token.ValidProject.ID, + Name: response.Token.ValidProject.Name, + }, nil +} + +func (r getResult) extractServices() (*Services, error) { + if r.Err != nil { + glog.V(2).Info(r.Err) + return nil, r.Err + } + + var response struct { + Token struct { + Entries []ServiceEntry `mapstructure:"catalog"` + } `mapstructure:"token"` + } + + err := mapstructure.Decode(r.Body, &response) + if err != nil { + glog.Errorf(err.Error()) + return nil, err + } + + return &Services{Entries: response.Token.Entries}, nil +} + +// extractRole +// Ideally we would actually contribute this functionality +// back to the gophercloud project, but for now we extend +// their object to allow us to get project information out +// of the response from the GET token validation request. +func (r getResult) extractRoles() (*Roles, error) { + if r.Err != nil { + glog.V(2).Info(r.Err) + return nil, r.Err + } + + var response struct { + Token struct { + ValidRoles []RoleEntry `mapstructure:"roles"` + } `mapstructure:"token"` + } + + err := mapstructure.Decode(r.Body, &response) + if err != nil { + glog.V(2).Info(err) + return nil, err + } + + return &Roles{Entries: response.Token.ValidRoles}, nil +} + +// validate +// Confirm that the token has access to the Project they are requesting +// an operation on, and that they have a role which permits them to +// access this api. 
+func (i *identity) validate(token string, tenantID string, role string) bool { + r := v3tokens.Get(i.scV3, token) + + result := getResult{r} + + p, err := result.extractProject() + if err != nil { + return false + } + + if p.ID != tenantID { + glog.V(2).Info("expected ", tenantID, " got ", p.ID) + return false + } + + roles, err := result.extractRoles() + if err != nil { + return false + } + + for i := range roles.Entries { + if roles.Entries[i].Name == role { + return true + } + } + + return false +} + +// validateServices +// Validates that a given user belonging to a tenant +// can access a service specified by its type and name. +func (i *identity) validateService(token string, tenantID string, serviceType string, serviceName string) bool { + r := v3tokens.Get(i.scV3, token) + result := getResult{r} + + p, err := result.extractProject() + if err != nil { + return false + } + + if p.ID != tenantID { + glog.Errorf("expected %s got %s\n", tenantID, p.ID) + return false + } + + services, err := result.extractServices() + if err != nil { + return false + } + + for _, e := range services.Entries { + if e.Type == serviceType { + if serviceName == "" { + return true + } + + if e.Name == serviceName { + return true + } + } + } + + return false +} + +func (i *identity) validateProjectRole(token string, project string, role string) bool { + r := v3tokens.Get(i.scV3, token) + result := getResult{r} + p, err := result.extractProject() + if err != nil { + return false + } + + if project != "" && p.Name != project { + return false + } + + roles, err := result.extractRoles() + if err != nil { + return false + } + + for i := range roles.Entries { + if roles.Entries[i].Name == role { + return true + } + } + return false +} + +func newIdentityClient(config identityConfig) (id *identity, err error) { + opt := gophercloud.AuthOptions{ + IdentityEndpoint: config.endpoint + "/v3/", + Username: config.serviceUserName, + Password: config.servicePassword, + TenantName: "service", + 
DomainID: "default", + AllowReauth: false, + } + provider, err := openstack.AuthenticatedClient(opt) + if err != nil { + return + } + + provider.ReauthFunc = func() error { + provider.TokenID = "" + return openstack.Authenticate(provider, opt) + } + + v3client := openstack.NewIdentityV3(provider) + if v3client == nil { + return nil, errors.New("Unable to get keystone V3 client") + } + + id = &identity{ + scV3: v3client, + } + return +} diff --git a/ciao-controller/instance.go b/ciao-controller/instance.go new file mode 100644 index 000000000..ad1a022e0 --- /dev/null +++ b/ciao-controller/instance.go @@ -0,0 +1,245 @@ +/* +// Copyright (c) 2016 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+*/ + +package main + +import ( + "encoding/json" + "fmt" + "github.com/01org/ciao/ciao-controller/types" + "github.com/01org/ciao/payloads" + "github.com/docker/distribution/uuid" + "github.com/golang/glog" + "gopkg.in/yaml.v2" + "net" + "time" +) + +type config struct { + sc payloads.Start + config string + cnci bool + mac string + ip string +} + +type instance struct { + types.Instance + newConfig config + context *controller + startTime time.Time +} + +func isCNCIWorkload(workload *types.Workload) bool { + for r := range workload.Defaults { + if workload.Defaults[r].Type == payloads.NetworkNode { + return true + } + } + return false +} + +func newInstance(context *controller, tenantID string, workload *types.Workload) (i *instance, err error) { + id := uuid.Generate() + + config, err := newConfig(context, workload, id.String(), tenantID) + if err != nil { + return + } + + newInstance := types.Instance{ + TenantId: tenantID, + WorkloadId: workload.Id, + State: payloads.Pending, + Id: id.String(), + CNCI: config.cnci, + IPAddress: config.ip, + MACAddress: config.mac, + } + + i = &instance{ + context: context, + newConfig: config, + Instance: newInstance, + } + return +} + +func (i *instance) Add() (err error) { + if i.CNCI == false { + ds := i.context.ds + go ds.AddInstance(&i.Instance) + usage := i.newConfig.GetResources() + err = ds.AddUsage(i.TenantId, i.Id, usage) + } else { + i.context.ds.AddTenantCNCI(i.TenantId, i.Id, i.MACAddress) + } + return +} + +func (i *instance) Clean() (err error) { + if i.CNCI == false { + i.context.ds.ReleaseTenantIP(i.TenantId, i.IPAddress) + } + return +} + +func (i *instance) Allowed() (b bool, err error) { + if i.CNCI == true { + // should I bother to check the tenant id exists? 
+ return true, nil + } + + ds := i.context.ds + tenant, err := ds.GetTenant(i.TenantId) + if err != nil { + return false, err + } + + usage := i.newConfig.GetResources() + + for _, res := range tenant.Resources { + // check instance count separately + if res.Rtype == 1 { + if res.OverLimit(1) { + return false, nil + } + continue + } + if res.OverLimit(usage[res.Rname]) { + return false, nil + } + } + return true, nil +} + +func (c *config) GetResources() (resources map[string]int) { + rr := c.sc.Start.RequestedResources + + // convert RequestedResources into a map[string]int + resources = make(map[string]int) + for i := range rr { + resources[string(rr[i].Type)] = rr[i].Value + } + return +} + +func newConfig(context *controller, wl *types.Workload, instanceID string, tenantID string) (config config, err error) { + type UserData struct { + UUID string `json:"uuid"` + Hostname string `json:"hostname"` + } + var userData UserData + + baseConfig := wl.Config + defaults := wl.Defaults + imageID := wl.ImageID + fwType := wl.FWType + + tenant, err := context.ds.GetTenant(tenantID) + if err != nil { + fmt.Println("unable to get tenant") + } + + config.cnci = isCNCIWorkload(wl) + + var networking payloads.NetworkResources + + // do we ever need to save the vnic uuid? + networking.VnicUUID = uuid.Generate().String() + + if config.cnci == false { + ipAddress, err := context.ds.AllocateTenantIP(tenantID) + if err != nil { + fmt.Println("Unable to allocate IP address: ", err) + return config, err + } + + networking.VnicMAC = newTenantHardwareAddr(ipAddress).String() + + // send in CIDR notation? 
+ networking.PrivateIP = ipAddress.String() + config.ip = ipAddress.String() + mask := net.IPv4Mask(255, 255, 255, 0) + ipnet := net.IPNet{ + IP: ipAddress.Mask(mask), + Mask: mask, + } + networking.Subnet = ipnet.String() + networking.ConcentratorUUID = tenant.CNCIID + + // in theory we should refuse to go on if ip is null + // for now let's keep going + networking.ConcentratorIP = tenant.CNCIIP + + // set the hostname and uuid for userdata + userData.UUID = instanceID + userData.Hostname = instanceID + } else { + networking.VnicMAC = tenant.CNCIMAC + + // set the hostname and uuid for userdata + userData.UUID = instanceID + userData.Hostname = "cnci-" + tenantID + } + + // hardcode persistence until changes can be made to workload + // template datastore. Estimated resources can be blank + // for now because we don't support it yet. + startCmd := payloads.StartCmd{ + TenantUUID: tenantID, + InstanceUUID: instanceID, + ImageUUID: imageID, + FWType: payloads.Firmware(fwType), + VMType: wl.VMType, + InstancePersistence: payloads.Host, + RequestedResources: defaults, + Networking: networking, + } + + if wl.VMType == payloads.Docker { + startCmd.DockerImage = wl.ImageName + } + + cmd := payloads.Start{ + Start: startCmd, + } + config.sc = cmd + + y, err := yaml.Marshal(&config.sc) + if err != nil { + glog.Warning("error marshalling config: ", err) + } + + b, err := json.MarshalIndent(userData, "", "\t") + if err != nil { + glog.Warning("error marshalling user data: ", err) + } + + config.config = "---\n" + string(y) + "...\n" + baseConfig + "---\n" + string(b) + "\n...\n" + config.mac = networking.VnicMAC + return +} + +func newTenantHardwareAddr(ip net.IP) (hw net.HardwareAddr) { + buf := make([]byte, 6) + ipBytes := ip.To4() + buf[0] |= 2 + buf[1] = 0 + copy(buf[2:6], ipBytes) + hw = net.HardwareAddr(buf) + return +} diff --git a/ciao-controller/interface.go b/ciao-controller/interface.go new file mode 100644 index 000000000..6bf54bf5c --- /dev/null +++ 
b/ciao-controller/interface.go @@ -0,0 +1,395 @@ +/* +// Copyright (c) 2016 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +*/ + +package main + +import ( + "encoding/json" + "html/template" + "log" + "net/http" + "strconv" + "strings" + "time" +) + +func tenantDebug(w http.ResponseWriter, r *http.Request, context *controller) { + if r.Method == "GET" { + t, err := template.New("t").ParseFiles("tenantDebug.gtpl") + if err != nil { + panic(err) + } + data := *identityURL + + err = t.ExecuteTemplate(w, "tenantDebug.gtpl", data) + if err != nil { + panic(err) + } + } +} + +func login(w http.ResponseWriter, r *http.Request, context *controller) { + if r.Method == "GET" { + t, err := template.New("t").ParseFiles("login.gtpl") + if err != nil { + panic(err) + } + data := *identityURL + + err = t.ExecuteTemplate(w, "login.gtpl", data) + if err != nil { + panic(err) + } + } +} + +func framestats(w http.ResponseWriter, r *http.Request, context *controller) { + if r.Method == "GET" { + t, err := template.New("t").ParseFiles("framestats.gtpl") + if err != nil { + panic(err) + } + err = t.ExecuteTemplate(w, "framestats.gtpl", nil) + if err != nil { + panic(err) + } + } +} + +func stats(w http.ResponseWriter, r *http.Request, context *controller) { + if r.Method == "GET" { + t, err := template.New("t").ParseFiles("stats.gtpl") + if err != nil { + panic(err) + } + err = t.ExecuteTemplate(w, "stats.gtpl", nil) + if err != nil { + panic(err) + } + } else 
if r.Method == "POST" { + action := r.PostFormValue("admin_action") + switch action { + case "deleteAll", "cleanAll": + instances, err := context.ds.GetAllInstances() + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + for index := range instances { + var err error + id := instances[index].Id + if action == "deleteAll" { + if instances[index].State != "pending" { + err = context.deleteInstance(id) + } + } else if action == "cleanAll" { + if instances[index].State == "pending" { + err = context.ds.DeleteInstance(id) + } + } + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + } + + case "clearEventLog": + context.ds.ClearLog() + case "evacuate": + val := r.PostFormValue("node_ids") + nodeIds := strings.Fields(val) + for index := range nodeIds { + context.evacuateNode(nodeIds[index]) + } + case "delete", "clean", "restart", "stop": + val := r.PostFormValue("instances_ids") + instanceIDs := strings.Fields(val) + for index := range instanceIDs { + var err error + instanceID := instanceIDs[index] + if action == "delete" { + err = context.deleteInstance(instanceID) + } else if action == "clean" { + err = context.ds.DeleteInstance(instanceID) + } else if action == "stop" { + err = context.stopInstance(instanceID) + } else if action == "restart" { + err = context.restartInstance(instanceID) + } + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + } + } + http.Redirect(w, r, "/stats", http.StatusFound) + } +} + +func getBatchFrameSummaryStats(w http.ResponseWriter, r *http.Request, context *controller) { + stats, err := context.ds.GetBatchFrameSummary() + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + } + b, err := json.Marshal(stats) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + } + w.Header().Set("Content-Type", "application/json") + w.Write(b) +} + +func getBatchFrameStats(w 
http.ResponseWriter, r *http.Request, context *controller) { + if r.Method == "POST" { + label := r.PostFormValue("batch_id") + + stats, err := context.ds.GetFrameStatistics(label) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + } + + b, err := json.Marshal(stats) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + } + + w.Header().Set("Content-Type", "application/json") + w.Write(b) + } else if r.Method == "GET" { + } +} + +func getFrameStats(w http.ResponseWriter, r *http.Request, context *controller) { + if r.Method == "POST" { + label := r.PostFormValue("batch_id") + + stats, err := context.ds.GetBatchFrameStatistics(label) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + } + + b, err := json.Marshal(stats) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + } + + w.Header().Set("Content-Type", "application/json") + w.Write(b) + } else if r.Method == "GET" { + } +} + +func getEventLog(w http.ResponseWriter, r *http.Request, context *controller) { + logEntries, err := context.ds.GetEventLog() + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + } + + b, err := json.Marshal(logEntries) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + } + + w.Header().Set("Content-Type", "application/json") + w.Write(b) +} + +func getNodeSummary(w http.ResponseWriter, r *http.Request, context *controller) { + summary, err := context.ds.GetNodeSummary() + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + } + + b, err := json.Marshal(summary) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + } + + w.Header().Set("Content-Type", "application/json") + w.Write(b) +} + +func getNodeStats(w http.ResponseWriter, r *http.Request, context *controller) { + end := time.Now().UTC() + start := end.Add(-20 * time.Minute) + + statsRows, err := 
context.ds.GetNodeStats(start, end) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + } + + b, err := json.Marshal(statsRows) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + } + + w.Header().Set("Content-Type", "application/json") + w.Write(b) + // hack - clean up the stats so it doesn't get too big + _ = context.ds.ClearNodeStats(start) +} + +func getInstances(w http.ResponseWriter, r *http.Request, context *controller) { + instances, err := context.ds.GetAllInstances() + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + } + b, err := json.Marshal(instances) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + } + w.Header().Set("Content-Type", "application/json") + w.Write(b) +} + +func getWorkloads(w http.ResponseWriter, r *http.Request, context *controller) { + workloads, err := context.ds.GetWorkloads() + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + } + b, err := json.Marshal(workloads) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + } + w.Header().Set("Content-Type", "application/json") + w.Write(b) +} + +func getCNCI(w http.ResponseWriter, r *http.Request, context *controller) { + cncis, err := context.ds.GetTenantCNCISummary("") + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + } + + b, err := json.Marshal(cncis) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + } + + w.Header().Set("Content-Type", "application/json") + w.Write(b) +} + +func getNodes(w http.ResponseWriter, r *http.Request, context *controller) { + nodes, err := context.ds.GetNodes() + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + } + + b, err := json.Marshal(nodes) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + } + + w.Header().Set("Content-Type", "application/json") + w.Write(b) 
+} + +func workload(w http.ResponseWriter, r *http.Request, context *controller) { + if r.Method == "POST" { + token := r.PostFormValue("token") + + trace, err := strconv.ParseBool(r.PostFormValue("trace")) + if err != nil { + http.Error(w, "Bogus Trace value", http.StatusInternalServerError) + return + } + + label := r.PostFormValue("label") + r.ParseForm() + var tenantID string + var workloadID string + var numInstances int + + if len(r.Form["workload_id"]) > 0 { + workloadID = r.Form["workload_id"][0] + } else { + http.Error(w, "Missing workload", http.StatusInternalServerError) + return + } + + if len(r.Form["tenant_id"]) > 0 { + tenantID = r.Form["tenant_id"][0] + } else { + http.Error(w, "Missing tenant ID", http.StatusInternalServerError) + return + } + + numInstances, _ = strconv.Atoi(r.Form["num_instances"][0]) + + ok := context.id.validate(token, tenantID, "user") + if ok { + _, err := context.startWorkload(workloadID, tenantID, numInstances, trace, label) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + } else { + http.Error(w, "Token Validation Failed", http.StatusInternalServerError) + return + } + } +} + +func createDebugInterface(context *controller) { + http.HandleFunc("/workload", func(w http.ResponseWriter, r *http.Request) { + workload(w, r, context) + }) + http.HandleFunc("/tenantDebug", func(w http.ResponseWriter, r *http.Request) { + tenantDebug(w, r, context) + }) + http.HandleFunc("/stats", func(w http.ResponseWriter, r *http.Request) { + stats(w, r, context) + }) + http.HandleFunc("/login", func(w http.ResponseWriter, r *http.Request) { + login(w, r, context) + }) + http.HandleFunc("/framestats", func(w http.ResponseWriter, r *http.Request) { + framestats(w, r, context) + }) + http.HandleFunc("/getNodeStats", func(w http.ResponseWriter, r *http.Request) { + getNodeStats(w, r, context) + }) + http.HandleFunc("/getInstances", func(w http.ResponseWriter, r *http.Request) { + getInstances(w, r, 
context) + }) + http.HandleFunc("/getEventLog", func(w http.ResponseWriter, r *http.Request) { + getEventLog(w, r, context) + }) + http.HandleFunc("/getNodeSummary", func(w http.ResponseWriter, r *http.Request) { + getNodeSummary(w, r, context) + }) + http.HandleFunc("/getWorkloads", func(w http.ResponseWriter, r *http.Request) { + getWorkloads(w, r, context) + }) + http.HandleFunc("/getCNCI", func(w http.ResponseWriter, r *http.Request) { + getCNCI(w, r, context) + }) + http.HandleFunc("/getFrameStats", func(w http.ResponseWriter, r *http.Request) { + getFrameStats(w, r, context) + }) + http.HandleFunc("/getBatchFrameSummaryStats", func(w http.ResponseWriter, r *http.Request) { + getBatchFrameSummaryStats(w, r, context) + }) + http.HandleFunc("/getBatchFrameStats", func(w http.ResponseWriter, r *http.Request) { + getBatchFrameStats(w, r, context) + }) + http.HandleFunc("/getNodes", func(w http.ResponseWriter, r *http.Request) { + getNodes(w, r, context) + }) + httpPort := ":" + strconv.Itoa(*port) + log.Fatal(http.ListenAndServeTLS(httpPort, *httpsCAcert, *httpsKey, nil)) +} diff --git a/ciao-controller/internal/datastore/TODO b/ciao-controller/internal/datastore/TODO new file mode 100644 index 000000000..6c960cd52 --- /dev/null +++ b/ciao-controller/internal/datastore/TODO @@ -0,0 +1,5 @@ +1. stop requiring csv files at Init time - you should populate anything + absolutely required programmatically. +2. Write test for releasing tenant IPs which releases an entire subnet +3. create schema versioning and migration +4. everytime you fail to get a tx, you should not use it. 
diff --git a/ciao-controller/internal/datastore/datastore.go b/ciao-controller/internal/datastore/datastore.go new file mode 100644 index 000000000..42e0c64fe --- /dev/null +++ b/ciao-controller/internal/datastore/datastore.go @@ -0,0 +1,3374 @@ +/* +// Copyright (c) 2016 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +*/ + +// Package datastore retrieves stores data for the ciao controller. +// This package caches most data in memory, and uses a sql +// database as persistent storage. +package datastore + +import ( + "crypto/rand" + "database/sql" + "encoding/binary" + "encoding/csv" + "errors" + "fmt" + "github.com/01org/ciao/ciao-controller/types" + "github.com/01org/ciao/payloads" + "github.com/golang/glog" + sqlite3 "github.com/mattn/go-sqlite3" + "io/ioutil" + "net" + "os" + "reflect" + "sort" + "strconv" + "strings" + "sync" + "time" +) + +type userEventType string + +const ( + userInfo userEventType = "info" + userWarn userEventType = "warn" + userError userEventType = "error" +) + +type workload struct { + types.Workload + filename string +} + +type tenant struct { + types.Tenant + network map[int]map[int]bool + subnets []int + instances map[string]*types.Instance +} + +type node struct { + types.Node + instances map[string]*types.Instance +} + +// Datastore provides context for the datastore package. 
+type Datastore struct { + db *sql.DB + tdb *sql.DB + dbName string + tdbName string + tables []persistentData + tableInitPath string + workloadsPath string + dbLock *sync.Mutex + tdbLock *sync.RWMutex + + cnciAddedChans map[string]chan bool + cnciAddedLock *sync.Mutex + + nodeLastStat map[string]payloads.CiaoComputeNode + nodeLastStatLock *sync.RWMutex + + instanceLastStat map[string]payloads.CiaoServerStats + instanceLastStatLock *sync.RWMutex + + tenants map[string]*tenant + tenantsLock *sync.RWMutex + allSubnets map[int]bool + + workloads map[string]*workload + workloadsLock *sync.RWMutex + + nodes map[string]node + nodesLock *sync.RWMutex + + instances map[string]*types.Instance + instancesLock *sync.RWMutex + + tenantUsage map[string][]payloads.CiaoUsage + tenantUsageLock *sync.RWMutex +} + +type persistentData interface { + Init() error + Populate() error + Create(...string) error + Name() string + DB() *sql.DB +} + +type namedData struct { + ds *Datastore + name string + db *sql.DB +} + +func (d namedData) Create(record ...string) (err error) { + err = d.ds.create(d.name, record) + return +} + +func (d namedData) Populate() (err error) { + return nil +} + +func (d namedData) Name() (name string) { + return d.name +} + +func (d namedData) DB() *sql.DB { + return d.db +} + +func (d namedData) ReadCsv() (records [][]string, err error) { + f, err := os.Open(fmt.Sprintf("%s/%s.csv", d.ds.tableInitPath, d.name)) + if err != nil { + return + } + defer f.Close() + + r := csv.NewReader(f) + r.TrimLeadingSpace = true + r.Comment = '#' + + records, err = r.ReadAll() + if err != nil { + return + } + return +} + +type logData struct { + namedData +} + +func (d logData) Init() (err error) { + cmd := `CREATE TABLE IF NOT EXISTS log + ( + id integer primary key, + tenant_id varchar(32), + type string, + message string, + timestamp DATETIME DEFAULT CURRENT_TIMESTAMP NOT NULL + );` + err = d.ds.exec(d.db, cmd) + return +} + +type subnetData struct { + namedData +} + +func (d 
subnetData) Init() (err error) { + cmd := `CREATE TABLE IF NOT EXISTS tenant_network + ( + tenant_id varchar(32), + subnet int, + rest int, + foreign key(tenant_id) references tenants(id) + );` + err = d.ds.exec(d.db, cmd) + return +} + +// Handling of Limit specific Data +type limitsData struct { + namedData +} + +func (d limitsData) Populate() (err error) { + lines, err := d.ReadCsv() + if err != nil { + return err + } + + for _, line := range lines { + resourceID, _ := strconv.Atoi(line[0]) + tenantID := line[1] + maxValue, _ := strconv.Atoi(line[2]) + err = d.ds.create(d.name, resourceID, tenantID, maxValue) + if err != nil { + glog.V(2).Info("could not add limit: ", err) + } + } + return +} + +func (d limitsData) Init() (err error) { + cmd := `CREATE TABLE IF NOT EXISTS limits + ( + resource_id integer, + tenant_id varchar(32), + max_value integer, + foreign key(resource_id) references resources(id), + foreign key(tenant_id) references tenants(id) + );` + err = d.ds.exec(d.db, cmd) + return +} + +// Handling of Instance specific data +type instanceData struct { + namedData +} + +func (d instanceData) Init() (err error) { + cmd := `CREATE TABLE IF NOT EXISTS instances + ( + id string primary key, + tenant_id string, + workload_id string, + mac_address string, + ip string, + foreign key(tenant_id) references tenants(id), + foreign key(workload_id) references workload_template(id), + unique(tenant_id, ip, mac_address) + );` + err = d.ds.exec(d.db, cmd) + return +} + +// Resources data +type resourceData struct { + namedData +} + +func (d resourceData) Populate() (err error) { + lines, err := d.ReadCsv() + if err != nil { + return err + } + + for _, line := range lines { + id, _ := strconv.Atoi(line[0]) + name := line[1] + err = d.ds.create(d.name, id, name) + if err != nil { + glog.V(2).Info("could not add resource: ", err) + } + } + return +} + +func (d resourceData) Init() (err error) { + cmd := `CREATE TABLE IF NOT EXISTS resources + ( + id int primary key, + 
name text + );` + err = d.ds.exec(d.db, cmd) + return +} + +// Tenants data +type tenantData struct { + namedData +} + +func (d tenantData) Populate() (err error) { + lines, err := d.ReadCsv() + if err != nil { + return err + } + + for _, line := range lines { + id := line[0] + name := line[1] + mac := line[2] + if err != nil { + glog.V(2).Info("could not add tenant: ", err) + } + err = d.ds.create(d.name, id, name, "", mac, "") + if err != nil { + glog.V(2).Info("could not add tenant: ", err) + } + } + return +} + +func (d tenantData) Init() (err error) { + cmd := `CREATE TABLE IF NOT EXISTS tenants + ( + id varchar(32) primary key, + name text, + cnci_id varchar(32) default null, + cnci_mac string default null, + cnci_ip string default null + );` + err = d.ds.exec(d.db, cmd) + return +} + +// usage data +type usageData struct { + namedData +} + +func (d usageData) Init() (err error) { + cmd := `CREATE TABLE IF NOT EXISTS usage + ( + instance_id string, + resource_id int, + value int, + foreign key(instance_id) references instances(id), + foreign key(resource_id) references resources(id) + ); + CREATE UNIQUE INDEX IF NOT EXISTS myindex + ON usage(instance_id, resource_id);` + err = d.ds.exec(d.db, cmd) + return +} + +// workload resources +type workloadResourceData struct { + namedData +} + +func (d workloadResourceData) Populate() (err error) { + lines, err := d.ReadCsv() + if err != nil { + return err + } + + for _, line := range lines { + workloadID := line[0] + resourceID, _ := strconv.Atoi(line[1]) + defaultValue, _ := strconv.Atoi(line[2]) + estimatedValue, _ := strconv.Atoi(line[3]) + mandatory, _ := strconv.Atoi(line[4]) + err = d.ds.create(d.name, workloadID, resourceID, defaultValue, estimatedValue, mandatory) + if err != nil { + glog.V(2).Info("could not add workload: ", err) + } + } + return +} + +func (d workloadResourceData) Init() (err error) { + cmd := `CREATE TABLE IF NOT EXISTS workload_resources + ( + workload_id varchar(32), + resource_id int, 
+ default_value int, + estimated_value int, + mandatory int, + foreign key(workload_id) references workload_template(id), + foreign key(resource_id) references resources(id) + ); + CREATE UNIQUE INDEX IF NOT EXISTS wlr_index + ON workload_resources(workload_id, resource_id);` + err = d.ds.exec(d.db, cmd) + return +} + +// workload template data +type workloadTemplateData struct { + namedData +} + +func (d workloadTemplateData) Populate() (err error) { + lines, err := d.ReadCsv() + if err != nil { + return err + } + + for _, line := range lines { + id := line[0] + description := line[1] + filename := line[2] + fwType := line[3] + vmType := line[4] + imageID := line[5] + imageName := line[6] + internal := line[7] + err = d.ds.create(d.name, id, description, filename, fwType, vmType, imageID, imageName, internal) + if err != nil { + glog.V(2).Info("could not add workload: ", err) + } + } + return +} + +func (d workloadTemplateData) Init() (err error) { + cmd := `CREATE TABLE IF NOT EXISTS workload_template + ( + id varchar(32) primary key, + description text, + filename text, + fw_type text, + vm_type text, + image_id varchar(32), + image_name text, + internal integer + );` + err = d.ds.exec(d.db, cmd) + return +} + +// statistics +type nodeStatisticsData struct { + namedData +} + +func (d nodeStatisticsData) Init() (err error) { + cmd := `CREATE TABLE IF NOT EXISTS node_statistics + ( + id integer primary key autoincrement not null, + node_id varchar(32), + mem_total_mb int, + mem_available_mb int, + disk_total_mb int, + disk_available_mb int, + load int, + cpus_online int, + timestamp DATETIME DEFAULT CURRENT_TIMESTAMP NOT NULL + );` + err = d.ds.exec(d.db, cmd) + return +} + +type instanceStatisticsData struct { + namedData +} + +func (d instanceStatisticsData) Init() (err error) { + cmd := `CREATE TABLE IF NOT EXISTS instance_statistics + ( + id integer primary key autoincrement not null, + instance_id varchar(32), + memory_usage_mb int, + disk_usage_mb int, + 
cpu_usage int, + state string, + node_id varchar(32), + ssh_ip string, + ssh_port int, + timestamp DATETIME DEFAULT CURRENT_TIMESTAMP NOT NULL + );` + err = d.ds.exec(d.db, cmd) + return +} + +type frameStatisticsData struct { + namedData +} + +func (d frameStatisticsData) Init() (err error) { + cmd := `CREATE TABLE IF NOT EXISTS frame_statistics + ( + id integer primary key autoincrement not null, + label string, + type string, + operand string, + start_timestamp DATETIME, + end_timestamp DATETIME + );` + err = d.ds.exec(d.db, cmd) + return +} + +type traceData struct { + namedData +} + +func (d traceData) Init() (err error) { + cmd := `CREATE TABLE IF NOT EXISTS trace_data + ( + id integer primary key autoincrement not null, + frame_id int, + ssntp_uuid varchar(32), + tx_timestamp DATETIME, + rx_timestamp DATETIME, + foreign key(frame_id) references frame_statistics(id) + );` + err = d.ds.exec(d.db, cmd) + return +} + +func (ds *Datastore) exec(db *sql.DB, cmd string) (err error) { + glog.V(2).Info("exec: ", cmd) + + tx, err := db.Begin() + if err != nil { + return + } + + _, err = tx.Exec(cmd) + if err != nil { + tx.Rollback() + return + } + tx.Commit() + return +} + +func (ds *Datastore) create(tableName string, record ...interface{}) (err error) { + // get database location of this table + db := ds.getTableDB(tableName) + + if db == nil { + err = errors.New("Bad table name") + return + } + + var values []string + for _, val := range record { + v := reflect.ValueOf(val) + var newval string + // enclose strings in quotes to not confuse sqlite + if v.Kind() == reflect.String { + newval = fmt.Sprintf("'%v'", val) + } else { + newval = fmt.Sprintf("%v", val) + } + values = append(values, newval) + } + args := strings.Join(values, ",") + cmd := "INSERT or IGNORE into " + tableName + " VALUES (" + args + ");" + err = ds.exec(db, cmd) + return +} + +func (ds *Datastore) getTableDB(name string) *sql.DB { + for _, table := range ds.tables { + n := table.Name() + if n == 
name { + return table.DB() + } + } + return nil +} + +// Init initializes the private data for the Datastore object. +// The sql tables are populated with initial data from csv +// files if this is the first time the database has been +// created. The datastore caches are also filled. +func (ds *Datastore) Init(tableInitPath string, workloadsPath string) (err error) { + ds.dbLock = &sync.Mutex{} + ds.tdbLock = &sync.RWMutex{} + + ds.tables = []persistentData{ + resourceData{namedData{ds: ds, name: "resources", db: ds.db}}, + tenantData{namedData{ds: ds, name: "tenants", db: ds.db}}, + limitsData{namedData{ds: ds, name: "limits", db: ds.db}}, + instanceData{namedData{ds: ds, name: "instances", db: ds.db}}, + workloadTemplateData{namedData{ds: ds, name: "workload_template", db: ds.db}}, + workloadResourceData{namedData{ds: ds, name: "workload_resources", db: ds.db}}, + usageData{namedData{ds: ds, name: "usage", db: ds.db}}, + nodeStatisticsData{namedData{ds: ds, name: "node_statistics", db: ds.tdb}}, + logData{namedData{ds: ds, name: "log", db: ds.tdb}}, + subnetData{namedData{ds: ds, name: "tenant_network", db: ds.db}}, + instanceStatisticsData{namedData{ds: ds, name: "instance_statistics", db: ds.tdb}}, + frameStatisticsData{namedData{ds: ds, name: "frame_statistics", db: ds.tdb}}, + traceData{namedData{ds: ds, name: "trace_data", db: ds.tdb}}, + } + + ds.tableInitPath = tableInitPath + ds.workloadsPath = workloadsPath + + for _, table := range ds.tables { + err = table.Init() + if err != nil { + return + } + } + + for _, table := range ds.tables { + // Populate failures are not fatal, because it could just mean + // there's no initial data to populate + _ = table.Populate() + } + + ds.cnciAddedChans = make(map[string]chan bool) + ds.cnciAddedLock = &sync.Mutex{} + + ds.nodeLastStat = make(map[string]payloads.CiaoComputeNode) + ds.nodeLastStatLock = &sync.RWMutex{} + + ds.instanceLastStat = make(map[string]payloads.CiaoServerStats) + ds.instanceLastStatLock = 
&sync.RWMutex{} + + // warning, do not use the tenant cache to get + // networking information right now. that is not + // updated, just the resources + ds.tenants = make(map[string]*tenant) + ds.allSubnets = make(map[int]bool) + ds.tenantsLock = &sync.RWMutex{} + + // cache all our instances prior to getting tenants + ds.instancesLock = &sync.RWMutex{} + ds.instances = make(map[string]*types.Instance) + + instances, err := ds.GetAllInstances() + if err != nil { + glog.Warning(err) + } else { + for i := range instances { + ds.instances[instances[i].Id] = instances[i] + } + } + + // cache our current tenants into a map that we can + // quickly index + tenants, err := ds.getTenants() + if err == nil { + for i := range tenants { + ds.tenants[tenants[i].Id] = tenants[i] + } + } + + // cache the workloads into a map so that we can + // quickly index + ds.workloadsLock = &sync.RWMutex{} + ds.workloads = make(map[string]*workload) + workloads, err := ds.getWorkloads() + if err != nil { + glog.Warning(err) + } else { + for i := range workloads { + ds.workloads[workloads[i].Id] = workloads[i] + } + } + + ds.nodesLock = &sync.RWMutex{} + ds.nodes = make(map[string]node) + + for key, i := range ds.instances { + n, ok := ds.nodes[i.NodeId] + if !ok { + newNode := types.Node{ + ID: i.NodeId, + } + n = node{ + Node: newNode, + instances: make(map[string]*types.Instance), + } + ds.nodes[i.NodeId] = n + } + ds.nodes[i.NodeId].instances[key] = i + } + + ds.tenantUsage = make(map[string][]payloads.CiaoUsage) + ds.tenantUsageLock = &sync.RWMutex{} + + return +} + +// Connect creates two sqlite3 databases. One database is for +// persistent state that needs to be restored on restart, the +// other is for transient data that does not need to be restored +// on restart. 
+func (ds *Datastore) Connect(persistentURI string, transientURI string) (err error) { + sql.Register("sqlite_attach_tdb", &sqlite3.SQLiteDriver{ + ConnectHook: func(conn *sqlite3.SQLiteConn) error { + cmd := fmt.Sprintf("ATTACH '%s' AS tdb", transientURI) + conn.Exec(cmd, nil) + return nil + }, + }) + + connectString := persistentURI + datastore, err := sql.Open("sqlite_attach_tdb", connectString) + if err != nil { + return err + } + + ds.dbName = persistentURI + ds.tdbName = transientURI + + _, err = datastore.Exec("PRAGMA page_size = 32768;") + if err != nil { + glog.Warning("unable to increase page size size", err) + } + + _, err = datastore.Exec("PRAGMA synchronous = OFF") + if err != nil { + glog.Warning("unable to turn off synchronous", err) + } + + _, err = datastore.Exec("PRAGMA temp_store = MEMORY") + if err != nil { + glog.Warning("unable to change temp_store", err) + } + + err = datastore.Ping() + if err != nil { + glog.Warning("unable to ping database") + return + } + + ds.db = datastore + + // if I turn off foreign key support, I can do some work + // asynchronously + //_, err = datastore.Exec("PRAGMA foreign_keys = ON") + //if err != nil { + // glog.Warning("unable to turn on foreign key support", err) + //} + + // TBD - what's the best busy_timeout value (ms)? 
+ _, err = datastore.Exec("PRAGMA busy_timeout = 1000") + if err != nil { + glog.Warning("unable to set busy_timeout", err) + } + + _, err = datastore.Exec("PRAGMA journal_mode=WAL") + if err != nil { + glog.Warning("unable to set journal_mode", err) + } + + connectString = transientURI + sql.Register("sqlite_attach_db", &sqlite3.SQLiteDriver{ + ConnectHook: func(conn *sqlite3.SQLiteConn) error { + cmd := fmt.Sprintf("ATTACH '%s' AS db", persistentURI) + conn.Exec(cmd, nil) + return nil + }, + }) + datastore, err = sql.Open("sqlite_attach_db", connectString) + if err != nil { + return err + } + + _, err = datastore.Exec("PRAGMA page_size = 32768;") + if err != nil { + glog.Warning("unable to increase page size size", err) + } + + err = datastore.Ping() + if err != nil { + glog.Warning("unable to ping database") + return + } + + ds.tdb = datastore + + _, err = datastore.Exec("PRAGMA foreign_keys = ON") + if err != nil { + glog.Warning("unable to turn on foreign key support", err) + } + + // TBD - what's the best busy_timeout value (ms)? 
+ _, err = datastore.Exec("PRAGMA busy_timeout = 500") + if err != nil { + glog.Warning("unable to set busy_timeout", err) + } + + _, err = datastore.Exec("PRAGMA journal_mode=WAL") + if err != nil { + glog.Warning("unable to set journal_mode", err) + } + + _, err = datastore.Exec("PRAGMA synchronous = OFF") + if err != nil { + glog.Warning("unable to turn off synchronous", err) + } + + _, err = datastore.Exec("PRAGMA temp_store = MEMORY") + if err != nil { + glog.Warning("unable to change temp_store", err) + } + + return +} + +// Disconnect is used to close the connection to the sql database +func (ds *Datastore) Disconnect() { + ds.db.Close() +} + +func (ds *Datastore) logEvent(tenantID string, eventType string, message string) error { + glog.V(2).Info("log event: ", message) + cmd := `INSERT INTO log (tenant_id, type, message) + VALUES('%s', '%s', '%s');` + + ds.tdbLock.Lock() + + str := fmt.Sprintf(cmd, tenantID, eventType, message) + err := ds.exec(ds.getTableDB("log"), str) + if err != nil { + glog.V(2).Info("could not log event: ", message, " ", err) + } + + ds.tdbLock.Unlock() + + return err +} + +// ClearLog will remove all the event entries from the event log +func (ds *Datastore) ClearLog() error { + db := ds.getTableDB("log") + + ds.tdbLock.Lock() + + err := ds.exec(db, "DELETE FROM log") + if err != nil { + glog.V(2).Info("could not clear log: ", err) + } + + ds.tdbLock.Unlock() + + return err +} + +// AddTenantChan allows a caller to pass in a channel for CNCI Launch status. +// When a CNCI has been added to the datastore and a channel exists, +// success will be indicated on the channel. If a CNCI failure occurred +// and a channel exists, failure will be indicated on the channel. 
+func (ds *Datastore) AddTenantChan(c chan bool, tenantID string) { + ds.cnciAddedLock.Lock() + ds.cnciAddedChans[tenantID] = c + ds.cnciAddedLock.Unlock() +} + +// GetCNCIWorkloadID returns the UUID of the workload template +// for the CNCI workload +func (ds *Datastore) GetCNCIWorkloadID() (id string, err error) { + db := ds.getTableDB("workload_template") + + err = db.QueryRow("SELECT id FROM workload_template WHERE description = 'CNCI'").Scan(&id) + if err != nil { + return + } + return +} + +func (ds *Datastore) getConfig(id string) (config string, err error) { + // check our cache first + ds.workloadsLock.RLock() + wl := ds.workloads[id] + ds.workloadsLock.RUnlock() + + var configFile string + + if wl == nil { + db := ds.getTableDB("workload_template") + + err = db.QueryRow("SELECT filename FROM workload_template where id = ?", id).Scan(&configFile) + + if err != nil { + return config, err + } + } else { + return wl.Config, nil + } + + path := fmt.Sprintf("%s/%s", ds.workloadsPath, configFile) + bytes, err := ioutil.ReadFile(path) + config = string(bytes) + return config, err +} + +func (ds *Datastore) getImageInfo(workloadID string) (imageID string, fwType string, err error) { + // check the cache first + ds.workloadsLock.RLock() + wl := ds.workloads[workloadID] + ds.workloadsLock.RUnlock() + + if wl != nil { + return wl.ImageID, wl.FWType, nil + } + + db := ds.getTableDB("workload_template") + + err = db.QueryRow("SELECT image_id, fw_type FROM workload_template where id = ?", workloadID).Scan(&imageID, &fwType) + + if err != nil { + return + } + + return +} + +func (ds *Datastore) getWorkloadDefaults(id string) (defaults []payloads.RequestedResource, err error) { + // check the cache first + ds.workloadsLock.RLock() + wl := ds.workloads[id] + ds.workloadsLock.RUnlock() + + if wl != nil { + return wl.Defaults, nil + } + + query := `SELECT resources.name, default_value, mandatory FROM workload_resources + JOIN resources + ON 
workload_resources.resource_id=resources.id + WHERE workload_id = ?` + db := ds.getTableDB("workload_resources") + + rows, err := db.Query(query, id) + if err != nil { + return nil, err + } + defer rows.Close() + + for rows.Next() { + var val int + var rname string + var mandatory bool + + err = rows.Scan(&rname, &val, &mandatory) + if err != nil { + return nil, err + } + r := payloads.RequestedResource{ + Type: payloads.Resource(rname), + Value: val, + Mandatory: mandatory, + } + defaults = append(defaults, r) + } + return +} + +// AddLimit allows the caller to store a limt for a specific resource for a tenant. +func (ds *Datastore) AddLimit(tenantID string, resourceID int, limit int) (err error) { + ds.dbLock.Lock() + err = ds.create("limits", resourceID, tenantID, limit) + ds.dbLock.Unlock() + if err != nil { + return + } + + // update cache + ds.tenantsLock.Lock() + tenant := ds.tenants[tenantID] + if tenant != nil { + resources := tenant.Resources + for i := range resources { + if resources[i].Rtype == resourceID { + resources[i].Limit = limit + break + } + } + } + ds.tenantsLock.Unlock() + return +} + +func (ds *Datastore) getTenantResources(id string) (resources []*types.Resource, err error) { + // check cache first + ds.tenantsLock.RLock() + tenant := ds.tenants[id] + ds.tenantsLock.RUnlock() + if tenant != nil { + resources = tenant.Resources + return + } + + query := `WITH instances_usage AS + ( + SELECT resource_id, value + FROM usage + LEFT JOIN instances + ON usage.instance_id = instances.id + WHERE instances.tenant_id = ? + ) + SELECT resources.name, resources.id, limits.max_value, + CASE resources.id + WHEN resources.id = 1 then + ( + SELECT COUNT(instances.id) + FROM instances + WHERE instances.tenant_id = ? + ) + ELSE SUM(instances_usage.value) + END + FROM resources + LEFT JOIN instances_usage + ON instances_usage.resource_id = resources.id + LEFT JOIN limits + ON resources.id=limits.resource_id + AND limits.tenant_id = ? 
+ GROUP BY resources.id` + datastore := ds.db + + rows, err := datastore.Query(query, id, id, id) + if err != nil { + glog.Warning("Failed to get tenant usage") + return nil, err + } + defer rows.Close() + for rows.Next() { + var id int + var name string + var sqlMaxVal sql.NullInt64 + var sqlCurVal sql.NullInt64 + var maxVal = -1 + var curVal = 0 + + err = rows.Scan(&name, &id, &sqlMaxVal, &sqlCurVal) + if err != nil { + return nil, err + } + + if sqlMaxVal.Valid { + maxVal = int(sqlMaxVal.Int64) + } + if sqlCurVal.Valid { + curVal = int(sqlCurVal.Int64) + } + r := types.Resource{ + Rname: name, + Rtype: id, + Limit: maxVal, + Usage: curVal, + } + resources = append(resources, &r) + } + + return +} + +func newHardwareAddr() (hw net.HardwareAddr, err error) { + buf := make([]byte, 6) + _, err = rand.Read(buf) + if err != nil { + return + } + + // vnic creation seems to require not just the + // bit 1 to be set, but the entire byte to be + // set to 2. Also, ensure that we get no + // overlap with tenant mac addresses by not allowing + // byte 1 to ever be zero. + buf[0] = 2 + if buf[1] == 0 { + buf[1] = 3 + } + hw = net.HardwareAddr(buf) + return +} + +// AddTenant stores information about a tenant into the datastore. +// it creates a MAC address for the tenant network and makes sure +// that this new tenant is cached. 
+func (ds *Datastore) AddTenant(id string) (tenant *types.Tenant, err error) { + hw, err := newHardwareAddr() + if err != nil { + glog.V(2).Info("error creating mac address", err) + return + } + ds.dbLock.Lock() + err = ds.create("tenants", id, "", "", hw.String(), "") + ds.dbLock.Unlock() + + t, err := ds.getTenant(id) + if err != nil || tenant == nil { + glog.V(2).Info(err, " unable to get tenant: ", id) + } + + ds.tenantsLock.Lock() + ds.tenants[id] = t + ds.tenantsLock.Unlock() + + return &t.Tenant, err +} + +func (ds *Datastore) getTenant(id string) (t *tenant, err error) { + // check cache first + ds.tenantsLock.RLock() + t = ds.tenants[id] + ds.tenantsLock.RUnlock() + + if t != nil { + return + } + + query := `SELECT tenants.id, + tenants.name, + tenants.cnci_id, + tenants.cnci_mac, + tenants.cnci_ip + FROM tenants + WHERE tenants.id = ?` + datastore := ds.db + + row := datastore.QueryRow(query, id) + + t = new(tenant) + + err = row.Scan(&t.Id, &t.Name, &t.CNCIID, &t.CNCIMAC, &t.CNCIIP) + if err != nil { + glog.Warning("unable to retrieve tenant from tenants") + if err == sql.ErrNoRows { + // not an error, it's just not there. 
+ err = nil + } + return nil, err + } + + t.Resources, err = ds.getTenantResources(id) + + err = ds.getTenantNetwork(t) + if err != nil { + glog.V(2).Info(err) + } + + t.instances = make(map[string]*types.Instance) + instances, err := ds.GetAllInstancesFromTenant(t.Id) + if err != nil { + for i := range instances { + t.instances[instances[i].Id] = instances[i] + } + } + return t, err +} + +// GetTenant returns details about a tenant referenced by the uuid +func (ds *Datastore) GetTenant(id string) (tenant *types.Tenant, err error) { + // check cache first + ds.tenantsLock.RLock() + t := ds.tenants[id] + ds.tenantsLock.RUnlock() + + if t != nil { + return &t.Tenant, nil + } + + query := `SELECT tenants.id, + tenants.name, + tenants.cnci_id, + tenants.cnci_mac, + tenants.cnci_ip + FROM tenants + WHERE tenants.id = ?` + datastore := ds.db + + row := datastore.QueryRow(query, id) + + tenant = new(types.Tenant) + + err = row.Scan(&tenant.Id, &tenant.Name, &tenant.CNCIID, &tenant.CNCIMAC, &tenant.CNCIIP) + if err != nil { + glog.Warning("unable to retrieve tenant from tenants") + if err == sql.ErrNoRows { + // not an error, it's just not there. 
+ err = nil + } + return nil, err + } + + tenant.Resources, err = ds.getTenantResources(id) + + return tenant, err +} + +func (ds *Datastore) getWorkload(id string) (*workload, error) { + // check the cache first + ds.workloadsLock.RLock() + wl := ds.workloads[id] + ds.workloadsLock.RUnlock() + + if wl != nil { + return wl, nil + } + + datastore := ds.db + + query := `SELECT id, + description, + filename, + fw_type, + vm_type, + image_id, + image_name + FROM workload_template + WHERE id = ?` + + work := new(workload) + + var VMType string + + err := datastore.QueryRow(query, id).Scan(&work.Id, &work.Description, &work.filename, &work.FWType, &VMType, &work.ImageID, &work.ImageName) + if err != nil { + return nil, err + } + + work.VMType = payloads.Hypervisor(VMType) + + work.Config, err = ds.getConfig(id) + if err != nil { + return nil, err + } + + work.Defaults, err = ds.getWorkloadDefaults(id) + if err != nil { + return nil, err + } + + return work, nil +} + +// GetWorkload returns details about a specific workload referenced by id +func (ds *Datastore) GetWorkload(id string) (*types.Workload, error) { + // check the cache first + ds.workloadsLock.RLock() + wl := ds.workloads[id] + ds.workloadsLock.RUnlock() + + if wl != nil { + return &wl.Workload, nil + } + + wl, err := ds.getWorkload(id) + if err != nil { + return nil, err + } + + return &wl.Workload, nil +} + +func (ds *Datastore) getWorkloads() ([]*workload, error) { + var workloads []*workload + + // check the cache first + ds.workloadsLock.RLock() + if len(ds.workloads) > 0 { + for _, wl := range ds.workloads { + workloads = append(workloads, wl) + } + ds.workloadsLock.RUnlock() + return workloads, nil + } + ds.workloadsLock.RUnlock() + + datastore := ds.db + + query := `SELECT id, + description, + filename, + fw_type, + vm_type, + image_id, + image_name + FROM workload_template + WHERE internal = 0` + + rows, err := datastore.Query(query) + if err != nil { + return nil, err + } + defer rows.Close() + + 
for rows.Next() { + wl := new(workload) + + var VMType string + + err = rows.Scan(&wl.Id, &wl.Description, &wl.filename, &wl.FWType, &VMType, &wl.ImageID, &wl.ImageName) + if err != nil { + return nil, err + } + + wl.Config, err = ds.getConfig(wl.Id) + if err != nil { + return nil, err + } + + wl.Defaults, err = ds.getWorkloadDefaults(wl.Id) + if err != nil { + return nil, err + } + + wl.VMType = payloads.Hypervisor(VMType) + + workloads = append(workloads, wl) + } + if err = rows.Err(); err != nil { + return nil, err + } + return workloads, nil +} + +// GetWorkloads returns all known tenant workloads +func (ds *Datastore) GetWorkloads() ([]*types.Workload, error) { + var workloads []*types.Workload + + // check the cache first + ds.workloadsLock.RLock() + if len(ds.workloads) > 0 { + for _, wl := range ds.workloads { + workloads = append(workloads, &wl.Workload) + } + ds.workloadsLock.RUnlock() + return workloads, nil + } + ds.workloadsLock.RUnlock() + + wls, err := ds.getWorkloads() + if err != nil { + return workloads, err + } + + if len(wls) > 0 { + for _, wl := range wls { + workloads = append(workloads, &wl.Workload) + } + } + + return workloads, nil +} + +func (ds *Datastore) getTenantCNCI(tenantID string) (cnciID string, cnciIP string, cnciMAC string, err error) { + // check the cache + ds.tenantsLock.RLock() + t := ds.tenants[tenantID] + ds.tenantsLock.RUnlock() + + if t != nil { + return t.CNCIID, t.CNCIIP, t.CNCIMAC, nil + } + + datastore := ds.db + + err = datastore.QueryRow("SELECT cnci_id, cnci_ip, cnci_mac FROM tenants WHERE tenants.id = ?", tenantID).Scan(&cnciID, &cnciIP, &cnciMAC) + return +} + +func (ds *Datastore) getTenantByCNCIMAC(cnciMAC string) (tenantID string, err error) { + db := ds.getTableDB("tenants") + + err = db.QueryRow("SELECT id FROM tenants WHERE cnci_mac = ?", cnciMAC).Scan(&tenantID) + return +} + +// AddCNCIIP will associate a new IP address with an existing CNCI +// via the mac address +func (ds *Datastore) AddCNCIIP(cnciMAC 
string, ip string) (err error) { + // update tenants cache + ds.tenantsLock.Lock() + tenantID, err := ds.getTenantByCNCIMAC(cnciMAC) + if err != nil { + ds.tenantsLock.Unlock() + return + } + + if ds.tenants[tenantID] != nil { + ds.tenants[tenantID].CNCIIP = ip + } + ds.tenantsLock.Unlock() + + db := ds.getTableDB("tenants") + cmd := fmt.Sprintf("UPDATE tenants SET cnci_ip = '%s' WHERE cnci_mac = '%s'", ip, cnciMAC) + ds.dbLock.Lock() + err = ds.exec(db, cmd) + ds.dbLock.Unlock() + if err != nil { + glog.Warning("Failed to update CNCI IP") + } + + c := ds.cnciAddedChans[tenantID] + if c != nil { + ds.cnciAddedLock.Lock() + ds.cnciAddedChans[tenantID] = nil + ds.cnciAddedLock.Unlock() + c <- true + } + return +} + +// AddTenantCNCI will associate a new CNCI instance with a specific tenant. +// The instanceID of the new CNCI instance and the MAC address of the new instance +// are stored in the sql database and updated in the cache. +func (ds *Datastore) AddTenantCNCI(tenantID string, instanceID string, mac string) (err error) { + // update tenants cache + ds.tenantsLock.Lock() + if ds.tenants[tenantID] != nil { + ds.tenants[tenantID].CNCIID = instanceID + ds.tenants[tenantID].CNCIMAC = mac + } + ds.tenantsLock.Unlock() + + db := ds.getTableDB("tenants") + cmd := fmt.Sprintf("UPDATE tenants SET cnci_id = '%s', cnci_mac = '%s' WHERE id = '%s'", instanceID, mac, tenantID) + ds.dbLock.Lock() + err = ds.exec(db, cmd) + ds.dbLock.Unlock() + + return +} + +func (ds *Datastore) removeTenantCNCI(tenantID string, cnciID string) (err error) { + // update tenants cache + ds.tenantsLock.Lock() + if ds.tenants[tenantID] != nil { + ds.tenants[tenantID].CNCIID = "" + ds.tenants[tenantID].CNCIIP = "" + } + ds.tenantsLock.Unlock() + + db := ds.getTableDB("tenants") + cmd := fmt.Sprintf("UPDATE tenants SET cnci_id = '', cnci_ip = '' WHERE cnci_id = '%s'", cnciID) + ds.dbLock.Lock() + err = ds.exec(db, cmd) + ds.dbLock.Unlock() + + return +} + +func (ds *Datastore) getCNCITenant(cnciID 
string) (tenantID string, err error) { + db := ds.getTableDB("tenants") + + err = db.QueryRow("SELECT id FROM tenants WHERE cnci_id = ?", cnciID).Scan(&tenantID) + return +} + +func (ds *Datastore) isInstanceCNCI(instanceID string) (b bool, err error) { + datastore := ds.getTableDB("tenants") + + var c int + err = datastore.QueryRow("SELECT count(cnci_id) FROM tenants WHERE cnci_id = ?", instanceID).Scan(&c) + b = (c > 0) + return +} + +func (ds *Datastore) getTenants() ([]*tenant, error) { + var tenants []*tenant + + // check the cache first + ds.tenantsLock.RLock() + if len(ds.tenants) > 0 { + for _, value := range ds.tenants { + tenants = append(tenants, value) + } + ds.tenantsLock.RUnlock() + return tenants, nil + } + ds.tenantsLock.RUnlock() + + datastore := ds.getTableDB("tenants") + + query := `SELECT tenants.id, + tenants.name, + tenants.cnci_id, + tenants.cnci_mac, + tenants.cnci_ip + FROM tenants ` + + rows, err := datastore.Query(query) + if err != nil { + return nil, err + } + defer rows.Close() + + for rows.Next() { + var id sql.NullString + var name sql.NullString + var cnciID sql.NullString + var cnciMAC sql.NullString + var cnciIP sql.NullString + + t := new(tenant) + err = rows.Scan(&id, &name, &cnciID, &cnciMAC, &cnciIP) + if err != nil { + return nil, err + } + + if id.Valid { + t.Id = id.String + } + if name.Valid { + t.Name = name.String + } + if cnciID.Valid { + t.CNCIID = cnciID.String + } + if cnciMAC.Valid { + t.CNCIMAC = cnciMAC.String + } + if cnciIP.Valid { + t.CNCIIP = cnciIP.String + } + + t.Resources, err = ds.getTenantResources(t.Id) + if err != nil { + return nil, err + } + + err = ds.getTenantNetwork(t) + if err != nil { + return nil, err + } + t.instances = make(map[string]*types.Instance) + instances, err := ds.GetAllInstancesFromTenant(t.Id) + if err != nil { + for i := range instances { + t.instances[instances[i].Id] = instances[i] + } + } + + tenants = append(tenants, t) + } + if err = rows.Err(); err != nil { + return nil, 
err + } + + return tenants, nil +} + +// GetAllTenants returns all the tenants from the datastore. +func (ds *Datastore) GetAllTenants() ([]*types.Tenant, error) { + var tenants []*types.Tenant + + // check the cache first + ds.tenantsLock.RLock() + if len(ds.tenants) > 0 { + for _, value := range ds.tenants { + tenants = append(tenants, &value.Tenant) + } + ds.tenantsLock.RUnlock() + return tenants, nil + } + ds.tenantsLock.RUnlock() + + datastore := ds.getTableDB("tenants") + + query := `SELECT tenants.id, + tenants.name, + tenants.cnci_id, + tenants.cnci_mac, + tenants.cnci_ip + FROM tenants ` + + rows, err := datastore.Query(query) + if err != nil { + return nil, err + } + defer rows.Close() + + for rows.Next() { + var id sql.NullString + var name sql.NullString + var cnciID sql.NullString + var cnciMAC sql.NullString + var cnciIP sql.NullString + + t := new(types.Tenant) + err = rows.Scan(&id, &name, &cnciID, &cnciMAC, &cnciIP) + if err != nil { + return nil, err + } + + if id.Valid { + t.Id = id.String + } + if name.Valid { + t.Name = name.String + } + if cnciID.Valid { + t.CNCIID = cnciID.String + } + if cnciMAC.Valid { + t.CNCIMAC = cnciMAC.String + } + if cnciIP.Valid { + t.CNCIIP = cnciIP.String + } + + t.Resources, err = ds.getTenantResources(t.Id) + if err != nil { + return nil, err + } + + tenants = append(tenants, t) + } + if err = rows.Err(); err != nil { + return nil, err + } + + return tenants, nil +} + +// ReleaseTenantIP will return an IP address previously allocated to the pool. +// Once a tenant IP address is released, it can be reassigned to another +// instance. 
+func (ds *Datastore) ReleaseTenantIP(tenantID string, ip string) (err error) { + datastore := ds.getTableDB("tenant_network") + ipAddr := net.ParseIP(ip) + if ipAddr == nil { + return errors.New("Invalid IPv4 Address") + } + + ipBytes := ipAddr.To4() + if ipBytes == nil { + return errors.New("Unable to convert ip to bytes") + } + subnetInt := binary.BigEndian.Uint16(ipBytes[1:3]) + + // clear from cache + ds.tenantsLock.Lock() + if ds.tenants[tenantID] != nil { + ds.tenants[tenantID].network[int(subnetInt)][int(ipBytes[3])] = false + } + ds.tenantsLock.Unlock() + + cmd := fmt.Sprintf("DELETE FROM tenant_network WHERE tenant_id = '%s' AND subnet = %d AND rest = %d", tenantID, subnetInt, int(ipBytes[3])) + ds.dbLock.Lock() + err = ds.exec(datastore, cmd) + ds.dbLock.Unlock() + + return +} + +func (ds *Datastore) getTenantNetwork(tenant *tenant) (err error) { + tenant.network = make(map[int]map[int]bool) + + // serialize + ds.dbLock.Lock() + datastore := ds.getTableDB("tenant_network") + tx, err := datastore.Begin() + if err != nil { + ds.dbLock.Unlock() + return + } + + // get all subnet,rest values for this tenant + query := `SELECT subnet, rest + FROM tenant_network + WHERE tenant_id = ?` + rows, err := tx.Query(query, tenant.Id) + if err != nil { + glog.Warning(err) + tx.Rollback() + ds.dbLock.Unlock() + return + } + defer rows.Close() + + for rows.Next() { + var subnetInt uint16 + var rest uint8 + + err = rows.Scan(&subnetInt, &rest) + if err != nil { + glog.Warning(err) + tx.Rollback() + ds.dbLock.Unlock() + return + } + sub, ok := tenant.network[int(subnetInt)] + if !ok { + sub = make(map[int]bool) + tenant.network[int(subnetInt)] = sub + } + /* Only add to the subnet list for the first host */ + if len(tenant.network[int(subnetInt)]) == 0 { + tenant.subnets = append(tenant.subnets, int(subnetInt)) + } + tenant.network[int(subnetInt)][int(rest)] = true + + } + tx.Commit() + ds.dbLock.Unlock() + return +} + +// AllocateTenantIP will find a free IP address 
within a tenant network. +// For now we make each tenant have unique subnets even though it +// isn't actually needed because of a docker issue. +func (ds *Datastore) AllocateTenantIP(tenantID string) (ip net.IP, err error) { + var subnetInt uint16 + subnetInt = 0 + + ds.tenantsLock.Lock() + network := ds.tenants[tenantID].network + subnets := ds.tenants[tenantID].subnets + + // find any subnet assigned to this tenant with available addresses + sort.Ints(subnets) + for _, k := range subnets { + if len(network[k]) < 253 { + subnetInt = uint16(k) + } + } + + var subnetBytes = []byte{16, 0} + if subnetInt == 0 { + i := binary.BigEndian.Uint16(subnetBytes) + + for { + // sub, ok := network[int(i)] + // for now, prevent overlapping subnets + // due to bug in docker. + ok := ds.allSubnets[int(i)] + if !ok { + sub := make(map[int]bool) + network[int(i)] = sub + + // claim so no one else can use it + ds.allSubnets[int(i)] = true + + break + } + if subnetBytes[1] == 255 { + if subnetBytes[0] == 31 { + // out of possible subnets + glog.Warning("Out of Subnets") + ds.tenantsLock.Unlock() + return nil, errors.New("Out of subnets") + } + subnetBytes[0]++ + subnetBytes[1] = 0 + } else { + subnetBytes[1]++ + } + i = binary.BigEndian.Uint16(subnetBytes) + } + subnetInt = i + ds.tenants[tenantID].subnets = append(subnets, int(subnetInt)) + } else { + binary.BigEndian.PutUint16(subnetBytes, subnetInt) + } + + hosts := network[int(subnetInt)] + + rest := 2 + for { + if hosts[rest] == false { + hosts[rest] = true + break + } + + if rest == 255 { + // this should never happen + glog.Warning("ran out of host numbers") + ds.tenantsLock.Unlock() + return nil, errors.New("rand out of host numbers") + } + rest++ + } + + // at this point we have a subnet and host number, we should + // claim this in the datastore + go func(subnetInt uint16, rest int) { + datastore := ds.getTableDB("tenant_network") + ds.dbLock.Lock() + tx, err := datastore.Begin() + if err != nil { + ds.dbLock.Unlock() + 
return + } + + cmd := `INSERT INTO tenant_network VALUES('%s', %d, %d);` + str := fmt.Sprintf(cmd, tenantID, subnetInt, rest) + _, err = tx.Exec(str) + if err != nil { + glog.Warning(cmd, err) + tx.Rollback() + ds.dbLock.Unlock() + return + } + + tx.Commit() + ds.dbLock.Unlock() + }(subnetInt, rest) + + ds.tenantsLock.Unlock() + + // convert to IP type. + next := net.IPv4(172, subnetBytes[0], subnetBytes[1], byte(rest)) + return next, err +} + +// GetAllInstances retrieves all instances out of the datastore. +func (ds *Datastore) GetAllInstances() (instances []*types.Instance, err error) { + // always get from cache + ds.instancesLock.RLock() + if len(ds.instances) > 0 { + for _, val := range ds.instances { + instances = append(instances, val) + } + ds.instancesLock.RUnlock() + return + } + ds.instancesLock.RUnlock() + + // delete the below? + datastore := ds.getTableDB("instances") + + ds.tdbLock.RLock() + + tx, err := datastore.Begin() + if err != nil { + ds.tdbLock.RUnlock() + return nil, err + } + + query := ` + WITH latest AS + ( + SELECT max(tdb.instance_statistics.timestamp), + tdb.instance_statistics.instance_id, + tdb.instance_statistics.state, + tdb.instance_statistics.ssh_ip, + tdb.instance_statistics.ssh_port, + tdb.instance_statistics.node_id + FROM tdb.instance_statistics + GROUP BY tdb.instance_statistics.instance_id + ) + SELECT instances.id, + instances.tenant_id, + IFNULL(latest.state, "pending") AS state, + workload_id, + IFNULL(latest.ssh_ip, "Not Assigned") as ssh_ip, + latest.ssh_port as ssh_port, + IFNULL(latest.node_id, "Not Assigned") as node_id, + mac_address, + ip + FROM instances + LEFT JOIN latest + ON instances.id = latest.instance_id + ` + + rows, err := tx.Query(query) + if err != nil { + return nil, err + ds.tdbLock.RUnlock() + tx.Rollback() + } + defer rows.Close() + + for rows.Next() { + var i types.Instance + + var sshPort sql.NullInt64 + + err = rows.Scan(&i.Id, &i.TenantId, &i.State, &i.WorkloadId, &i.SSHIP, &sshPort, 
&i.NodeId, &i.MACAddress, &i.IPAddress) + if err != nil { + tx.Rollback() + ds.tdbLock.RUnlock() + return nil, err + } + + if sshPort.Valid { + i.SSHPort = int(sshPort.Int64) + } + + instances = append(instances, &i) + } + if err = rows.Err(); err != nil { + tx.Rollback() + ds.tdbLock.RUnlock() + return nil, err + } + + tx.Commit() + + ds.tdbLock.RUnlock() + + return instances, nil +} + +// GetAllInstancesFromTenant will retrieve all instances belonging to a specific tenant +func (ds *Datastore) GetAllInstancesFromTenant(tenantID string) (instances []*types.Instance, err error) { + ds.tenantsLock.RLock() + t, ok := ds.tenants[tenantID] + if ok { + for _, val := range t.instances { + instances = append(instances, val) + } + ds.tenantsLock.RUnlock() + return + } + ds.tenantsLock.RUnlock() + + datastore := ds.getTableDB("instances") + + ds.tdbLock.RLock() + + tx, err := datastore.Begin() + if err != nil { + ds.tdbLock.RUnlock() + return nil, err + } + + query := ` + WITH latest AS + ( + SELECT max(tdb.instance_statistics.timestamp), + tdb.instance_statistics.instance_id, + tdb.instance_statistics.state, + tdb.instance_statistics.ssh_ip, + tdb.instance_statistics.ssh_port, + tdb.instance_statistics.node_id + FROM tdb.instance_statistics + GROUP BY tdb.instance_statistics.instance_id + ) + SELECT instances.id, + instances.tenant_id, + IFNULL(latest.state, "pending") AS state, + IFNULL(latest.ssh_ip, "Not Assigned") AS ssh_ip, + latest.ssh_port AS ssh_port, + workload_id, + latest.node_id, + mac_address, + ip + FROM instances + LEFT JOIN latest + ON instances.id = latest.instance_id + WHERE instances.tenant_id = ? 
+ ` + + rows, err := tx.Query(query, tenantID) + if err != nil { + tx.Rollback() + ds.tdbLock.RUnlock() + return nil, err + } + defer rows.Close() + + for rows.Next() { + var id sql.NullString + var tenantID sql.NullString + var state sql.NullString + var workloadID sql.NullString + var nodeID sql.NullString + var macAddress sql.NullString + var ipAddress sql.NullString + var sshIP sql.NullString + var sshPort sql.NullInt64 + + i := new(types.Instance) + err = rows.Scan(&id, &tenantID, &state, &sshIP, &sshPort, &workloadID, &nodeID, &macAddress, &ipAddress) + if err != nil { + tx.Rollback() + ds.tdbLock.RUnlock() + return nil, err + } + + if id.Valid { + i.Id = id.String + } + if tenantID.Valid { + i.TenantId = tenantID.String + } + if state.Valid { + i.State = state.String + } + if workloadID.Valid { + i.WorkloadId = workloadID.String + } + if macAddress.Valid { + i.MACAddress = macAddress.String + } + if ipAddress.Valid { + i.IPAddress = ipAddress.String + } + if nodeID.Valid { + i.NodeId = nodeID.String + } + if sshIP.Valid { + i.SSHIP = sshIP.String + } + if sshPort.Valid { + i.SSHPort = int(sshPort.Int64) + } + instances = append(instances, i) + } + if err = rows.Err(); err != nil { + tx.Rollback() + ds.tdbLock.RUnlock() + return nil, err + } + tx.Commit() + + ds.tdbLock.RUnlock() + + return instances, nil +} + +// GetAllInstancesByNode will retrieve all the instances running on a specific compute Node. 
+func (ds *Datastore) GetAllInstancesByNode(nodeID string) (instances []*types.Instance, err error) { + ds.nodesLock.RLock() + n, ok := ds.nodes[nodeID] + if ok { + for _, val := range n.instances { + instances = append(instances, val) + } + ds.nodesLock.RUnlock() + return + } + ds.nodesLock.RUnlock() + + datastore := ds.getTableDB("instances") + + ds.tdbLock.RLock() + + tx, err := datastore.Begin() + if err != nil { + ds.tdbLock.RUnlock() + return nil, err + } + + query := ` + WITH latest AS + ( + SELECT max(tdb.instance_statistics.timestamp), + tdb.instance_statistics.instance_id, + tdb.instance_statistics.state, + tdb.instance_statistics.ssh_ip, + tdb.instance_statistics.ssh_port, + tdb.instance_statistics.node_id + FROM tdb.instance_statistics + GROUP BY tdb.instance_statistics.instance_id + ) + SELECT instances.id, + instances.tenant_id, + IFNULL(latest.state, "pending") AS state, + IFNULL(latest.ssh_ip, "Not Assigned") AS ssh_ip, + latest.ssh_port AS ssh_port, + workload_id, + latest.node_id, + mac_address, + ip + FROM instances + LEFT JOIN latest + ON instances.id = latest.instance_id + WHERE latest.node_id = ? 
+ ` + rows, err := tx.Query(query, nodeID) + if err != nil { + tx.Rollback() + ds.tdbLock.RUnlock() + return nil, err + } + defer rows.Close() + + for rows.Next() { + var id sql.NullString + var tenantID sql.NullString + var state sql.NullString + var workloadID sql.NullString + var nodeID sql.NullString + var macAddress sql.NullString + var ipAddress sql.NullString + var sshIP sql.NullString + var sshPort sql.NullInt64 + + i := new(types.Instance) + + err = rows.Scan(&id, &tenantID, &state, &sshIP, &sshPort, &workloadID, &nodeID, &macAddress, &ipAddress) + if err != nil { + tx.Rollback() + ds.tdbLock.RUnlock() + return nil, err + } + + if id.Valid { + i.Id = id.String + } + if tenantID.Valid { + i.TenantId = tenantID.String + } + if state.Valid { + i.State = state.String + } + if workloadID.Valid { + i.WorkloadId = workloadID.String + } + if macAddress.Valid { + i.MACAddress = macAddress.String + } + if ipAddress.Valid { + i.IPAddress = ipAddress.String + } + if nodeID.Valid { + i.NodeId = nodeID.String + } + if sshIP.Valid { + i.SSHIP = sshIP.String + } + if sshPort.Valid { + i.SSHPort = int(sshPort.Int64) + } + + instances = append(instances, i) + } + if err = rows.Err(); err != nil { + tx.Rollback() + ds.tdbLock.RUnlock() + return nil, err + } + + tx.Commit() + + ds.tdbLock.RUnlock() + + return instances, nil +} + +// GetInstanceFromTenant will be replaced soon with something else that makes more sense. 
+func (ds *Datastore) GetInstanceFromTenant(tenantID string, instanceID string) (*types.Instance, error) { + var i types.Instance + + datastore := ds.getTableDB("instances") + + ds.tdbLock.RLock() + + tx, err := datastore.Begin() + if err != nil { + ds.tdbLock.RUnlock() + return nil, err + } + + query := ` + WITH latest AS + ( + SELECT max(tdb.instance_statistics.timestamp), + tdb.instance_statistics.instance_id, + tdb.instance_statistics.state, + tdb.instance_statistics.ssh_ip, + tdb.instance_statistics.ssh_port, + tdb.instance_statistics.node_id + FROM tdb.instance_statistics + GROUP BY tdb.instance_statistics.instance_id + ) + SELECT instances.id, + instances.tenant_id, + IFNULL(latest.state, "pending") AS state, + IFNULL(latest.ssh_ip, "Not Assigned") AS ssh_ip, + latest.ssh_port AS ssh_port, + workload_id, + latest.node_id, + mac_address, + ip + FROM instances + LEFT JOIN latest + ON instances.id = latest.instance_id + WHERE instances.tenant_id = ? + AND instances.id = ? + ` + + row := tx.QueryRow(query, tenantID, instanceID) + + var ID sql.NullString + var tID sql.NullString + var state sql.NullString + var workloadID sql.NullString + var nodeID sql.NullString + var macAddress sql.NullString + var ipAddress sql.NullString + var sshIP sql.NullString + var sshPort sql.NullInt64 + + err = row.Scan(&ID, &tID, &state, &sshIP, &sshPort, &workloadID, &nodeID, &macAddress, &ipAddress) + if err != nil { + glog.V(2).Info("unable to retrieve instance %s from tenant %s (%s)", instanceID, tenantID, err) + tx.Rollback() + ds.tdbLock.RUnlock() + return nil, err + } + + tx.Commit() + ds.tdbLock.RUnlock() + + if ID.Valid { + i.Id = ID.String + } + if tID.Valid { + i.TenantId = tID.String + } + if state.Valid { + i.State = state.String + } + if workloadID.Valid { + i.WorkloadId = workloadID.String + } + if nodeID.Valid { + i.NodeId = nodeID.String + } + if macAddress.Valid { + i.MACAddress = macAddress.String + } + if ipAddress.Valid { + i.IPAddress = ipAddress.String + } + if 
sshIP.Valid { + i.SSHIP = sshIP.String + } + if sshPort.Valid { + i.SSHPort = int(sshPort.Int64) + } + + return &i, err +} + +// AddInstance will store a new instance in the datastore. +// The instance will be updated both in the cache and in the database +func (ds *Datastore) AddInstance(instance *types.Instance) (err error) { + // add to cache + ds.instancesLock.Lock() + ds.instances[instance.Id] = instance + ds.instancesLock.Unlock() + + ds.tenantsLock.Lock() + ds.tenants[instance.TenantId].instances[instance.Id] = instance + ds.tenantsLock.Unlock() + + ds.dbLock.Lock() + err = ds.create("instances", instance.Id, instance.TenantId, instance.WorkloadId, instance.MACAddress, instance.IPAddress) + ds.dbLock.Unlock() + + instanceStat := payloads.CiaoServerStats{ + ID: instance.Id, + TenantID: instance.TenantId, + NodeID: instance.NodeId, + Timestamp: time.Now(), + Status: instance.State, + } + + ds.instanceLastStatLock.Lock() + ds.instanceLastStat[instance.Id] = instanceStat + ds.instanceLastStatLock.Unlock() + + return +} + +// RestartFailure logs a RestartFailure in the datastore +func (ds *Datastore) RestartFailure(instanceID string, reason payloads.RestartFailureReason) (err error) { + tenantID, err := ds.getInstanceOwner(instanceID) + if err != nil { + return + } + + msg := fmt.Sprintf("Restart Failure %s: %s", instanceID, reason.String()) + ds.logEvent(tenantID, string(userError), msg) + return +} + +// StopFailure logs a StopFailure in the datastore +func (ds *Datastore) StopFailure(instanceID string, reason payloads.StopFailureReason) (err error) { + tenantID, err := ds.getInstanceOwner(instanceID) + if err != nil { + return + } + + msg := fmt.Sprintf("Stop Failure %s: %s", instanceID, reason.String()) + ds.logEvent(tenantID, string(userError), msg) + return +} + +// StartFailure will clean up after a failure to start an instance. +// If an instance was a CNCI, this function will remove the CNCI instance +// for this tenant. 
If the instance was a normal tenant instance, the +// IP address will be released and the instance will be deleted from the +// datastore. +func (ds *Datastore) StartFailure(instanceID string, reason payloads.StartFailureReason) (err error) { + var tenantID string + cnci, err := ds.isInstanceCNCI(instanceID) + if err != nil { + fmt.Println(err) + return + } + if cnci == true { + glog.Warning("CNCI ", instanceID, " Failed to start") + tenantID, err = ds.getCNCITenant(instanceID) + if err != nil { + glog.Warning(err) + } + err = ds.removeTenantCNCI(tenantID, instanceID) + if err != nil { + glog.Warning(err) + } + msg := fmt.Sprintf("CNCI Start Failure %s: %s", instanceID, reason.String()) + ds.logEvent(tenantID, string(userError), msg) + c := ds.cnciAddedChans[tenantID] + if c != nil { + ds.cnciAddedLock.Lock() + ds.cnciAddedChans[tenantID] = nil + ds.cnciAddedLock.Unlock() + c <- false + } + return + } + + tenantID, ipAddress, err := ds.getInstanceTenantNetwork(instanceID) + if err != nil { + return err + } + + switch reason { + case payloads.FullCloud, + payloads.FullComputeNode, + payloads.NoComputeNodes, + payloads.NoNetworkNodes, + payloads.InvalidPayload, + payloads.InvalidData, + payloads.ImageFailure, + payloads.NetworkFailure: + + ds.instancesLock.Lock() + delete(ds.instances, instanceID) + ds.instancesLock.Unlock() + + ds.tenantsLock.Lock() + delete(ds.tenants[tenantID].instances, instanceID) + ds.tenantsLock.Unlock() + + err = ds.deleteAllUsage(instanceID, tenantID) + if err != nil { + glog.Warning(err) + } + + cmd := `DELETE FROM instances WHERE id = '%s';` + str := fmt.Sprintf(cmd, instanceID) + ds.dbLock.Lock() + err = ds.exec(ds.getTableDB("instances"), str) + ds.dbLock.Unlock() + if err != nil { + return err + } + + err = ds.ReleaseTenantIP(tenantID, ipAddress) + if err != nil { + glog.V(2).Info("StartFailure: ", err) + } + case payloads.LaunchFailure, + payloads.AlreadyRunning, + payloads.InstanceExists: + } + msg := fmt.Sprintf("Start Failure %s: 
%s", instanceID, reason.String()) + ds.logEvent(tenantID, string(userError), msg) + return +} + +// DeleteInstance removes an instance from the datastore. +func (ds *Datastore) DeleteInstance(instanceID string) (err error) { + ds.instanceLastStatLock.Lock() + delete(ds.instanceLastStat, instanceID) + ds.instanceLastStatLock.Unlock() + + ds.instancesLock.Lock() + i := ds.instances[instanceID] + delete(ds.instances, instanceID) + ds.instancesLock.Unlock() + + ds.tenantsLock.Lock() + delete(ds.tenants[i.TenantId].instances, instanceID) + ds.tenantsLock.Unlock() + + ds.nodesLock.Lock() + delete(ds.nodes[i.NodeId].instances, instanceID) + ds.nodesLock.Unlock() + + tenantID, ipAddress, err := ds.getInstanceTenantNetwork(instanceID) + if err != nil { + glog.V(2).Info("DeleteInstance: ", err) + return + } + + err = ds.deleteAllUsage(instanceID, tenantID) + if err != nil { + glog.Warning(err) + } + + cmd := `DELETE FROM instances WHERE id = '%s';` + str := fmt.Sprintf(cmd, instanceID) + ds.dbLock.Lock() + err = ds.exec(ds.getTableDB("instances"), str) + ds.dbLock.Unlock() + if err != nil { + glog.V(2).Info("DeleteInstance: ", err) + return + } + + err = ds.ReleaseTenantIP(tenantID, ipAddress) + if err != nil { + glog.V(2).Info("DeleteInstance: ", err) + return + } + + msg := fmt.Sprintf("Deleted Instance %s", instanceID) + ds.logEvent(tenantID, string(userInfo), msg) + return +} + +// GetInstanceInfo will be replaced by something else soon that makes more sense. 
+func (ds *Datastore) GetInstanceInfo(instanceID string) (nodeID string, state string, err error) { + // check cache of last stats first + ds.instanceLastStatLock.RLock() + instanceStat, ok := ds.instanceLastStat[instanceID] + ds.instanceLastStatLock.RUnlock() + + if ok { + return instanceStat.NodeID, instanceStat.Status, nil + } + + datastore := ds.getTableDB("instances") + + query := ` + WITH latest AS + ( + SELECT max(tdb.instance_statistics.timestamp), + tdb.instance_statistics.instance_id, + tdb.instance_statistics.state, + tdb.instance_statistics.node_id + FROM tdb.instance_statistics + GROUP BY tdb.instance_statistics.instance_id + ) + SELECT latest.node_id, + IFNULL(latest.state, "pending") AS state + FROM instances + LEFT JOIN latest + ON instances.id = latest.instance_id + WHERE instances.id = ? + ` + + var nullNodeID sql.NullString + var nullState sql.NullString + + err = datastore.QueryRow(query, instanceID).Scan(&nullNodeID, &nullState) + + if nullNodeID.Valid { + nodeID = nullNodeID.String + } + + if nullState.Valid { + state = nullState.String + } + + return + +} + +func (ds *Datastore) getInstanceTenantNetwork(instanceID string) (tenantID string, ipAddress string, err error) { + ds.instancesLock.RLock() + i, ok := ds.instances[instanceID] + ds.instancesLock.RUnlock() + + if ok { + return i.TenantId, i.IPAddress, nil + } + + datastore := ds.getTableDB("instances") + + err = datastore.QueryRow("SELECT tenant_id, ip FROM instances WHERE id = ?", instanceID).Scan(&tenantID, &ipAddress) + return + +} + +func (ds *Datastore) getInstanceOwner(instanceID string) (tenantID string, err error) { + ds.instancesLock.RLock() + i, ok := ds.instances[instanceID] + ds.instancesLock.RUnlock() + + if ok { + return i.TenantId, nil + } + + datastore := ds.getTableDB("instances") + + err = datastore.QueryRow("SELECT tenant_id FROM instances WHERE id = ?", instanceID).Scan(&tenantID) + return + +} + +// AddUsage updates the accounting against this tenant's limits. 
+// usage is a map of resource name to the delta +func (ds *Datastore) AddUsage(tenantID string, instanceID string, usage map[string]int) (err error) { + // update tenant cache + ds.tenantsLock.Lock() + tenant := ds.tenants[tenantID] + if tenant != nil { + for name, val := range usage { + for i := range tenant.Resources { + if tenant.Resources[i].Rname == name { + tenant.Resources[i].Usage += val + break + } + } + } + // increment instances count + for i := range tenant.Resources { + if tenant.Resources[i].Rtype == 1 { + tenant.Resources[i].Usage++ + break + } + } + } + ds.tenantsLock.Unlock() + + go func(instanceID string, usage map[string]int) { + cmd := `INSERT INTO usage (instance_id, resource_id, value) + SELECT '%s', resources.id, %d FROM resources + WHERE name = '%s';` + + for key, val := range usage { + str := fmt.Sprintf(cmd, instanceID, val, key) + ds.dbLock.Lock() + err := ds.exec(ds.getTableDB("usage"), str) + ds.dbLock.Unlock() + if err != nil { + glog.V(2).Info(err) + // but keep going + } + } + }(instanceID, usage) + return +} + +func (ds *Datastore) deleteAllUsage(instanceID string, tenantID string) (err error) { + // remove old tenant info from cache + ds.tenantsLock.Lock() + delete(ds.tenants, tenantID) + ds.tenantsLock.Unlock() + + cmd := fmt.Sprintf("DELETE FROM usage WHERE instance_id = '%s';", instanceID) + ds.dbLock.Lock() + err = ds.exec(ds.getTableDB("usage"), cmd) + ds.dbLock.Unlock() + + // update cache + tenant, err := ds.getTenant(tenantID) + if err != nil || tenant == nil { + glog.V(2).Info(err, " unable to get tenant: ", tenantID) + } + + ds.tenantsLock.Lock() + ds.tenants[tenantID] = tenant + ds.tenantsLock.Unlock() + + return +} + +// HandleStats makes sure that the data from the stat payload is stored. 
+func (ds *Datastore) HandleStats(stat payloads.Stat) (err error) { + if stat.Load != -1 { + ds.addNodeStat(stat) + } + + err = ds.addInstanceStats(stat.Instances, stat.NodeUUID) + if err != nil { + glog.Warning(err) + } + + return +} + +// HandleTraceReport stores the provided trace data in the datastore. +func (ds *Datastore) HandleTraceReport(trace payloads.Trace) (err error) { + for index := range trace.Frames { + i := trace.Frames[index] + err = ds.addFrameStat(i) + if err != nil { + glog.Warning(err) + } + } + return nil +} + +// GetInstanceLastStats retrieves the last instances stats recieved for this node. +// It returns it in a format suitable for the compute API. +func (ds *Datastore) GetInstanceLastStats(nodeID string) payloads.CiaoServersStats { + var serversStats payloads.CiaoServersStats + + ds.instanceLastStatLock.RLock() + for _, instance := range ds.instanceLastStat { + if instance.NodeID != nodeID { + continue + } + serversStats.Servers = append(serversStats.Servers, instance) + } + ds.instanceLastStatLock.RUnlock() + + return serversStats +} + +// GetNodeLastStats retrieves the last nodes stats recieved for this node. +// It returns it in a format suitable for the compute API. 
+func (ds *Datastore) GetNodeLastStats() payloads.CiaoComputeNodes { + var computeNodes payloads.CiaoComputeNodes + + ds.nodeLastStatLock.RLock() + for _, node := range ds.nodeLastStat { + computeNodes.Nodes = append(computeNodes.Nodes, node) + } + ds.nodeLastStatLock.RUnlock() + + return computeNodes +} + +func (ds *Datastore) addNodeStat(stat payloads.Stat) (err error) { + ds.nodesLock.Lock() + n, ok := ds.nodes[stat.NodeUUID] + if !ok { + n = node{} + n.instances = make(map[string]*types.Instance) + ds.nodes[stat.NodeUUID] = n + } + n.ID = stat.NodeUUID + n.Hostname = stat.NodeHostName + ds.nodesLock.Unlock() + + cmd := `INSERT INTO node_statistics (node_id, mem_total_mb, mem_available_mb, disk_total_mb, disk_available_mb, load, cpus_online) + VALUES('%s', %d, %d, %d, %d, %d, %d);` + + str := fmt.Sprintf(cmd, stat.NodeUUID, stat.MemTotalMB, stat.MemAvailableMB, stat.DiskTotalMB, stat.DiskAvailableMB, stat.Load, stat.CpusOnline) + + ds.tdbLock.Lock() + + err = ds.exec(ds.getTableDB("node_statistics"), str) + + ds.tdbLock.Unlock() + + cnStat := payloads.CiaoComputeNode{ + ID: stat.NodeUUID, + Status: stat.Status, + Load: stat.Load, + MemTotal: stat.MemTotalMB, + MemAvailable: stat.MemAvailableMB, + DiskTotal: stat.DiskTotalMB, + DiskAvailable: stat.DiskAvailableMB, + OnlineCPUs: stat.CpusOnline, + } + + ds.nodeLastStatLock.Lock() + + delete(ds.nodeLastStat, stat.NodeUUID) + ds.nodeLastStat[stat.NodeUUID] = cnStat + + ds.nodeLastStatLock.Unlock() + + return +} + +var tenantUsagePeriodMinutes float64 = 5 + +func (ds *Datastore) updateTenantUsageNeeded(delta payloads.CiaoUsage, tenantID string) bool { + if delta.VCPU == 0 && + delta.Memory == 0 && + delta.Disk == 0 { + return false + } + + return true +} + +func (ds *Datastore) updateTenantUsage(delta payloads.CiaoUsage, tenantID string) { + if ds.updateTenantUsageNeeded(delta, tenantID) == false { + return + } + + createNewUsage := true + lastUsage := payloads.CiaoUsage{} + + ds.tenantUsageLock.Lock() + + 
tenantUsage := ds.tenantUsage[tenantID] + if len(tenantUsage) != 0 { + lastUsage = tenantUsage[len(tenantUsage)-1] + // We will not create more than one entry per tenant every tenantUsagePeriodMinutes + if time.Since(lastUsage.Timestamp).Minutes() < tenantUsagePeriodMinutes { + createNewUsage = false + } + } + + newUsage := payloads.CiaoUsage{ + VCPU: lastUsage.VCPU + delta.VCPU, + Memory: lastUsage.Memory + delta.Memory, + Disk: lastUsage.Disk + delta.Disk, + } + + // If we need to create a new usage entry, we timestamp it now. + // If not we just update the last entry. + if createNewUsage == true { + newUsage.Timestamp = time.Now() + ds.tenantUsage[tenantID] = append(ds.tenantUsage[tenantID], newUsage) + } else { + newUsage.Timestamp = lastUsage.Timestamp + tenantUsage[len(tenantUsage)-1] = newUsage + } + + ds.tenantUsageLock.Unlock() +} + +func (ds *Datastore) GetTenantUsage(tenantID string, start time.Time, end time.Time) ([]payloads.CiaoUsage, error) { + ds.tenantUsageLock.RLock() + defer ds.tenantUsageLock.RUnlock() + + tenantUsage := ds.tenantUsage[tenantID] + if tenantUsage == nil || len(tenantUsage) == 0 { + return nil, fmt.Errorf("No usage history for %s", tenantID) + } + + historyLength := len(tenantUsage) + if tenantUsage[0].Timestamp.After(end) == true || + start.After(tenantUsage[historyLength-1].Timestamp) == true { + return nil, nil + } + + first := 0 + last := 0 + for _, u := range tenantUsage { + if start.After(u.Timestamp) == true { + first++ + } + + if end.After(u.Timestamp) == true { + last++ + } + } + + return tenantUsage[first:last], nil +} + +func reduceToZero(v int) int { + if v < 0 { + return 0 + } + + return v +} + +// BUG(kristen) +// we don't lock the tdb database yet. Do we care enough about the +// temp db that we should protect it beyond just with transactions? 
+func (ds *Datastore) addInstanceStats(stats []payloads.InstanceStat, nodeID string) (err error) { + ds.instancesLock.Lock() + for index := range stats { + stat := stats[index] + instance, ok := ds.instances[stat.InstanceUUID] + if ok { + instance.State = stat.State + instance.NodeId = nodeID + instance.SSHIP = stat.SSHIP + instance.SSHPort = stat.SSHPort + ds.nodesLock.Lock() + ds.nodes[nodeID].instances[instance.Id] = instance + ds.nodesLock.Unlock() + } + } + ds.instancesLock.Unlock() + + datastore := ds.getTableDB("instance_statistics") + + ds.tdbLock.Lock() + + tx, err := datastore.Begin() + if err != nil { + ds.tdbLock.Unlock() + return + } + + cmd := `INSERT INTO instance_statistics (instance_id, memory_usage_mb, disk_usage_mb, cpu_usage, state, node_id, ssh_ip, ssh_port) + VALUES(?, ?, ?, ?, ?, ?, ?, ?)` + + stmt, err := tx.Prepare(cmd) + if err != nil { + tx.Rollback() + ds.tdbLock.Unlock() + return + } + defer stmt.Close() + + for index := range stats { + stat := stats[index] + + _, err = stmt.Exec(stat.InstanceUUID, stat.MemoryUsageMB, stat.DiskUsageMB, stat.CPUUsage, stat.State, nodeID, stat.SSHIP, stat.SSHPort) + if err != nil { + glog.Warning(err) + // but keep going + } + + instanceStat := payloads.CiaoServerStats{ + ID: stat.InstanceUUID, + NodeID: nodeID, + Timestamp: time.Now(), + Status: stat.State, + VCPUUsage: reduceToZero(stat.CPUUsage), + MemUsage: reduceToZero(stat.MemoryUsageMB), + DiskUsage: reduceToZero(stat.DiskUsageMB), + } + + ds.instanceLastStatLock.Lock() + + lastInstanceStat := ds.instanceLastStat[stat.InstanceUUID] + + deltaUsage := payloads.CiaoUsage{ + VCPU: instanceStat.VCPUUsage - lastInstanceStat.VCPUUsage, + Memory: instanceStat.MemUsage - lastInstanceStat.MemUsage, + Disk: instanceStat.DiskUsage - lastInstanceStat.DiskUsage, + } + + go ds.updateTenantUsage(deltaUsage, lastInstanceStat.TenantID) + + instanceStat.TenantID = lastInstanceStat.TenantID + + delete(ds.instanceLastStat, stat.InstanceUUID) + 
ds.instanceLastStat[stat.InstanceUUID] = instanceStat + + ds.instanceLastStatLock.Unlock() + + } + tx.Commit() + + ds.tdbLock.Unlock() + + return +} + +func (ds *Datastore) addFrameStat(stat payloads.FrameTrace) (err error) { + datastore := ds.getTableDB("frame_statistics") + + ds.tdbLock.Lock() + + tx, err := datastore.Begin() + if err != nil { + ds.tdbLock.Unlock() + return + } + + cmd := `INSERT INTO frame_statistics (label, type, operand, start_timestamp, end_timestamp) + VALUES('%s', '%s', '%s', '%s', '%s')` + str := fmt.Sprintf(cmd, stat.Label, stat.Type, stat.Operand, stat.StartTimestamp, stat.EndTimestamp) + _, err = tx.Exec(str) + if err != nil { + tx.Rollback() + ds.tdbLock.Unlock() + return + } + + var id int + err = tx.QueryRow("SELECT last_insert_rowid();").Scan(&id) + if err != nil { + tx.Rollback() + ds.tdbLock.Unlock() + return + } + + for index := range stat.Nodes { + t := stat.Nodes[index] + cmd := `INSERT INTO trace_data (frame_id, ssntp_uuid, tx_timestamp, rx_timestamp) + VALUES(%d, '%s', '%s', '%s');` + str := fmt.Sprintf(cmd, id, t.SSNTPUUID, t.TxTimestamp, t.RxTimestamp) + _, err = tx.Exec(str) + if err != nil { + tx.Rollback() + ds.tdbLock.Unlock() + return + } + } + tx.Commit() + ds.tdbLock.Unlock() + return +} + +// GetEventLog retrieves all the log entries stored in the datastore. 
+func (ds *Datastore) GetEventLog() (logEntries []*types.LogEntry, err error) { + datastore := ds.getTableDB("log") + + ds.tdbLock.RLock() + + rows, err := datastore.Query("SELECT timestamp, tenant_id, type, message FROM log") + if err != nil { + ds.tdbLock.RUnlock() + return nil, err + } + defer rows.Close() + + logEntries = make([]*types.LogEntry, 0) + for rows.Next() { + var e types.LogEntry + err = rows.Scan(&e.Timestamp, &e.TenantId, &e.EventType, &e.Message) + if err != nil { + ds.tdbLock.RUnlock() + return nil, err + } + logEntries = append(logEntries, &e) + } + + ds.tdbLock.RUnlock() + + return logEntries, err +} + +// ClearNodeStats will delete all the log entries that were entered prior to the given time. +func (ds *Datastore) ClearNodeStats(before time.Time) (err error) { + ds.tdbLock.Lock() + cmd := "DELETE FROM node_statistics WHERE timestamp < '%s'" + str := fmt.Sprintf(cmd, before) + err = ds.exec(ds.getTableDB("node_statistics"), str) + ds.tdbLock.Unlock() + return +} + +// GetNodeStats returns all node stats received between start and end time. +func (ds *Datastore) GetNodeStats(start time.Time, end time.Time) (statsRows []*types.NodeStats, err error) { + datastore := ds.getTableDB("node_statistics") + ds.tdbLock.RLock() + + rows, err := datastore.Query("SELECT timestamp, node_id, load, mem_total_mb, mem_available_mb, disk_total_mb, disk_available_mb, cpus_online FROM node_statistics WHERE timestamp BETWEEN ? 
AND ?", start, end) + if err != nil { + ds.tdbLock.RUnlock() + return nil, err + } + defer rows.Close() + + for rows.Next() { + var r types.NodeStats + + err = rows.Scan(&r.Timestamp, &r.NodeId, &r.Load, &r.MemTotalMB, &r.MemAvailableMB, &r.DiskTotalMB, &r.DiskAvailableMB, &r.CpusOnline) + if err != nil { + ds.tdbLock.RUnlock() + return nil, err + } + statsRows = append(statsRows, &r) + } + + if len(statsRows) == 0 { + ds.tdbLock.RUnlock() + return nil, err + } + ds.tdbLock.RUnlock() + + return statsRows, err +} + +// GetNodeSummary provides a summary the state and count of instances running per node. +func (ds *Datastore) GetNodeSummary() (Summary []*types.NodeSummary, err error) { + datastore := ds.getTableDB("instance_statistics") + ds.tdbLock.RLock() + tx, err := datastore.Begin() + if err != nil { + ds.tdbLock.RUnlock() + return + } + + query := ` +WITH instances AS +( + WITH latest AS + ( + SELECT max(timestamp), + instance_id, + node_id, + state + FROM instance_statistics + GROUP BY instance_id + ) + SELECT db.instances.id AS instance_id, + IFNULL(latest.state, "pending") AS state, + IFNULL(latest.node_id, "Not Assigned") AS node_id + FROM db.instances + LEFT JOIN latest + ON db.instances.id = latest.instance_id +), +total_instances AS +( + SELECT IFNULL(instances.node_id, "Not Assigned to Node") AS node_id, + count(instances.instance_id) AS total + FROM instances + GROUP BY node_id +), +total_running AS +( + SELECT instances.node_id AS node_id, + count(instances.instance_id) AS total + FROM instances + WHERE state='running' + GROUP BY node_id +), +total_pending AS +( + SELECT instances.node_id AS node_id, + count(instances.instance_id) AS total + FROM instances + WHERE state='pending' + GROUP BY node_id +), +total_exited AS +( + SELECT instances.node_id, + count(instances.instance_id) AS total + FROM instances + WHERE state='exited' + GROUP BY node_id +) +SELECT total_instances.node_id, + total_instances.total, + IFNULL(total_running.total, 0), + 
IFNULL(total_pending.total, 0), + IFNULL(total_exited.total, 0) +FROM total_instances +LEFT JOIN total_running +ON total_instances.node_id = total_running.node_id +LEFT JOIN total_pending +ON total_instances.node_id = total_pending.node_id +LEFT JOIN total_exited +ON total_instances.node_id = total_exited.node_id +` + + rows, err := tx.Query(query) + if err != nil { + glog.V(2).Info("Failed to get Node Summary: ", err) + tx.Rollback() + ds.tdbLock.RUnlock() + return nil, err + } + defer rows.Close() + + Summary = make([]*types.NodeSummary, 0) + for rows.Next() { + var n types.NodeSummary + err = rows.Scan(&n.NodeId, &n.TotalInstances, &n.TotalRunningInstances, &n.TotalPendingInstances, &n.TotalPausedInstances) + if err != nil { + tx.Rollback() + ds.tdbLock.RUnlock() + return nil, err + } + Summary = append(Summary, &n) + } + tx.Commit() + ds.tdbLock.RUnlock() + + return Summary, err +} + +// GetTenantCNCISummary retrieves information about a given CNCI id, or all CNCIs +// If the cnci string is the null string, then this function will retrieve all +// tenants. If cnci is not null, it will only provide information about a specific +// cnci. 
+func (ds *Datastore) GetTenantCNCISummary(cnci string) (cncis []types.TenantCNCI, err error) { + datastore := ds.getTableDB("tenants") + var query string + + if cnci == "" { + query = "SELECT id, cnci_ip, cnci_mac, cnci_id FROM tenants" + } else { + query = fmt.Sprintf("SELECT id, cnci_ip, cnci_mac, cnci_id FROM tenants WHERE cnci_id = '%s'", cnci) + } + rows, err := datastore.Query(query) + if err != nil { + return nil, err + } + defer rows.Close() + + subnetBytes := []byte{0, 0} + cncis = make([]types.TenantCNCI, 0) + for rows.Next() { + var cn types.TenantCNCI + err = rows.Scan(&cn.TenantID, &cn.IPAddress, &cn.MACAddress, &cn.InstanceID) + if err != nil { + return + } + + tenant, err := ds.getTenant(cn.TenantID) + if err != nil && tenant != nil { + continue + } + + for _, subnet := range tenant.subnets { + binary.BigEndian.PutUint16(subnetBytes, (uint16)(subnet)) + cn.Subnets = append(cn.Subnets, fmt.Sprintf("Subnet 172.%d.%d.0/8", subnetBytes[0], subnetBytes[1])) + } + + cncis = append(cncis, cn) + } + + return cncis, err +} + +// GetFrameStatistics will return trace data by label id. +func (ds *Datastore) GetFrameStatistics(label string) (stats []types.FrameStat, err error) { + ds.tdbLock.RLock() + query := `WITH total AS + ( + SELECT id, + start_timestamp, + end_timestamp, + (julianday(end_timestamp) - julianday(start_timestamp)) * 24 * 60 * 60 AS total_elapsed + FROM frame_statistics + WHERE label = ? 
+ ), + total_start AS + ( + SELECT trace_data.frame_id, + trace_data.ssntp_uuid, + (julianday(trace_data.tx_timestamp) - julianday(total.start_timestamp)) * 24 * 60 * 60 AS total_elapsed + FROM trace_data + JOIN total + WHERE rx_timestamp = '' and trace_data.frame_id = total.id + ), + total_end AS + ( + SELECT trace_data.frame_id, + trace_data.ssntp_uuid, + (julianday(total.end_timestamp) - julianday(trace_data.rx_timestamp)) * 24 * 60 * 60 AS total_elapsed + FROM trace_data + JOIN total + WHERE tx_timestamp = '' and trace_data.frame_id = total.id + ), + total_per_node AS + ( + SELECT trace_data.frame_id, + trace_data.ssntp_uuid, + (julianday(trace_data.tx_timestamp) - julianday(trace_data.rx_timestamp)) * 24 * 60 * 60 AS total_elapsed + FROM trace_data + WHERE tx_timestamp != '' and rx_timestamp != '' + ) + SELECT total_end.ssntp_uuid, + total.total_elapsed, + total_start.total_elapsed, + total_end.total_elapsed, + total_per_node.total_elapsed + FROM total + LEFT JOIN total_start + ON total.id = total_start.frame_id + LEFT JOIN total_end + ON total_start.frame_id = total_end.frame_id + LEFT JOIN total_per_node + ON total_start.frame_id = total_per_node.frame_id + ORDER BY total.start_timestamp;` + + datastore := ds.getTableDB("frame_statistics") + + rows, err := datastore.Query(query, label) + if err != nil { + ds.tdbLock.RUnlock() + return nil, err + } + defer rows.Close() + + stats = make([]types.FrameStat, 0) + for rows.Next() { + var stat types.FrameStat + var uuid sql.NullString + var controllerTime sql.NullFloat64 + var launcherTime sql.NullFloat64 + var schedulerTime sql.NullFloat64 + var totalTime sql.NullFloat64 + err = rows.Scan(&uuid, &totalTime, &controllerTime, &launcherTime, &schedulerTime) + if err != nil { + ds.tdbLock.RUnlock() + return + } + if uuid.Valid { + stat.ID = uuid.String + } + if controllerTime.Valid { + stat.ControllerTime = controllerTime.Float64 + } + if launcherTime.Valid { + stat.LauncherTime = launcherTime.Float64 + } + if 
schedulerTime.Valid { + stat.SchedulerTime = schedulerTime.Float64 + } + if totalTime.Valid { + stat.TotalElapsedTime = totalTime.Float64 + } + stats = append(stats, stat) + } + ds.tdbLock.RUnlock() + + return stats, err +} + +// GetBatchFrameSummary will retieve the count of traces we have for a specific label +func (ds *Datastore) GetBatchFrameSummary() (stats []types.BatchFrameSummary, err error) { + datastore := ds.getTableDB("frame_statistics") + + ds.tdbLock.RLock() + + query := `SELECT label, count(id) + FROM frame_statistics + GROUP BY label;` + + rows, err := datastore.Query(query) + if err != nil { + ds.tdbLock.RUnlock() + return nil, err + } + defer rows.Close() + + stats = make([]types.BatchFrameSummary, 0) + for rows.Next() { + var stat types.BatchFrameSummary + err = rows.Scan(&stat.BatchID, &stat.NumInstances) + if err != nil { + ds.tdbLock.RUnlock() + return + } + stats = append(stats, stat) + } + ds.tdbLock.RUnlock() + + return stats, err +} + +// GetBatchFrameStatistics will show individual trace data per instance for a batch of trace data. +// The batch is identified by the label. +func (ds *Datastore) GetBatchFrameStatistics(label string) (stats []types.BatchFrameStat, err error) { + datastore := ds.getTableDB("frame_statistics") + + query := `WITH total AS + ( + SELECT id, + start_timestamp, + end_timestamp, + (julianday(end_timestamp) - julianday(start_timestamp)) * 24 * 60 * 60 AS total_elapsed + FROM frame_statistics + WHERE label = ? 
+ ), + total_start AS + ( + SELECT trace_data.frame_id, + trace_data.ssntp_uuid, + (julianday(trace_data.tx_timestamp) - julianday(total.start_timestamp)) * 24 * 60 * 60 AS total_elapsed + FROM trace_data + JOIN total + WHERE rx_timestamp = '' and trace_data.frame_id = total.id + ), + total_end AS + ( + SELECT trace_data.frame_id, + trace_data.ssntp_uuid, + (julianday(total.end_timestamp) - julianday(trace_data.rx_timestamp)) * 24 * 60 * 60 AS total_elapsed + FROM trace_data + JOIN total + WHERE tx_timestamp = '' and trace_data.frame_id = total.id + ), + total_per_node AS + ( + SELECT trace_data.frame_id, + trace_data.ssntp_uuid, + (julianday(trace_data.tx_timestamp) - julianday(trace_data.rx_timestamp)) * 24 * 60 *60 AS total_elapsed + FROM trace_data + WHERE tx_timestamp != '' and rx_timestamp != '' + ), + diffs AS + ( + SELECT total.id AS id, + total.total_elapsed AS total_elapsed, + total_start.total_elapsed AS controller_elapsed, + total_end.total_elapsed AS launcher_elapsed, + total_per_node.total_elapsed AS scheduler_elapsed + FROM total + LEFT JOIN total_start + ON total.id = total_start.frame_id + LEFT JOIN total_end + ON total_start.frame_id = total_end.frame_id + LEFT JOIN total_per_node + ON total_start.frame_id = total_per_node.frame_id + ), + averages AS + ( + SELECT avg(diffs.total_elapsed) AS avg_total_elapsed, + avg(diffs.controller_elapsed) AS avg_controller, + avg(diffs.launcher_elapsed) AS avg_launcher, + avg(diffs.scheduler_elapsed) AS avg_scheduler + FROM diffs + ), + variance AS + ( + SELECT avg((total_start.total_elapsed - averages.avg_controller) * (total_start.total_elapsed - averages.avg_controller)) AS controller, + avg((total_end.total_elapsed - averages.avg_launcher) * (total_end.total_elapsed - averages.avg_launcher)) AS launcher, + avg((total_per_node.total_elapsed - averages.avg_scheduler) * (total_per_node.total_elapsed - averages.avg_scheduler)) AS scheduler + FROM total_start + LEFT JOIN total_end + ON total_start.frame_id = 
total_end.frame_id + LEFT JOIN total_per_node + ON total_start.frame_id = total_per_node.frame_id + JOIN averages + ) + SELECT count(total.id) AS num_instances, + (julianday(max(total.end_timestamp)) - julianday(min(total.start_timestamp))) * 24 * 60 * 60 AS total_elapsed, + averages.avg_total_elapsed AS average_total_elapsed, + averages.avg_controller AS average_controller_elapsed, + averages.avg_launcher AS average_launcher_elapsed, + averages.avg_scheduler AS average_scheduler_elapsed, + variance.controller AS controller_variance, + variance.launcher AS launcher_variance, + variance.scheduler AS scheduler_variance + FROM variance + JOIN total + JOIN averages;` + ds.tdbLock.RLock() + rows, err := datastore.Query(query, label) + if err != nil { + ds.tdbLock.RUnlock() + return nil, err + } + defer rows.Close() + + stats = make([]types.BatchFrameStat, 0) + for rows.Next() { + var stat types.BatchFrameStat + var numInstances sql.NullInt64 + var totalElapsed sql.NullFloat64 + var averageElapsed sql.NullFloat64 + var averageControllerElapsed sql.NullFloat64 + var averageLauncherElapsed sql.NullFloat64 + var averageSchedulerElapsed sql.NullFloat64 + var varianceController sql.NullFloat64 + var varianceLauncher sql.NullFloat64 + var varianceScheduler sql.NullFloat64 + + err = rows.Scan(&numInstances, &totalElapsed, &averageElapsed, &averageControllerElapsed, &averageLauncherElapsed, &averageSchedulerElapsed, &varianceController, &varianceLauncher, &varianceScheduler) + if err != nil { + ds.tdbLock.RUnlock() + return + } + if numInstances.Valid { + stat.NumInstances = int(numInstances.Int64) + } + if totalElapsed.Valid { + stat.TotalElapsed = totalElapsed.Float64 + } + if averageElapsed.Valid { + stat.AverageElapsed = averageElapsed.Float64 + } + if averageControllerElapsed.Valid { + stat.AverageControllerElapsed = averageControllerElapsed.Float64 + } + if averageLauncherElapsed.Valid { + stat.AverageLauncherElapsed = averageLauncherElapsed.Float64 + } + if 
averageSchedulerElapsed.Valid { + stat.AverageSchedulerElapsed = averageSchedulerElapsed.Float64 + } + if varianceController.Valid { + stat.VarianceController = varianceController.Float64 + } + if varianceLauncher.Valid { + stat.VarianceLauncher = varianceLauncher.Float64 + } + if varianceScheduler.Valid { + stat.VarianceScheduler = varianceScheduler.Float64 + } + stats = append(stats, stat) + } + + ds.tdbLock.RUnlock() + + return stats, err +} + +// GetNodes will retrieve a list of all the nodes that we have information on +func (ds *Datastore) GetNodes() (nodes []types.Node, err error) { + ds.nodesLock.RLock() + + for i := range ds.nodes { + nodes = append(nodes, ds.nodes[i].Node) + } + + ds.nodesLock.RUnlock() + + return nodes, nil +} diff --git a/ciao-controller/internal/datastore/datastore_test.go b/ciao-controller/internal/datastore/datastore_test.go new file mode 100644 index 000000000..b6cf2bd74 --- /dev/null +++ b/ciao-controller/internal/datastore/datastore_test.go @@ -0,0 +1,1927 @@ +/* +// Copyright (c) 2016 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+*/ + +package datastore + +import ( + "database/sql" + "encoding/binary" + "errors" + "flag" + "fmt" + "github.com/01org/ciao/ciao-controller/types" + "github.com/01org/ciao/payloads" + "github.com/docker/distribution/uuid" + "net" + "os" + "testing" + "time" +) + +func newTenantHardwareAddr(ip net.IP) (hw net.HardwareAddr) { + buf := make([]byte, 6) + ipBytes := ip.To4() + buf[0] |= 2 + buf[1] = 0 + copy(buf[2:6], ipBytes) + hw = net.HardwareAddr(buf) + return +} + +func addTestInstance(tenant *types.Tenant, workload *types.Workload) (instance *types.Instance, err error) { + id := uuid.Generate() + + ip, err := ds.AllocateTenantIP(tenant.Id) + if err != nil { + return + } + + mac := newTenantHardwareAddr(ip) + + instance = &types.Instance{ + TenantId: tenant.Id, + WorkloadId: workload.Id, + State: payloads.Pending, + Id: id.String(), + CNCI: false, + IPAddress: ip.String(), + MACAddress: mac.String(), + } + + err = ds.AddInstance(instance) + if err != nil { + return + } + + resources := make(map[string]int) + rr := workload.Defaults + + for i := range rr { + resources[string(rr[i].Type)] = rr[i].Value + } + + err = ds.AddUsage(tenant.Id, instance.Id, resources) + + return +} + +func addTestTenant() (tenant *types.Tenant, err error) { + /* add a new tenant */ + tuuid := uuid.Generate() + tenant, err = ds.AddTenant(tuuid.String()) + if err != nil { + return + } + + // Add fake CNCI + err = ds.AddTenantCNCI(tuuid.String(), uuid.Generate().String(), tenant.CNCIMAC) + if err != nil { + return + } + err = ds.AddCNCIIP(tenant.CNCIMAC, "192.168.0.1") + if err != nil { + return + } + return +} + +func BenchmarkGetTenantResources(b *testing.B) { + /* add a new tenant */ + tuuid := uuid.Generate().String() + _, err := ds.AddTenant(tuuid) + if err != nil { + b.Error(err) + } + + b.ResetTimer() + for n := 0; n < b.N; n++ { + _, err = ds.getTenantResources(tuuid) + if err != nil { + b.Error(err) + } + } +} + +func BenchmarkAllocateTenantIP(b *testing.B) { + /* add a new tenant 
*/ + tuuid := uuid.Generate().String() + _, err := ds.AddTenant(tuuid) + if err != nil { + b.Error(err) + } + + b.ResetTimer() + for n := 0; n < b.N; n++ { + _, err = ds.AllocateTenantIP(tuuid) + if err != nil { + b.Error(err) + } + } +} + +func BenchmarkGetAllInstances(b *testing.B) { + for n := 0; n < b.N; n++ { + _, err := ds.GetAllInstances() + if err != nil { + b.Fatal(err) + } + } +} + +func BenchmarkGetTenantCNCI(b *testing.B) { + tenant, err := addTestTenant() + if err != nil { + b.Error(err) + } + + b.ResetTimer() + for n := 0; n < b.N; n++ { + _, _, _, err := ds.getTenantCNCI(tenant.Id) + if err != nil { + b.Error(err) + } + } +} + +func TestTenantCreate(t *testing.T) { + var err error + + /* add a new tenant */ + tuuid := uuid.Generate() + tenant, err := ds.AddTenant(tuuid.String()) + if err != nil { + t.Error(err) + } + tenant, err = ds.GetTenant(tuuid.String()) + if err != nil { + t.Error(err) + } + if tenant == nil { + t.Error(err) + } +} + +func TestGetWorkloads(t *testing.T) { + wls, err := ds.getWorkloads() + if err != nil { + t.Error(err) + } + + if len(wls) == 0 { + t.Fatal("No Workloads Found") + } +} + +func TestAddInstance(t *testing.T) { + tenant, err := addTestTenant() + if err != nil { + t.Error(err) + } + + wls, err := ds.GetWorkloads() + if err != nil { + t.Error(err) + } + + if len(wls) == 0 { + t.Fatal("No Workloads Found") + } + + _, err = addTestInstance(tenant, wls[0]) + if err != nil { + t.Error(err) + } +} + +func TestDeleteInstance(t *testing.T) { + tenant, err := addTestTenant() + if err != nil { + t.Error(err) + } + + wls, err := ds.GetWorkloads() + if err != nil { + t.Error(err) + } + + if len(wls) == 0 { + t.Fatal("No Workloads Found") + } + + instance, err := addTestInstance(tenant, wls[0]) + if err != nil { + t.Error(err) + } + + // update tenant Info + tenantBefore, err := ds.getTenant(tenant.Id) + if err != nil { + t.Error(err) + } + + resourcesBefore := make(map[string]int) + for i := range tenantBefore.Resources { + r := 
tenantBefore.Resources[i] + resourcesBefore[r.Rname] = r.Usage + } + + time.Sleep(1 * time.Second) + + err = ds.DeleteInstance(instance.Id) + if err != nil { + t.Error(err) + } + + tenantAfter, err := ds.getTenant(tenant.Id) + if err != nil { + t.Error(err) + } + + defaults := wls[0].Defaults + + usage := make(map[string]int) + for i := range defaults { + usage[string(defaults[i].Type)] = defaults[i].Value + } + + resourcesAfter := make(map[string]int) + for i := range tenantAfter.Resources { + r := tenantAfter.Resources[i] + resourcesAfter[r.Rname] = r.Usage + } + + // make sure usage was reduced by workload defaults values + for name, val := range resourcesAfter { + before := resourcesBefore[name] + delta := usage[name] + if val != before-delta { + t.Error("usage not reduced") + } + } + + ip := net.ParseIP(instance.IPAddress) + + ipBytes := ip.To4() + if ipBytes == nil { + t.Error(errors.New("Unable to convert ip to bytes")) + } + + subnetInt := binary.BigEndian.Uint16(ipBytes[1:3]) + + // confirm that tenant map shows it not used. + if tenantAfter.network[int(subnetInt)][int(ipBytes[3])] != false { + t.Error("IP Address not released from cache") + } + + time.Sleep(1 * time.Second) + + // clear tenant from cache + ds.tenantsLock.Lock() + delete(ds.tenants, tenant.Id) + ds.tenantsLock.Unlock() + + // get updated tenant info - should hit database + newTenant, err := ds.getTenant(tenant.Id) + if err != nil { + t.Error(err) + } + + // confirm that tenant map shows it not used. 
+ if newTenant.network[int(subnetInt)][int(ipBytes[3])] != false { + t.Error("IP Address not released from database") + } +} + +func TestGetAllInstances(t *testing.T) { + instancesBefore, err := ds.GetAllInstances() + if err != nil { + t.Fatal(err) + } + + tenant, err := addTestTenant() + if err != nil { + t.Fatal(err) + } + + wls, err := ds.GetWorkloads() + if err != nil { + t.Fatal(err) + } + + if len(wls) == 0 { + t.Fatal("No Workloads Found") + } + + for i := 0; i < 10; i++ { + _, err = addTestInstance(tenant, wls[0]) + if err != nil { + t.Error(err) + } + } + + instances, err := ds.GetAllInstances() + if err != nil { + t.Fatal(err) + } + + if len(instances) != (len(instancesBefore) + 10) { + t.Fatal(err) + } +} + +func TestGetAllInstancesFromTenant(t *testing.T) { + var err error + + /* add a new tenant */ + tenant, err := addTestTenant() + if err != nil { + t.Error(err) + } + + wls, err := ds.GetWorkloads() + if err != nil { + t.Error(err) + } + + if len(wls) == 0 { + t.Fatal("No Workloads Found") + } + + for i := 0; i < 10; i++ { + _, err = addTestInstance(tenant, wls[0]) + if err != nil { + t.Error(err) + } + } + + // if we don't get 10 eventually, the test will timeout and fail + instances, err := ds.GetAllInstancesFromTenant(tenant.Id) + for len(instances) < 10 { + time.Sleep(1 * time.Second) + instances, err = ds.GetAllInstancesFromTenant(tenant.Id) + } + + if err != nil { + t.Error(err) + } + + if len(instances) < 10 { + t.Error("Didn't get right number of instances") + } +} + +func TestGetAllInstancesByNode(t *testing.T) { + tenant, err := addTestTenant() + if err != nil { + t.Error(err) + } + + wls, err := ds.GetWorkloads() + if err != nil { + t.Error(err) + } + + if len(wls) == 0 { + t.Fatal("No Workloads Found") + } + + var instances []*types.Instance + + for i := 0; i < 10; i++ { + instance, err := addTestInstance(tenant, wls[0]) + if err != nil { + t.Error(err) + } + instances = append(instances, instance) + } + + var stats []payloads.InstanceStat 
+ + for i := range instances { + stat := payloads.InstanceStat{ + InstanceUUID: instances[i].Id, + State: "running", + SSHIP: "192.168.0.1", + SSHPort: 34567, + MemoryUsageMB: 0, + DiskUsageMB: 0, + CPUUsage: 0, + } + stats = append(stats, stat) + } + stat := payloads.Stat{ + NodeUUID: uuid.Generate().String(), + MemTotalMB: 256, + MemAvailableMB: 256, + DiskTotalMB: 1024, + DiskAvailableMB: 1024, + Load: 20, + CpusOnline: 4, + NodeHostName: "test", + Instances: stats, + } + + err = ds.addNodeStat(stat) + if err != nil { + t.Error(err) + } + + err = ds.addInstanceStats(stats, stat.NodeUUID) + if err != nil { + t.Error(err) + } + + newInstances, err := ds.GetAllInstancesByNode(stat.NodeUUID) + if err != nil { + t.Error(err) + } + + retry := 5 + for len(newInstances) < len(instances) && retry > 0 { + retry-- + time.Sleep(1 * time.Second) + newInstances, err = ds.GetAllInstancesByNode(stat.NodeUUID) + if err != nil { + t.Error(err) + } + } + + if len(newInstances) != len(instances) { + msg := fmt.Sprintf("expected %d instances, got %d", len(instances), len(newInstances)) + t.Error(msg) + } +} + +func TestGetInstancesFromTenant(t *testing.T) { + tenant, err := addTestTenant() + if err != nil { + t.Error(err) + } + + wls, err := ds.GetWorkloads() + if err != nil { + t.Error(err) + } + + if len(wls) == 0 { + t.Fatal("No Workloads Found") + } + + var instances []*types.Instance + + for i := 0; i < 10; i++ { + instance, err := addTestInstance(tenant, wls[0]) + if err != nil { + t.Error(err) + } + instances = append(instances, instance) + } + + var stats []payloads.InstanceStat + + for i := range instances { + stat := payloads.InstanceStat{ + InstanceUUID: instances[i].Id, + State: "running", + SSHIP: "192.168.0.1", + SSHPort: 34567, + MemoryUsageMB: 0, + DiskUsageMB: 0, + CPUUsage: 0, + } + stats = append(stats, stat) + } + stat := payloads.Stat{ + NodeUUID: uuid.Generate().String(), + MemTotalMB: 256, + MemAvailableMB: 256, + DiskTotalMB: 1024, + DiskAvailableMB: 1024, + 
Load: 20, + CpusOnline: 4, + NodeHostName: "test", + Instances: stats, + } + + err = ds.addNodeStat(stat) + if err != nil { + t.Error(err) + } + + err = ds.addInstanceStats(stats, stat.NodeUUID) + if err != nil { + t.Error(err) + } + + instance, err := ds.GetInstanceFromTenant(tenant.Id, instances[0].Id) + if err != nil && err != sql.ErrNoRows { + t.Error(err) + } + + for instance == nil { + time.Sleep(1 * time.Second) + instance, err = ds.GetInstanceFromTenant(tenant.Id, instances[0].Id) + if err != nil && err != sql.ErrNoRows { + t.Error(err) + } + } + // check contents of instance for correctness - TBD +} + +func TestGetInstanceInfo(t *testing.T) { + tenant, err := addTestTenant() + if err != nil { + t.Error(err) + } + + wls, err := ds.GetWorkloads() + if err != nil { + t.Error(err) + } + + if len(wls) == 0 { + t.Fatal("No Workloads Found") + } + + instance, err := addTestInstance(tenant, wls[0]) + if err != nil { + t.Error(err) + } + + time.Sleep(1 * time.Second) + + nodeID, state, err := ds.GetInstanceInfo(instance.Id) + if err != nil { + t.Error(err) + } + + if nodeID != "" { + t.Error(errors.New("Expected NULL nodeID")) + } + + if state != "pending" { + t.Error(errors.New("Expected pending state")) + } + + // add some stats and retest + var stats []payloads.InstanceStat + + istat := payloads.InstanceStat{ + InstanceUUID: instance.Id, + State: "running", + SSHIP: "192.168.0.1", + SSHPort: 34567, + MemoryUsageMB: 0, + DiskUsageMB: 0, + CPUUsage: 0, + } + stats = append(stats, istat) + + stat := payloads.Stat{ + NodeUUID: uuid.Generate().String(), + MemTotalMB: 256, + MemAvailableMB: 256, + DiskTotalMB: 1024, + DiskAvailableMB: 1024, + Load: 20, + CpusOnline: 4, + NodeHostName: "test", + Instances: stats, + } + + err = ds.addNodeStat(stat) + if err != nil { + t.Error(err) + } + + err = ds.addInstanceStats(stats, stat.NodeUUID) + if err != nil { + t.Error(err) + } + + nodeID, state, err = ds.GetInstanceInfo(instance.Id) + if err != nil { + t.Error(err) + } + + 
if nodeID != stat.NodeUUID { + t.Error("retrieved incorrect NodeID") + } + + if state != "running" { + t.Error("retrieved incorrect state") + } + + // now clear instance cache to exercise sql + ds.instanceLastStatLock.Lock() + delete(ds.instanceLastStat, instance.Id) + ds.instanceLastStatLock.Unlock() + + nodeID, state, err = ds.GetInstanceInfo(instance.Id) + if err != nil { + t.Error(err) + } + + if nodeID != stat.NodeUUID { + t.Error("retrieved incorrect NodeID") + } + + if state != "running" { + t.Error("retrieved incorrect state") + } +} + +func TestHandleStats(t *testing.T) { + tenant, err := addTestTenant() + if err != nil { + t.Error(err) + } + + wls, err := ds.GetWorkloads() + if err != nil { + t.Error(err) + } + + if len(wls) == 0 { + t.Fatal("No Workloads Found") + } + + var instances []*types.Instance + + for i := 0; i < 10; i++ { + instance, err := addTestInstance(tenant, wls[0]) + if err != nil { + t.Error(err) + } + instances = append(instances, instance) + } + + var stats []payloads.InstanceStat + + for i := range instances { + stat := payloads.InstanceStat{ + InstanceUUID: instances[i].Id, + State: "running", + SSHIP: "192.168.0.1", + SSHPort: 34567, + MemoryUsageMB: 0, + DiskUsageMB: 0, + CPUUsage: 0, + } + stats = append(stats, stat) + } + stat := payloads.Stat{ + NodeUUID: uuid.Generate().String(), + MemTotalMB: 256, + MemAvailableMB: 256, + DiskTotalMB: 1024, + DiskAvailableMB: 1024, + Load: 20, + CpusOnline: 4, + NodeHostName: "test", + Instances: stats, + } + + err = ds.HandleStats(stat) + if err != nil { + t.Error(err) + } + + time.Sleep(1 * time.Second) + + // check instance stats recorded + for i := range stats { + id := stats[i].InstanceUUID + nodeID, state, err := ds.GetInstanceInfo(id) + if err != nil { + t.Error(err) + } + + if nodeID != stat.NodeUUID { + t.Error("Incorrect NodeID in stats table") + } + + if state != "running" { + t.Error("state not updated") + } + } + + // check node stats recorded + end := time.Now().UTC() + start := 
end.Add(-20 * time.Minute) + + statsRows, err := ds.GetNodeStats(start, end) + if err != nil { + t.Fatal(err) + } + + for i := range statsRows { + if statsRows[i].NodeId == stat.NodeUUID { + return + } + } + t.Error("node stat not found") +} + +func TestGetInstanceLastStats(t *testing.T) { + tenant, err := addTestTenant() + if err != nil { + t.Error(err) + } + + wls, err := ds.GetWorkloads() + if err != nil { + t.Error(err) + } + + if len(wls) == 0 { + t.Fatal("No Workloads Found") + } + + var instances []*types.Instance + + for i := 0; i < 10; i++ { + instance, err := addTestInstance(tenant, wls[0]) + if err != nil { + t.Error(err) + } + instances = append(instances, instance) + } + + var stats []payloads.InstanceStat + + for i := range instances { + stat := payloads.InstanceStat{ + InstanceUUID: instances[i].Id, + State: "running", + SSHIP: "192.168.0.1", + SSHPort: 34567, + MemoryUsageMB: 0, + DiskUsageMB: 0, + CPUUsage: 0, + } + stats = append(stats, stat) + } + stat := payloads.Stat{ + NodeUUID: uuid.Generate().String(), + MemTotalMB: 256, + MemAvailableMB: 256, + DiskTotalMB: 1024, + DiskAvailableMB: 1024, + Load: 20, + CpusOnline: 4, + NodeHostName: "test", + Instances: stats, + } + + err = ds.HandleStats(stat) + if err != nil { + t.Error(err) + } + + time.Sleep(1 * time.Second) + + serverStats := ds.GetInstanceLastStats(stat.NodeUUID) + + if len(serverStats.Servers) != len(instances) { + t.Error("Not enough instance stats retrieved") + } +} + +func TestGetNodeLastStats(t *testing.T) { + tenant, err := addTestTenant() + if err != nil { + t.Error(err) + } + + wls, err := ds.GetWorkloads() + if err != nil { + t.Error(err) + } + + if len(wls) == 0 { + t.Fatal("No Workloads Found") + } + + var instances []*types.Instance + + for i := 0; i < 10; i++ { + instance, err := addTestInstance(tenant, wls[0]) + if err != nil { + t.Error(err) + } + instances = append(instances, instance) + } + + var stats []payloads.InstanceStat + + for i := range instances { + stat := 
payloads.InstanceStat{ + InstanceUUID: instances[i].Id, + State: "running", + SSHIP: "192.168.0.1", + SSHPort: 34567, + MemoryUsageMB: 0, + DiskUsageMB: 0, + CPUUsage: 0, + } + stats = append(stats, stat) + } + stat := payloads.Stat{ + NodeUUID: uuid.Generate().String(), + MemTotalMB: 256, + MemAvailableMB: 256, + DiskTotalMB: 1024, + DiskAvailableMB: 1024, + Load: 20, + CpusOnline: 4, + NodeHostName: "test", + Instances: stats, + } + + err = ds.HandleStats(stat) + if err != nil { + t.Error(err) + } + + time.Sleep(1 * time.Second) + + computeNodes := ds.GetNodeLastStats() + + // how many compute Nodes should be here? If we want to + // control we need to clear out previous test stats + if len(computeNodes.Nodes) == 0 { + t.Error("Not enough compute Nodes found") + } +} + +func TestGetBatchFrameStatistics(t *testing.T) { + var nodes []payloads.SSNTPNode + for i := 0; i < 3; i++ { + node := payloads.SSNTPNode{ + SSNTPUUID: uuid.Generate().String(), + SSNTPRole: "test", + TxTimestamp: time.Now().Format(time.RFC3339Nano), + RxTimestamp: time.Now().Format(time.RFC3339Nano), + } + nodes = append(nodes, node) + } + + var frames []payloads.FrameTrace + for i := 0; i < 3; i++ { + stat := payloads.FrameTrace{ + Label: "batch_frame_test", + Type: "type", + Operand: "operand", + StartTimestamp: time.Now().Format(time.RFC3339Nano), + EndTimestamp: time.Now().Format(time.RFC3339Nano), + Nodes: nodes, + } + frames = append(frames, stat) + } + + trace := payloads.Trace{ + Frames: frames, + } + + err := ds.HandleTraceReport(trace) + if err != nil { + t.Error(err) + } + + _, err = ds.GetBatchFrameStatistics("batch_frame_test") + if err != nil { + t.Errorf(err.Error()) + } +} + +func TestGetBatchFrameSummary(t *testing.T) { + var nodes []payloads.SSNTPNode + for i := 0; i < 3; i++ { + node := payloads.SSNTPNode{ + SSNTPUUID: uuid.Generate().String(), + SSNTPRole: "test", + TxTimestamp: time.Now().Format(time.RFC3339Nano), + RxTimestamp: time.Now().Format(time.RFC3339Nano), + } + 
nodes = append(nodes, node) + } + + var frames []payloads.FrameTrace + for i := 0; i < 3; i++ { + stat := payloads.FrameTrace{ + Label: "batch_summary_test", + Type: "type", + Operand: "operand", + StartTimestamp: time.Now().Format(time.RFC3339Nano), + EndTimestamp: time.Now().Format(time.RFC3339Nano), + Nodes: nodes, + } + frames = append(frames, stat) + } + + trace := payloads.Trace{ + Frames: frames, + } + + err := ds.HandleTraceReport(trace) + if err != nil { + t.Error(err) + } + + _, err = ds.GetBatchFrameSummary() + if err != nil { + t.Errorf(err.Error()) + } +} + +func TestGetFrameStatistics(t *testing.T) { + var nodes []payloads.SSNTPNode + for i := 0; i < 3; i++ { + node := payloads.SSNTPNode{ + SSNTPUUID: uuid.Generate().String(), + SSNTPRole: "test", + TxTimestamp: time.Now().Format(time.RFC3339Nano), + RxTimestamp: time.Now().Format(time.RFC3339Nano), + } + nodes = append(nodes, node) + } + + var frames []payloads.FrameTrace + for i := 0; i < 3; i++ { + stat := payloads.FrameTrace{ + Label: "test", + Type: "type", + Operand: "operand", + StartTimestamp: time.Now().Format(time.RFC3339Nano), + EndTimestamp: time.Now().Format(time.RFC3339Nano), + Nodes: nodes, + } + frames = append(frames, stat) + } + + trace := payloads.Trace{ + Frames: frames, + } + + err := ds.HandleTraceReport(trace) + if err != nil { + t.Error(err) + } + + _, err = ds.GetFrameStatistics("test") + if err != nil { + t.Errorf(err.Error()) + } +} + +func TestGetNodeSummary(t *testing.T) { + _, err := ds.GetNodeSummary() + if err != nil { + t.Errorf(err.Error()) + } +} + +func TestGetNodeStats(t *testing.T) { + endTime := time.Now() + startTime := endTime.Add(-20 * time.Minute) + + _, err := ds.GetNodeStats(startTime, endTime) + if err != nil { + t.Errorf(err.Error()) + } +} + +func TestClearNodeStats(t *testing.T) { + err := ds.ClearNodeStats(time.Now()) + if err != nil { + t.Errorf(err.Error()) + } +} + +func TestGetEventLog(t *testing.T) { + err := ds.logEvent("test-tenantID", "info", 
"this is a test") + if err != nil { + t.Errorf(err.Error()) + } + + _, err = ds.GetEventLog() + if err != nil { + t.Errorf(err.Error()) + } +} + +func TestLogEvent(t *testing.T) { + err := ds.logEvent("test-tenantID", "info", "this is a test") + if err != nil { + t.Errorf(err.Error()) + } +} + +func TestClearLog(t *testing.T) { + err := ds.ClearLog() + if err != nil { + t.Errorf(err.Error()) + } +} + +func TestAddFrameStat(t *testing.T) { + var nodes []payloads.SSNTPNode + for i := 0; i < 3; i++ { + node := payloads.SSNTPNode{ + SSNTPUUID: uuid.Generate().String(), + SSNTPRole: "test", + TxTimestamp: time.Now().Format(time.RFC3339Nano), + RxTimestamp: time.Now().Format(time.RFC3339Nano), + } + nodes = append(nodes, node) + } + + stat := payloads.FrameTrace{ + Label: "test", + Type: "type", + Operand: "operand", + StartTimestamp: time.Now().Format(time.RFC3339Nano), + EndTimestamp: time.Now().Format(time.RFC3339Nano), + Nodes: nodes, + } + err := ds.addFrameStat(stat) + if err != nil { + t.Error(err) + } +} + +func TestAddInstanceStats(t *testing.T) { + var stats []payloads.InstanceStat + + for i := 0; i < 3; i++ { + stat := payloads.InstanceStat{ + InstanceUUID: uuid.Generate().String(), + State: "running", + SSHIP: "192.168.0.1", + SSHPort: 34567, + MemoryUsageMB: 0, + DiskUsageMB: 0, + CPUUsage: 0, + } + stats = append(stats, stat) + } + + nodeID := uuid.Generate().String() + + err := ds.addInstanceStats(stats, nodeID) + if err != nil { + t.Error(err) + } +} + +func TestAddNodeStats(t *testing.T) { + var stats []payloads.InstanceStat + + for i := 0; i < 3; i++ { + stat := payloads.InstanceStat{ + InstanceUUID: uuid.Generate().String(), + State: "running", + SSHIP: "192.168.0.1", + SSHPort: 34567, + MemoryUsageMB: 0, + DiskUsageMB: 0, + CPUUsage: 0, + } + stats = append(stats, stat) + } + stat := payloads.Stat{ + NodeUUID: uuid.Generate().String(), + MemTotalMB: 256, + MemAvailableMB: 256, + DiskTotalMB: 1024, + DiskAvailableMB: 1024, + Load: 20, + CpusOnline: 4, 
+ NodeHostName: "test", + Instances: stats, + } + + err := ds.addNodeStat(stat) + if err != nil { + t.Error(err) + } +} + +func TestAllocateTenantIP(t *testing.T) { + /* add a new tenant */ + tenant, err := addTestTenant() + if err != nil { + t.Error(err) + } + + ip, err := ds.AllocateTenantIP(tenant.Id) + if err != nil { + t.Error(err) + } + + // this should hit cache + newTenant, err := ds.getTenant(tenant.Id) + if err != nil { + t.Error(err) + } + + ipBytes := ip.To4() + if ipBytes == nil { + t.Error(errors.New("Unable to convert ip to bytes")) + } + + subnetInt := int(binary.BigEndian.Uint16(ipBytes[1:3])) + host := int(ipBytes[3]) + + if newTenant.network[subnetInt][host] != true { + t.Error("IP Address not claimed in cache") + } + + time.Sleep(5 * time.Second) + + // clear out cache + ds.tenantsLock.Lock() + delete(ds.tenants, tenant.Id) + ds.tenantsLock.Unlock() + + // this should not hit cache + newTenant, err = ds.getTenant(tenant.Id) + if err != nil { + t.Error(err) + } + + if newTenant.network[subnetInt][host] != true { + t.Error("IP Address not claimed in database") + } +} + +func TestNonOverlappingTenantIP(t *testing.T) { + /* add a new tenant */ + tenant, err := addTestTenant() + if err != nil { + t.Error(err) + } + + ip1, err := ds.AllocateTenantIP(tenant.Id) + if err != nil { + t.Error(err) + } + + tenant, err = addTestTenant() + if err != nil { + t.Error(err) + } + + ip2, err := ds.AllocateTenantIP(tenant.Id) + if err != nil { + t.Error(err) + } + + // make sure the subnet for ip1 and ip2 don't match + b1 := ip1.To4() + subnetInt1 := binary.BigEndian.Uint16(b1[1:3]) + b2 := ip2.To4() + subnetInt2 := binary.BigEndian.Uint16(b2[1:3]) + if subnetInt1 == subnetInt2 { + t.Error(errors.New("Tenant subnets must not overlap")) + } +} + +func TestGetCNCIWorkloadID(t *testing.T) { + _, err := ds.GetCNCIWorkloadID() + if err != nil { + t.Error(err) + } +} + +func TestGetConfig(t *testing.T) { + wls, err := ds.GetWorkloads() + if err != nil { + t.Error(err) + 
} + + if len(wls) == 0 { + t.Fatal("No Workloads Found") + } + + _, err = ds.getConfig(wls[0].Id) + if err != nil { + t.Error(err) + } +} + +func TestGetImageInfo(t *testing.T) { + wls, err := ds.GetWorkloads() + if err != nil { + t.Error(err) + } + + if wls == nil { + t.Errorf("No Workloads to Test") + } + + wl := wls[0] + + // this should hit cache + _, _, err = ds.getImageInfo(wl.Id) + if err != nil { + t.Error(err) + } + + // clear out of cache to exercise sql + ds.workloadsLock.Lock() + delete(ds.workloads, wl.Id) + ds.workloadsLock.Unlock() + + _, _, err = ds.getImageInfo(wl.Id) + if err != nil { + t.Error(err) + } + + // put it back in the cache + work, err := ds.getWorkload(wl.Id) + if err != nil { + t.Fatal(err) + } + + ds.workloadsLock.Lock() + ds.workloads[wl.Id] = work + ds.workloadsLock.Unlock() +} + +func TestGetWorkloadDefaults(t *testing.T) { + wls, err := ds.GetWorkloads() + if err != nil { + t.Error(err) + } + + if wls == nil { + t.Error("No Workloads to test") + } + + wl := wls[0] + + // this should hit the cache + _, err = ds.getWorkloadDefaults(wl.Id) + if err != nil { + t.Error(err) + } + + // clear cache to exercise sql + ds.workloadsLock.Lock() + delete(ds.workloads, wl.Id) + ds.workloadsLock.Unlock() + + // this should not hit the cache + _, err = ds.getWorkloadDefaults(wl.Id) + if err != nil { + t.Error(err) + } + + // put it back in the cache + work, err := ds.getWorkload(wl.Id) + if err != nil { + t.Fatal(err) + } + + ds.workloadsLock.Lock() + ds.workloads[wl.Id] = work + ds.workloadsLock.Unlock() +} + +func TestAddLimit(t *testing.T) { + tenant, err := addTestTenant() + if err != nil { + t.Error(err) + } + + /* put tenant limit of 1 instance */ + err = ds.AddLimit(tenant.Id, 1, 1) + if err != nil { + t.Error(err) + } + + // make sure cache was updated + ds.tenantsLock.Lock() + t2 := ds.tenants[tenant.Id] + delete(ds.tenants, tenant.Id) + ds.tenantsLock.Unlock() + + for i := range t2.Resources { + if t2.Resources[i].Rtype == 1 { + if 
t2.Resources[i].Limit != 1 { + t.Error(err) + } + } + } + + // make sure datastore was updated + t3, err := ds.GetTenant(tenant.Id) + for i := range t3.Resources { + if t3.Resources[i].Rtype == 1 { + if t3.Resources[i].Limit != 1 { + t.Error(err) + } + } + } +} + +func TestGetTenantResources(t *testing.T) { + tenant, err := addTestTenant() + if err != nil { + t.Error(err) + } + + // this should hit the cache + _, err = ds.getTenantResources(tenant.Id) + if err != nil { + t.Error(err) + } + + // clear cached + ds.tenantsLock.Lock() + delete(ds.tenants, tenant.Id) + ds.tenantsLock.Unlock() + + _, err = ds.getTenantResources(tenant.Id) + if err != nil { + t.Error(err) + } +} + +func TestGetTenantCNCI(t *testing.T) { + tenant, err := addTestTenant() + if err != nil { + t.Error(err) + } + + // this will hit cache + id, ip, mac, err := ds.getTenantCNCI(tenant.Id) + if err != nil { + t.Error(err) + } + if id != tenant.CNCIID || ip != tenant.CNCIIP || mac != tenant.CNCIMAC { + t.Error(err) + } + + // clear cached + ds.tenantsLock.Lock() + delete(ds.tenants, tenant.Id) + ds.tenantsLock.Unlock() + + // exercise sql + id, ip, mac, err = ds.getTenantCNCI(tenant.Id) + if err != nil { + t.Error(err) + } + if id != tenant.CNCIID || ip != tenant.CNCIIP || mac != tenant.CNCIMAC { + t.Error(err) + } +} + +func TestRemoveTenantCNCI(t *testing.T) { + tenant, err := addTestTenant() + if err != nil { + t.Error(err) + } + + err = ds.removeTenantCNCI(tenant.Id, tenant.CNCIID) + if err != nil { + t.Error(err) + } + + // make sure cache was updated + ds.tenantsLock.Lock() + t2 := ds.tenants[tenant.Id] + delete(ds.tenants, tenant.Id) + ds.tenantsLock.Unlock() + + if t2.CNCIID != "" || t2.CNCIIP != "" { + t.Error("Cache Not Updated") + } + + // check database was updated + testTenant, err := ds.GetTenant(tenant.Id) + if err != nil { + t.Error(err) + } + if testTenant.CNCIID != "" || testTenant.CNCIIP != "" { + t.Error("Database not updated") + } +} + +func TestGetCNCITenant(t *testing.T) { + 
tenant, err := addTestTenant() + if err != nil { + t.Error(err) + } + + tenantID, err := ds.getCNCITenant(tenant.CNCIID) + if err != nil { + t.Error(err) + } + if tenantID != tenant.Id { + t.Error("Did not retrieve correct tenant") + } +} + +func TestIsInstanceCNCI(t *testing.T) { + tenant, err := addTestTenant() + if err != nil { + t.Error(err) + } + + ok, err := ds.isInstanceCNCI(tenant.CNCIID) + if err != nil { + t.Error(err) + } + if !ok { + t.Error("Instance should have been a CNCI") + } +} + +func TestGetTenant(t *testing.T) { + tenant, err := addTestTenant() + if err != nil { + t.Error(err) + } + + testTenant, err := ds.GetTenant(tenant.Id) + if err != nil { + t.Error(err) + } + if testTenant.Id != tenant.Id { + t.Error(err) + } +} + +func TestGetAllTenants(t *testing.T) { + _, err := ds.GetAllTenants() + if err != nil { + t.Error(err) + } + // for now, just check that the query has no + // errors. +} + +func TestAddCNCIIP(t *testing.T) { + /* add a new tenant */ + tuuid := uuid.Generate() + tenant, err := ds.AddTenant(tuuid.String()) + if err != nil { + t.Error(err) + } + + // Add fake CNCI + err = ds.AddTenantCNCI(tenant.Id, uuid.Generate().String(), tenant.CNCIMAC) + if err != nil { + t.Error(err) + } + + // make sure that AddCNCIIP signals the channel it's supposed to + c := make(chan bool) + ds.cnciAddedLock.Lock() + ds.cnciAddedChans[tenant.Id] = c + ds.cnciAddedLock.Unlock() + + go func() { + err := ds.AddCNCIIP(tenant.CNCIMAC, "192.168.0.1") + if err != nil { + t.Error(err) + } + }() + + success := <-c + if !success { + t.Error(err) + } + + // confirm that the channel was cleared + ds.cnciAddedLock.Lock() + c = ds.cnciAddedChans[tenant.Id] + ds.cnciAddedLock.Unlock() + if c != nil { + t.Error(err) + } +} + +func TestHandleTraceReport(t *testing.T) { + var nodes []payloads.SSNTPNode + for i := 0; i < 3; i++ { + node := payloads.SSNTPNode{ + SSNTPUUID: uuid.Generate().String(), + SSNTPRole: "test", + TxTimestamp: time.Now().Format(time.RFC3339Nano), + 
RxTimestamp: time.Now().Format(time.RFC3339Nano), + } + nodes = append(nodes, node) + } + + var frames []payloads.FrameTrace + for i := 0; i < 3; i++ { + stat := payloads.FrameTrace{ + Label: "test", + Type: "type", + Operand: "operand", + StartTimestamp: time.Now().Format(time.RFC3339Nano), + EndTimestamp: time.Now().Format(time.RFC3339Nano), + Nodes: nodes, + } + frames = append(frames, stat) + } + + trace := payloads.Trace{ + Frames: frames, + } + + err := ds.HandleTraceReport(trace) + if err != nil { + t.Error(err) + } +} + +func TestGetCNCISummary(t *testing.T) { + tenant, err := addTestTenant() + if err != nil { + t.Error(err) + } + + // test without null cnciid + _, err = ds.GetTenantCNCISummary(tenant.CNCIID) + if err != nil { + t.Error(err) + } + + // test with null cnciid + _, err = ds.GetTenantCNCISummary("") + if err != nil { + t.Error(err) + } + +} + +func TestReleaseTenantIP(t *testing.T) { + /* add a new tenant */ + tenant, err := addTestTenant() + if err != nil { + t.Error(err) + } + + ip, err := ds.AllocateTenantIP(tenant.Id) + if err != nil { + t.Error(err) + } + ipBytes := ip.To4() + if ipBytes == nil { + t.Error(errors.New("Unable to convert ip to bytes")) + } + subnetInt := binary.BigEndian.Uint16(ipBytes[1:3]) + + // get updated tenant info + newTenant, err := ds.getTenant(tenant.Id) + if err != nil { + t.Error(err) + } + + // confirm that tenant map shows it used. + if newTenant.network[int(subnetInt)][int(ipBytes[3])] != true { + t.Error("IP Address not marked Used") + } + + time.Sleep(1 * time.Second) + + err = ds.ReleaseTenantIP(tenant.Id, ip.String()) + if err != nil { + t.Error(err) + } + + // get updated tenant info - should hit cache + newTenant, err = ds.getTenant(tenant.Id) + if err != nil { + t.Error(err) + } + + // confirm that tenant map shows it not used. 
+ if newTenant.network[int(subnetInt)][int(ipBytes[3])] != false { + t.Error("IP Address not released from cache") + } + + time.Sleep(1 * time.Second) + + // clear tenant from cache + ds.tenantsLock.Lock() + delete(ds.tenants, tenant.Id) + ds.tenantsLock.Unlock() + + // get updated tenant info - should hit database + newTenant, err = ds.getTenant(tenant.Id) + if err != nil { + t.Error(err) + } + + // confirm that tenant map shows it not used. + if newTenant.network[int(subnetInt)][int(ipBytes[3])] != false { + t.Error("IP Address not released from database") + } +} + +func TestAddTenantChan(t *testing.T) { + c := make(chan bool) + + tenant, err := addTestTenant() + if err != nil { + t.Error(err) + } + + ds.AddTenantChan(c, tenant.Id) + + // check cncisAddedChans + ds.cnciAddedLock.Lock() + c1 := ds.cnciAddedChans[tenant.Id] + delete(ds.cnciAddedChans, tenant.Id) + ds.cnciAddedLock.Unlock() + + if c1 != c { + t.Error("Did not update Added Chans properly") + } +} + +func TestGetWorkload(t *testing.T) { + wls, err := ds.GetWorkloads() + if err != nil { + t.Error(err) + } + + wl, err := ds.GetWorkload(wls[0].Id) + if err != nil { + t.Error(err) + } + + if wl != wls[0] { + t.Error("Did not get correct workload") + } + + // clear cache to exercise sql + // clear out of cache to exercise sql + ds.workloadsLock.Lock() + delete(ds.workloads, wl.Id) + ds.workloadsLock.Unlock() + + wl2, err := ds.GetWorkload(wls[0].Id) + if err != nil { + t.Error(err) + } + + if wl2.Id != wl.Id { + t.Error("Did not get correct workload from db") + } + + // put it back in the cache + work, err := ds.getWorkload(wl.Id) + if err != nil { + t.Fatal(err) + } + + ds.workloadsLock.Lock() + ds.workloads[wl.Id] = work + ds.workloadsLock.Unlock() +} + +func TestRestartFailure(t *testing.T) { + tenant, err := addTestTenant() + if err != nil { + t.Error(err) + } + + wls, err := ds.GetWorkloads() + if err != nil { + t.Error(err) + } + + instance, err := addTestInstance(tenant, wls[0]) + if err != nil { + 
t.Error(err) + } + + time.Sleep(1 * time.Second) + reason := payloads.RestartNoInstance + + err = ds.RestartFailure(instance.Id, reason) + if err != nil { + t.Error(err) + } +} + +func TestStopFailure(t *testing.T) { + tenant, err := addTestTenant() + if err != nil { + t.Error(err) + } + + wls, err := ds.GetWorkloads() + if err != nil { + t.Error(err) + } + + instance, err := addTestInstance(tenant, wls[0]) + if err != nil { + t.Error(err) + } + + time.Sleep(1 * time.Second) + reason := payloads.StopNoInstance + + err = ds.StopFailure(instance.Id, reason) + if err != nil { + t.Error(err) + } +} + +func TestStartFailureFullCloud(t *testing.T) { + tenant, err := addTestTenant() + if err != nil { + t.Error(err) + } + + wls, err := ds.GetWorkloads() + if err != nil { + t.Error(err) + } + + instance, err := addTestInstance(tenant, wls[0]) + if err != nil { + t.Error(err) + } + + time.Sleep(1 * time.Second) + + tenantBefore, err := ds.GetTenant(tenant.Id) + if err != nil { + t.Error(err) + } + + resourcesBefore := make(map[string]int) + for i := range tenantBefore.Resources { + r := tenantBefore.Resources[i] + resourcesBefore[r.Rname] = r.Usage + } + + reason := payloads.FullCloud + + err = ds.StartFailure(instance.Id, reason) + if err != nil { + t.Error(err) + } + + tenantAfter, err := ds.GetTenant(tenant.Id) + if err != nil { + t.Error(err) + } + + defaults := wls[0].Defaults + + usage := make(map[string]int) + for i := range defaults { + usage[string(defaults[i].Type)] = defaults[i].Value + } + + resourcesAfter := make(map[string]int) + for i := range tenantAfter.Resources { + r := tenantAfter.Resources[i] + resourcesAfter[r.Rname] = r.Usage + } + + // make sure usage was reduced by workload defaults values + for name, val := range resourcesAfter { + before := resourcesBefore[name] + delta := usage[name] + if val != before-delta { + t.Error("usage not reduced") + } + } +} + +func testAllocateTenantIPs(t *testing.T, nIPs int) { + nIPsPerSubnet := 253 + + newTenant, err 
:= addTestTenant() + if err != nil { + t.Error(err) + } + + // make this tenant have some network hosts assigned to them. + for n := 0; n < nIPs; n++ { + _, err = ds.AllocateTenantIP(newTenant.Id) + if err != nil { + t.Error(err) + } + } + + // get private tenant type + tenant, err := ds.getTenant(newTenant.Id) + + if len(tenant.subnets) != (nIPs/nIPsPerSubnet)+1 { + t.Error("Too many subnets created") + } + + for i, subnet := range tenant.subnets { + if ((i + 1) * nIPsPerSubnet) < nIPs { + if len(tenant.network[subnet]) != nIPsPerSubnet { + t.Error("Missing IPs") + } + } else { + if len(tenant.network[subnet]) != nIPs%nIPsPerSubnet { + t.Error("Missing IPs") + } + } + } +} + +func TestAllocate100IPs(t *testing.T) { + testAllocateTenantIPs(t, 100) +} + +func TestAllocate1024IPs(t *testing.T) { + testAllocateTenantIPs(t, 1024) +} + +func TestGetNodes(t *testing.T) { + startNodes, err := ds.GetNodes() + if err != nil { + t.Fatal(err) + } + + for i := 0; i < 10; i++ { + stat := payloads.Stat{ + NodeUUID: uuid.Generate().String(), + MemTotalMB: 256, + MemAvailableMB: 256, + DiskTotalMB: 1024, + DiskAvailableMB: 1024, + Load: 20, + CpusOnline: 4, + NodeHostName: "test", + } + err := ds.addNodeStat(stat) + if err != nil { + t.Fatal(err) + } + } + + nodes, err := ds.GetNodes() + if err != nil { + t.Fatal(err) + } + + if len(nodes) != len(startNodes)+10 { + msg := fmt.Sprintf("expected %d nodes, got %d", len(startNodes)+10, len(nodes)) + t.Fatal(msg) + } +} + +var ds *Datastore + +var tablesInitPath = flag.String("tables_init_path", ".", "path to csv files") +var workloadsPath = flag.String("workloads_path", ".", "path to yaml files") + +func TestMain(m *testing.M) { + flag.Parse() + + ds = new(Datastore) + + err := ds.Connect("./ciao-controller-test.db", "./ciao-controller-test-tdb.db") + if err != nil { + os.Exit(1) + } + + err = ds.Init(*tablesInitPath, *workloadsPath) + if err != nil { + os.Exit(1) + } + + code := m.Run() + + ds.Disconnect() + 
os.Remove("./ciao-controller-test.db") + os.Remove("./ciao-controller-test.db-wal") + os.Remove("./ciao-controller-test.db-shm") + os.Remove("./ciao-controller-test-tdb.db") + os.Remove("./ciao-controller-test-tdb.db-wal") + os.Remove("./ciao-controller-test-tdb.db-shm") + + os.Exit(code) +} diff --git a/ciao-controller/login.gtpl b/ciao-controller/login.gtpl new file mode 100644 index 000000000..c74d2825d --- /dev/null +++ b/ciao-controller/login.gtpl @@ -0,0 +1,148 @@ + + + + + + + + + + +
+
+
    +
  • + +
  • +
  • + +
  • +
  • + +
  • +
+
+
+ + diff --git a/ciao-controller/main.go b/ciao-controller/main.go new file mode 100644 index 000000000..5203cccb2 --- /dev/null +++ b/ciao-controller/main.go @@ -0,0 +1,128 @@ +/* +// Copyright (c) 2016 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +*/ + +package main + +import ( + "flag" + datastore "github.com/01org/ciao/ciao-controller/internal/datastore" + "github.com/01org/ciao/ssntp" + "github.com/golang/glog" + "os" + "sync" +) + +type controller struct { + client *ssntpClient + ds *datastore.Datastore + id *identity +} + +var cert = flag.String("cert", "/etc/pki/ciao/cert-client-localhost.pem", "Client certificate") +var caCert = flag.String("cacert", "/etc/pki/ciao/CAcert-server-localhost.pem", "CA certificate") +var serverURL = flag.String("url", "localhost", "Server URL") +var identityURL = flag.String("identity", "identity:35357", "Keystone URL") +var serviceUser = flag.String("username", "nova", "Openstack Service Username") +var servicePassword = flag.String("password", "nova", "Openstack Service Password") +var port = flag.Int("port", 8889, "http port") +var computeAPIPort = flag.Int("computeport", openstackComputeAPIPort, "Openstack Compute API port") +var httpsCAcert = flag.String("httpscert", "/etc/pki/ciao/ciao-controller-cacert.pem", "HTTPS CA certificate") +var httpsKey = flag.String("httpskey", "/etc/pki/ciao/ciao-controller-key.pem", "HTTPS cert key") +var tablesInitPath = flag.String("tables_init_path", ".", 
"path to csv files") +var workloadsPath = flag.String("workloads_path", ".", "path to yaml files") +var noNetwork = flag.Bool("nonetwork", false, "Debug with no networking") +var debugUI = flag.Bool("debug_ui", true, "Create Debug web UI") +var persistentDatastoreLocation = flag.String("database_path", "./ciao-controller.db", "path to persistent database") +var transientDatastoreLocation = flag.String("stats_path", "/tmp/ciao-controller-stats.db", "path to stats database") +var logDir = "/var/lib/ciao/logs/controller" + +func init() { + flag.Parse() + + logDirFlag := flag.Lookup("log_dir") + if logDirFlag == nil { + glog.Errorf("log_dir does not exist") + return + } + + if logDirFlag.Value.String() == "" { + logDirFlag.Value.Set(logDir) + } + + if err := os.MkdirAll(logDirFlag.Value.String(), 0755); err != nil { + glog.Errorf("Unable to create log directory (%s) %v", logDir, err) + return + } +} + +func main() { + var wg sync.WaitGroup + var err error + + context := new(controller) + context.ds = new(datastore.Datastore) + + err = context.ds.Connect(*persistentDatastoreLocation, *transientDatastoreLocation) + if err != nil { + glog.Fatalf("unable to connect to datastore: %s", err) + return + } + + err = context.ds.Init(*tablesInitPath, *workloadsPath) + if err != nil { + glog.Fatalf("unable to Init datastore: %s", err) + return + } + + config := &ssntp.Config{ + URI: *serverURL, + CAcert: *caCert, + Cert: *cert, + Role: ssntp.Controller, + Log: ssntp.Log, + } + + context.client, err = newSSNTPClient(context, config) + if err != nil { + // spawn some retry routine? 
+ glog.Fatalf("unable to connect to SSNTP server") + return + } + + idConfig := identityConfig{ + endpoint: *identityURL, + serviceUserName: *serviceUser, + servicePassword: *servicePassword, + } + + context.id, err = newIdentityClient(idConfig) + if err != nil { + glog.Fatal("Unable to authenticate to Keystone: ", err) + return + } + + if *debugUI { + wg.Add(1) + go createDebugInterface(context) + } + + wg.Add(1) + go createComputeAPI(context) + + wg.Wait() + context.ds.Disconnect() + context.client.Disconnect() +} diff --git a/ciao-controller/resources.csv b/ciao-controller/resources.csv new file mode 100644 index 000000000..2bd4f9a8a --- /dev/null +++ b/ciao-controller/resources.csv @@ -0,0 +1,6 @@ +1, instances +2, vcpus +3, mem_mb +4, disk_mb +5, network_node + diff --git a/ciao-controller/stats.gtpl b/ciao-controller/stats.gtpl new file mode 100644 index 000000000..0e7beff59 --- /dev/null +++ b/ciao-controller/stats.gtpl @@ -0,0 +1,421 @@ + + + + + + + + + +
+
+
+
+
+
+

Tracing

+ +
+
+
+
+

Admin Menu

+
+ + + + +
+
+
+

Summary

+
+
+
+

Event Log

+
+
+
+

Networking

+
+
+
+

Instances

+
+
+ + diff --git a/ciao-controller/tenantDebug.gtpl b/ciao-controller/tenantDebug.gtpl new file mode 100644 index 000000000..04a8c27d3 --- /dev/null +++ b/ciao-controller/tenantDebug.gtpl @@ -0,0 +1,270 @@ + + + + + + + + + +
+

ciao

+ Logged in as:
Placeholder
+
+
+

Launch Instance

+
+
    +
  • + +
  • +
  • + +
  • +
  • + +
  • +
  • + +
  • +
  • + +
  • +
  • + + + +
  • +
+
+
+ + diff --git a/ciao-controller/test.yaml b/ciao-controller/test.yaml new file mode 100644 index 000000000..4a6b076aa --- /dev/null +++ b/ciao-controller/test.yaml @@ -0,0 +1,13 @@ +--- +#cloud-config +password: hello +chpasswd: { expire: False } +users: + - name: ciao + gecos: CIAO Rules + lock-passwd: false + passwd: \\$1\\$vzmNmLLD\\$04bivxcjdXRzZLUd.enRl1 + sudo: ciao ALL=(ALL) NOPASSWD:ALL + ssh-authorized-keys: + - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDerQfD+qkb0V0XdQs8SBWqy4sQmqYFP96n/kI4Cq162w4UE8pTxy0ozAPldOvBJjljMvgaNKSAddknkhGcrNUvvJsUcZFm2qkafi32WyBdGFvIc45A+8O7vsxPXgHEsS9E3ylEALXAC3D0eX7pPtRiAbasLlY+VcACRqr3bPDSZTfpCmIkV2334uZD9iwOvTVeR+FjGDqsfju4DyzoAIqpPasE0+wk4Vbog7osP+qvn1gj5kQyusmr62+t0wx+bs2dF5QemksnFOswUrv9PGLhZgSMmDQrRYuvEfIAC7IdN/hfjTn0OokzljBiuWQ4WIIba/7xTYLVujJV65qH3heaSMxJJD7eH9QZs9RdbbdTXMFuJFsHV2OF6wZRp18tTNZZJMqiHZZSndC5WP1WrUo3Au/9a+ighSaOiVddHsPG07C/TOEnr3IrwU7c9yIHeeRFHmcQs9K0+n9XtrmrQxDQ9/mLkfje80Ko25VJ/QpAQPzCKh2KfQ4RD+/PxBUScx/lHIHOIhTSCh57ic629zWgk0coSQDi4MKSa5guDr3cuDvt4RihGviDM6V68ewsl0gh6Z9c0Hw7hU0vky4oxak5AiySiPz0FtsOnAzIL0UON+yMuKzrJgLjTKodwLQ0wlBXu43cD+P8VXwQYeqNSzfrhBnHqsrMf4lTLtc7kDDTcw== ciao@ciao +... diff --git a/ciao-controller/types/types.go b/ciao-controller/types/types.go new file mode 100644 index 000000000..ec6b08ae9 --- /dev/null +++ b/ciao-controller/types/types.go @@ -0,0 +1,134 @@ +/* +// Copyright (c) 2016 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+*/ + +package types + +import ( + "github.com/01org/ciao/payloads" + "time" +) + +type Workload struct { + Id string `json:"id"` + Description string `json:"description"` + FWType string `json:"-"` + VMType payloads.Hypervisor `json:"-"` + ImageID string `json:"-"` + ImageName string `json:"-"` + Config string `json:"-"` + Defaults []payloads.RequestedResource `json:"-"` +} + +type Instance struct { + Id string `json:"instance_id"` + TenantId string `json:"tenant_id"` + State string `json:"instance_state"` + WorkloadId string `json:"workload_id"` + NodeId string `json:"node_id"` + MACAddress string `json:"mac_address"` + IPAddress string `json:"ip_address"` + SSHIP string `json:"ssh_ip"` + SSHPort int `json:"ssh_port"` + CNCI bool `json:"-"` +} + +type Tenant struct { + Id string + Name string + CNCIID string + CNCIMAC string + CNCIIP string + Resources []*Resource +} + +type Resource struct { + Rname string + Rtype int + Limit int + Usage int +} + +func (r *Resource) OverLimit(request int) bool { + if r.Limit > 0 && r.Usage+request > r.Limit { + return true + } + return false +} + +type LogEntry struct { + Timestamp time.Time `json:"time_stamp"` + TenantId string `json:"tenant_id"` + EventType string `json:"type"` + Message string `json:"message"` +} + +type NodeStats struct { + NodeId string `json:"node_id"` + Timestamp time.Time `json:"time_stamp"` + Load int `json:"load"` + MemTotalMB int `json:"mem_total_mb"` + MemAvailableMB int `json:"mem_available_mb"` + DiskTotalMB int `json:"disk_total_mb"` + DiskAvailableMB int `json:"disk_available_mb"` + CpusOnline int `json:"cpus_online"` +} + +type NodeSummary struct { + NodeId string `json:"node_id"` + TotalInstances int `json:"total_instances"` + TotalRunningInstances int `json:"total_running_instances"` + TotalPendingInstances int `json:"total_pending_instances"` + TotalPausedInstances int `json:"total_paused_instances"` +} + +type TenantCNCI struct { + TenantID string `json:"tenant_id"` + IPAddress string 
`json:"ip_address"` + MACAddress string `json:"mac_address"` + InstanceID string `json:"instance_id"` + Subnets []string `json:"subnets"` +} + +type FrameStat struct { + ID string `json:"node_id"` + TotalElapsedTime float64 `json:"total_elapsed_time"` + ControllerTime float64 `json:"total_controller_time"` + LauncherTime float64 `json:"total_launcher_time"` + SchedulerTime float64 `json:"total_scheduler_time"` +} + +type BatchFrameStat struct { + NumInstances int `json:"num_instances"` + TotalElapsed float64 `json:"total_elapsed"` + AverageElapsed float64 `json:"average_elapsed"` + AverageControllerElapsed float64 `json:"average_controller_elapsed"` + AverageLauncherElapsed float64 `json:"average_launcher_elapsed"` + AverageSchedulerElapsed float64 `json:"average_scheduler_elapsed"` + VarianceController float64 `json:"controller_variance"` + VarianceLauncher float64 `json:"launcher_variance"` + VarianceScheduler float64 `json:"scheduler_variance"` +} + +type BatchFrameSummary struct { + BatchID string `json:"batch_id"` + NumInstances int `json:"num_instances"` +} + +type Node struct { + ID string `json:"node_id"` + IPAddr string `json:"ip_address"` + Hostname string `json:"hostname"` +} diff --git a/ciao-controller/workload_resources.csv b/ciao-controller/workload_resources.csv new file mode 100644 index 000000000..dd32aaab0 --- /dev/null +++ b/ciao-controller/workload_resources.csv @@ -0,0 +1,13 @@ +69e84267-ed01-4738-b15f-b47de06b62e7, 2, 2, 2, 1 +69e84267-ed01-4738-b15f-b47de06b62e7, 3, 128, 128, 1 +69e84267-ed01-4738-b15f-b47de06b62e7, 4, 80, 80, 1 +e35ed972-c46c-4aad-a1e7-ef103ae079a2, 2, 2, 2, 1 +e35ed972-c46c-4aad-a1e7-ef103ae079a2, 3, 128, 128, 1 +e35ed972-c46c-4aad-a1e7-ef103ae079a2, 4, 80, 80, 1 +eba04826-62a5-48bd-876f-9119667b1487, 2, 2, 2, 1 +eba04826-62a5-48bd-876f-9119667b1487, 3, 128, 128, 1 +eba04826-62a5-48bd-876f-9119667b1487, 4, 80, 80, 1 +eba04826-62a5-48bd-876f-9119667b1487, 5, 1, 1, 1 +ca957444-fa46-11e5-94f9-38607786d9ec, 2, 2, 2, 1 
+ca957444-fa46-11e5-94f9-38607786d9ec, 3, 128, 128, 1 +ca957444-fa46-11e5-94f9-38607786d9ec, 4, 80, 80, 1 diff --git a/ciao-controller/workload_template.csv b/ciao-controller/workload_template.csv new file mode 100644 index 000000000..ec951d97c --- /dev/null +++ b/ciao-controller/workload_template.csv @@ -0,0 +1,5 @@ +69e84267-ed01-4738-b15f-b47de06b62e7,Fedora 23 Cloud,test.yaml,legacy,qemu,73a86d7e-93c0-480e-9c41-ab42f69b7799,"", 0 +e35ed972-c46c-4aad-a1e7-ef103ae079a2,Clear Cloud,test.yaml,efi,qemu,df3768da-31f5-4ba6-82f0-127a1a705169,"", 0 +eba04826-62a5-48bd-876f-9119667b1487,CNCI,test.yaml,efi,qemu,4e16e743-265a-4bf2-9fd1-57ada0b28904,"", 1 +ca957444-fa46-11e5-94f9-38607786d9ec,Docker Ubuntu latest,docker-ubuntu.yaml,"",docker,fa7d86d8-fa46-11e5-8493-38607786d9ec,"ubuntu:latest",0 + diff --git a/ciao-launcher/README.md b/ciao-launcher/README.md new file mode 100644 index 000000000..7de873b70 --- /dev/null +++ b/ciao-launcher/README.md @@ -0,0 +1,422 @@ +# ciao-launcher + +ciao-launcher is an SSNTP client that manages VM and container instances. It runs on +compute and network nodes executing commands it receives from SSNTP servers, +primarily [scheduler](https://github.com/01org/ciao/blob/master/ciao-scheduler/README.md). Its current feature set includes: + +1. Launching, stopping, restarting and deleting of docker containers and qemu VMs on compute and network nodes +2. Basic monitoring of VMs and containers +3. Collection and transmission of compute node and instance (container or VM) statistics +4. Reconnection to existing VMs and containers on start up + +We'll take a look at these features in more detail a little later on. First, +let's see what is required to install and run launcher. + +# Installation + +## Getting the code + +ciao-launcher can be downloaded and installed using go get + +```go get github.com/01org/ciao/ciao-launcher``` + +The resulting binary will be placed in $GOPATH/bin, which you should already have +in your PATH. 
+ +## Installing Certificates + +LINK NEEDED TO CERTIFICATE CREATION PROCESS + +Secondly you need to generate a certificate pair to allow launcher to connect to +the SSNTP server. The default location for these certificates is /var/lib/ciao +So you can either copy the certs to this location, e.g., + +``` +cp CAcert-server-localhost.pem /var/lib/ciao +cp cert-client-localhost.pem /var/lib/ciao +``` + +or provide alternative locations for these files via the -cert and -cacert command +line options. + +## Install Dependencies + +ciao-launcher has dependencies on five external packages: + +1. qemu-system-x86_64 and qemu-img to launch the VMs and create qcow images +2. xorriso to create ISO images for cloudinit +3. ovmf, EFI firmware required for some images +4. fuser +5. docker, if you want to manage docker containers + +All of these packages need to be installed on your compute node before launcher +can be run. + +To create a new instance, launcher needs a template iso image to use as a backing file. +Currently, launcher requires all such backing files to be stored in +/var/lib/ciao/images. The names of these image files must exactly match the +image_uuid field passed in the payload of the START command. Here's an example setup + +``` + /var/lib/ciao/images/ + └── b286cd45-7d0c-4525-a140-4db6c95e41fa +``` + +The images should have cloudinit installed and configured to use the ConfigDrive data source. +Currently, this is the only data source supported by launcher. + +## Launching ciao-launcher + +ciao-launcher can be launched from the command line as follows +``` +sudo ciao-launcher -server -network [cn|nn|none] +``` + +Currently, launcher needs to be run as root so that it can create network links and +launcher docker containers. + +As previously mentioned the -cacert and -cert options can be used to override the SSNTP +certificates. + +ciao-launcher uses glog for logging. By default launcher stores logs in files written to +/var/lib/ciao/logs. 
This behaviour can be overridden using a number of different +command line arguments added by glog, e.g., -alsologtostderr. + +Here is a full list of the command line parameters supported by launcher. + +``` +Usage of ./launcher: + -alsologtostderr + log to standard error as well as files + -cacert string + CA certificate (default "/var/lib/ciao/CAcert-server-localhost.pem") + -cert string + Client certificate (default "/var/lib/ciao/cert-client-localhost.pem") + -compute-net string + Compute Subnet + -cpuprofile string + write profile information to file + -disk-limit + Use disk usage limits (default true) + -hard-reset + Kill and delete all instances, reset networking and exit + -log_backtrace_at value + when logging hits line file:N, emit a stack trace (default :0) + -log_dir string + If non-empty, write log files in this directory + -logtostderr + log to standard error instead of files + -mem-limit + Use memory usage limits (default true) + -mgmt-net string + Management Subnet + -network value + Can be none, cn (compute node) or nn (network node) (default none) + -server string + URL of SSNTP server (default "localhost") + -simulation + Launcher simulation + -stderrthreshold value + logs at or above this threshold go to stderr + -v value + log level for V logs + -vmodule value + comma-separated list of pattern=N settings for file-filtered logging + -with-ui value + Enables virtual consoles on VM instances. Can be 'none', 'spice', 'nc' (default nc) +``` + +The --with-ui and --cpuprofile options are disabled by default. To enable them use the debug +and profile tags, respectively. + +# Commands +## START + +START is used to create and launch a new VM instance. Some example payloads +are discussed below: + +The [first payload](https://github.com/01org/ciao/blob/master/ciao-launcher/tests/examples/start_legacy.yaml) creates a new CN VM instance using the backing file stored in +/var/lib/ciao/images/b286cd45-7d0c-4525-a140-4db6c95e41fa. 
The disk +image has a maximum size of 80GBs and the VM will be run with two CPUS and +370MBs of memory. The first part of the payload corresponds to the +cloudinit user-data file. This data will be extracted from the payload +stored in an ISO image and passed to the VM instance. Assuming cloudinit is +correctly configured on the backing image, the file /etc/bootdone will be +created and the hostname of the image will be set to the instance uuid. + +The [second payload](https://github.com/01org/ciao/blob/master/ciao-launcher/tests/examples/start_efi.yaml) creates a CN VM instance using a different image that +needs to be booted with EFI. + +The [third payload](https://github.com/01org/ciao/blob/master/ciao-launcher/tests/examples/start_nn.yaml) +is an example of starting a VM instance on a NN. Note that the networking parameters are different. + +ciao-launcher detects and returns a number of errors when executing the start command. +These are listed below: + +- invalid\_payload: if the YAML is corrupt + +- invalid\_data: if the start section of the payload is corrupt or missing +information such as image-id + +- already\_running: if you try to start an existing instance that is already running + +- instance\_exists: if you try to start an instance that has already been created +but is not currently running + +- image\_failure: If launcher is unable to prepare the file for the instance, e.g., the +image_uuid refers to an non-existant backing image + +- network_failure: It was not possible to initialise networking for the instance + +- full_cn: The node has insufficient resources to start the requested instance + +- launch\_failure: If the instance has been successfully created but could not be launched. +Actually, this is sort of an odd situation as the START command partially succeeded. +ciao-launcher returns an error code, but the instance has been created and could be booted a +later stage via RESTART. 
+ +If the user specifies a size for disk_mb that is smaller than the virtual size of the +backing image, launcher ignores the user specified value and creates an image for the +instance whose virtual size matches that size of the chosen backing image. + +ciao-launcher only supports persistent instances at the moment. Any VM instances created +by the START command are persistent, i.e., the persistence YAML field is currently +ignored. + + +## DELETE + +DELETE can be used to destroy an existing VM instance. It removes all the +files associated with that instance from the compute node. If the VM instance +is running when the DELETE command is received it will be powered down. + +See [here](https://github.com/01org/ciao/blob/master/ciao-launcher/tests/examples/delete_legacy.yaml) for an example of the DELETE command. + +## STOP + +STOP can be used to power down an existing VM instance. The state associated +with the VM remains intact on the compute node and the instance can be restarted +at a later date via the RESTART command + +See [here](https://github.com/01org/ciao/blob/master/ciao-launcher/tests/examples/stop_legacy.yaml) for an example of the STOP command. + +## RESTART + +RESTART can be used to power up an existing VM instance that has either been +powered down by the user explicitly or shut down via the STOP command. The instance +will be restarted with the settings contained in the payload of the START command +that originally created it. It is not possible to override these settings, e.g., +change the number of CPUs used, via the RESTART command, even though the payload itself allows these values to be specified. + +See [here](https://github.com/01org/ciao/blob/master/ciao-launcher/tests/examples/restart_legacy.yaml) for an example of the RESTART command. + +# Recovery + +When launcher starts up it checks to see if any VM instances exist and if they +do it tries to connect to them. 
This means that you can easily kill launcher, +restart it and continue to use it to manage previously created VMs. One thing +that it does not yet do is to restart VM instances that have been powered down. +We might want to do this if the machine reboots, but I need to think about how +best this should be done. + + +# Reporting + +ciao-launcher sends STATS commands and STATUS updates to the SSNTP server to which +it is connected. STATUS updates are sent when launcher connects to the SSNTP +server. They are also sent when a VM instance is successfully created or +destroyed, informing the upper levels of the stack that the capacity of +launcher's compute node has changed. The STATS command is sent when launcher +connects to the SSNTP server and every 30 seconds thereafter. + +ciao-launcher computes the information that it sends back in the STATS command and +STATUS update payloads as follows: + + + + + + + + + +
DatumSource
MemTotalMB/proc/meminfo:MemTotal
MemAvailableMB/proc/meminfo:MemFree + Active(file) + Inactive(file)
DiskTotalMBstatfs("/var/lib/ciao/instances")
DiskAvailableMBstatfs("/var/lib/ciao/instances")
Load/proc/loadavg (Average over last minute reported)
CpusOnLineNumber of cpu[0-9]+ entries in /proc/stat
+ +And instance statistics are computed like this + + + + + + + + +
DatumSource
SSHIPIP of the concentrator node, see below
SSHPortPort number on the concentrator node which can be used to ssh into the instance
MemUsageMBpss of qemu of docker process id
DiskUsageMBSize of rootfs
CPUUsageAmount of cpuTime consumed by instance over 30 second period, normalized for number of VCPUs
+ +ciao-launcher sends two different STATUS updates, READY and FULL. FULL is sent +when launcher determines that there is insufficient memory or disk space available +on the node on which it runs to launch another instance. It also returns FULL +if it determines that the launcher process is running low on file descriptors. +The memory and disk space checks can be disabled using the -mem-limit and +-disk-limit command line options. The file descriptor limit check cannot be +disabled. + +# Testing ciao-launcher in Isolation + +ciao-launcher is part of the ciao network statck and is usually run and tested +in conjunction with the other ciao components. However, it is often +useful in debugging and development to test ciao-launcher in isolation of the +other ciao components. This can be done using two tools in the +tests directory. + +The first tool, ciao-launcher-server, is an simple SSNTP server. It can be +used to send commands to and receive events from multiple launchers. +ciao-launcher-server exposes a REST API. Commands can be sent to it +directly using curl, if you know the URLs, or directly with the tool, ciaolc. +We'll look at some examples of using ciaolc below. + +To get started copy the test certs in https://github.com/01org/ciao/tree/master/ciao-launcher/tests/ciao-launcher-server to /var/lib/ciao. Then run +ciao-launcher-server. + +Open a new terminal and start ciao-launcher, e.g., + +./ciao-launcher --logtostderr --network cn + +Open a new terminal and try some ciaolc commands + +To retrieve a list of instances type + +``` +$ ciaolc instances +d7d86208-b46c-4465-9018-fe14087d415f +67d86208-b46c-4465-9018-e14287d415f +``` + +To retrieve detailed information about the instances + +``` +$ cialoc istats +UUID Status SSH Mem Disk CPU +d7d86208-b46c-4465-9018-fe14087d415f running 192.168.42.21:35050 492 MB 339 MB 0% +67d86208-b46c-4465-9018-e14287d415f running 192.168.200.200:61519 14 MB 189 MB 0% +``` + +Both of the above commands take a filter parameter. 
So to see only the pending +instances type + +``` +$ ciaolc instances --filter pending +``` + +A new instance can be started using the startf command. You need to provide +a file containing a valid start payload. There are some examples +[here](https://github.com/01org/ciao/tree/master/ciao-launcher/tests/examples) + +``` +$ ciaolc startf start_legacy.yaml +``` + +Instances can be stopped, restarted and deleted using the stop, restart and +delete commands. Each of these commands requires an instance-uuid, e.g., + +``` +$ ciaolc stop d7d86208-b46c-4465-9018-fe14087d415f +``` + +The most recent stats returned by the launcher can be retrieved using the +stats command, e.g., + +``` +$ ciaolc stats +NodeUUID: dacf409a-7c7e-48c7-b382-546168ab6cdf +Status: READY +MemTotal: 7856 MB +MemAvailable: 5220 MB +DiskTotal: 231782 MB +DiskAvailable: 166163 MB +Load: 0 +CpusOnline: 4 +NodeHostName: +Instances: 2 (2 running 0 exited 0 pending) +``` + +You can retrieve a list of events and errors received for a +ciao-launcher instance using the drain command: + +``` +$ ciaolc drain +- instance_deleted: + instance_uuid: d7d86208-b46c-4465-9018-fe14087d415f +``` + +Once drained, the events are deleted from inside the ciao-launcher-server. +Running subsequent drain commands will return nothing, assuming that no +new events have been generated. + +Finally, you can connect multiple ciao-launchers to the ciao-launcher-server +instance. If you do this you need to specify which launcher you would like +to command when issuing a command via ciaolc. This can be done via the +--client option. + +e.g., + +``` +$ ciaolc drain --client dacf409a-7c7e-48c7-b382-546168ab6cdf +- instance_deleted: + instance_uuid: d7d86208-b46c-4465-9018-fe14087d415f +``` + +A list of connected clients can be obtained with the clients command. + +# Connecting to QEMU Instances + +There are two options. 
The preferred option is to create a user and associate +an ssh key with that user in the cloud-init payload that gets sent with the +START command that creates the VM. You can see an example of such a payload +here: + +https://github.com/01org/ciao/blob/master/ciao-launcher/tests/examples/start_efi.yaml + +Once the start command has succeeded and the instance has been launched you can +connect to it via SSH using the IP address of the concentrator. You also need to +specify a port number, which can be computed using the following formula: + +33000 + ip[2] << 8 + ip[3] + +where ip is the ip address of the instance as specified in the START command. +For example, if the IP address of the instance is 192.168.0.2, the ssh port +would be 33002. Launcher actually sends the SSH IP address and port number +of each instance in the stats commands. This information should normally be +shown in the ciao UI. + +Please note that ssh is typically only used when you are running a complete +ciao stack, including ciao-scheduler, ciao-controller and a network node. +However, it should be possible to get it to work by manually starting +a concentrator instance on a network node before you launch your instance. + +The second method is to compile launcher with the debug tag, e.g., +go build --tags debug. This will add a new command line option that can +be used to connect to an instance via netcat or spice. To use this method you +need to look in the launcher logs when launching an instance. You should see +some instructions in the logs telling you how to connect to the instance. +Here's an example, + +I0407 14:38:10.874786 8154 qemu.go:375] ============================================ +I0407 14:38:10.874830 8154 qemu.go:376] Connect to vm with netcat 127.0.0.1 5909 +I0407 14:38:10.874849 8154 qemu.go:377] ============================================ + +netcat 127.0.0.1 5909 will give you a login prompt. You might need to press return to see the login. 
Note this will only work if the VM allows login on the +console port, i.e., is running getty on ttyS0. + +# Connecting to Docker Container Instances + +This can only be done from the compute note that is running the docker +container. + +1. Install nsenter +2. sudo docker ps -a | grep +3. copy the container-id +4. PID=$(sudo docker inspect -f {{.State.Pid}} ) +5. sudo nsenter --target $PID --mount --uts --ipc --net --pid + +See [here](https://blog.docker.com/tag/nsenter/) for more information. diff --git a/ciao-launcher/delete.go b/ciao-launcher/delete.go new file mode 100644 index 000000000..695309a0a --- /dev/null +++ b/ciao-launcher/delete.go @@ -0,0 +1,88 @@ +/* +// Copyright (c) 2016 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+*/ + +package main + +import ( + "os" + + "github.com/01org/ciao/payloads" + "github.com/01org/ciao/ssntp" + "github.com/golang/glog" +) + +type deleteError struct { + err error + code payloads.DeleteFailureReason +} + +func (de *deleteError) send(client *ssntpConn, instance string) { + if !client.isConnected() { + return + } + + payload, err := generateDeleteError(instance, de) + if err != nil { + glog.Errorf("Unable to generate payload for delete_failure: %v", err) + return + } + + _, err = client.SendError(ssntp.DeleteFailure, payload) + if err != nil { + glog.Errorf("Unable to send delete_failure: %v", err) + } +} + +func deleteVnic(instanceDir string, client *ssntpConn) { + cfg, err := loadVMConfig(instanceDir) + if err != nil { + glog.Warningf("Unable to load instance state %s: %s", instanceDir, err) + return + } + + vnicCfg, err := createVnicCfg(cfg) + if err != nil { + glog.Warningf("Unable to create vnicCfg: %s", err) + return + } + + err = destroyVnic(client, vnicCfg) + if err != nil { + glog.Warningf("Unable to destroy vnic: %s", err) + } +} + +func processDelete(vm virtualizer, instanceDir string, client *ssntpConn, running ovsRunningState) error { + + // We have to ignore these errors for the time being. There's no way to distinguish + // between the various sort of errors that docker can return. We could be getting + // a container not found error, if someone had deleted the container manually. In this + // case we definitely want to delete the instance. 
+ + _ = vm.deleteImage() + + if networking.Enabled() && running != ovsPending { + glog.Info("Deleting Vnic") + deleteVnic(instanceDir, client) + } + + err := os.RemoveAll(instanceDir) + if err != nil { + glog.Warningf("Unable to remove instance dir: %v", err) + } + + return err +} diff --git a/ciao-launcher/doc.go b/ciao-launcher/doc.go new file mode 100644 index 000000000..919cf6d91 --- /dev/null +++ b/ciao-launcher/doc.go @@ -0,0 +1,186 @@ +/* +// Copyright (c) 2016 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +*/ + +// ciao-launcher is ssntp agent that runs on compute and network nodes. +// Its primary purpose is to launch and manage containers and virtual +// machines. For more information on installing and running ciao-launcher +// see https://github.com/01org/ciao/blob/master/ciao-launcher/README.md +// for more information. +// +// Introduction +// +// ciao-launcher tries to take advantage of Go's concurrency support as much as +// possible. The intention here is that most of the work involved in launching +// and manipulating VMs is mostly self contained and IO bound and so should in +// theory lend itself well to a concurrent design. As a consequence, ciao-launcher +// is highly concurrent and performant. The concurrent nature +// of ciao-launcher can make it a little difficult to understand for new comers, +// so here are a few notes on its design. ciao-launcher can be thought of as a +// collection of distinct go routines. 
These notes explain what these go +// routines are for and how they communicate. +// +// Main +// +// Main is the go routine that starts when ciao-launcher is itself launched. The code +// for this is in main.go. It parses the command line parameters, initialises +// networking, ensures that no other instance of ciao-launcher are running and then +// starts the server go routine. Having done all this, the main go routine waits +// for a signal, e.g., SIGTERM, from the OS to quit. When this signal is retrieved +// it instructs all child go routines to quit and waits for their exit. Note that +// it only waits for 1 second. If all child go routines have failed to exit in 1 +// second, ciao-launcher panics. The panic is useful as it prints the stack trace of +// all the running go routines, so you can see which ones are blocked. At least +// this was the intention. The default behaviour of the go runtime has changed in +// this regard in 1.6 so a small code change is required, but you get the idea, I +// hope. +// +// The Server go routine +// +// Manages the connection to the SSNTP server and pre-processes all commands +// received from this server. The code for this go routine is also in main.go, at +// least for the time being. ciao-Launcher +// establishes a connection to the ssntp server via the ssntp.Dial command. This +// creates a separate go routine, managed by the ssntp library. Any subsequent +// ssntp events that occur are handled by the ciao-launcher function CommandNotify. +// CommandNotify is called in the context of the ssntp go routine. To avoid +// blocking this go routine, ciao-launcher parses the YAML associated with the command, +// sends a newly created internal command down a channel to the server go routine +// and returns. The command is then processed further in the server go routine. 
+// +// Most commands are operations on instances, e.g., create a new instance, restart +// an instance, and are ultimately processed by a go routine dedicated to the +// particular instance to which they pertain. These instance go routines are +// managed by another go routine called the overseer which will be discussed in +// more detail below. Before the server go routine can forward a command to the +// appropriate go routine it needs to ask the overseer for a channel which can be +// used to communicate with the relevant instance go routine. This is done by +// sending an ovsAddCmd or an ovsGetCmd to the overseer via the overseer channel, +// ovsCh. ovsAddCmd is used when starting a new instance. ovsGetCmd is used to +// process all other commands. +// +// The overseer go routine is started by the server go routine. When the server +// go routine is asked to exit by the main go routine, i.e., main go routine +// closes the doneCh channel, the server go routine closes the channel it uses +// to communicate with the overseer. This instructs the overseer to close, +// which it does after all the instance go routines it manages have in turn +// exited. The server go routine waits for the overseer to exit before +// terminating. +// +// The Overseer +// +// The overseer is a go routine that serves three main purposes. +// +// 1. It manages instance go routines that themselves manage individual vms. +// 2. It collects statistics about the node and the VMs it hosts and +// tranmits these periodically to the ssntp server via the STATS and +// STATUS commands. +// 3. It Rediscovers and reconnects to existing instances when ciao-launcher is started. +// Overseer launches new instances via the startInstance function from instance.go. +// This function starts a new go routine for that instance and returns a channel +// through which commands can be sent to the instance. The overseer itself does +// not send commands down this channel. 
It cannot as this would lead to deadlock. +// Instead it makes this channel available to the server go routine when it is +// requested via the ovsAddCmd or the ovsGetCmd commands. +// +// The overseer passes each instance go routine a reference to the a single +// channel, childDoneCh. This channel is closed when the overseer starts shutting +// down. Closing this channel serves as a broadcast notification to each instance +// go routine, indicating that they need to shutdown. The overseer waits until all +// instance go routines have shut down before exiting. This is achieved via a wait +// group called chilWg. +// +// The overseer maintains a map of information about each instance called +// instances. The map is indexed by the uuid of instances. It contains +// information about the instances, namely their running state and their resource +// usage. This information is used when sending STATs and STATUS commands. +// +// Information about the instances ultimately comes from the instance go routines. +// However, these go routines cannot access the overseer's instance map directly. +// To update it they send commands down a channel, which the overseer passes to +// startInstance, for example, ovsStatsUpdateCmd or ovsStateChange. The overseer +// processes these commands in the processCommand function. +// +// The Instance Go routines +// +// ciao-launcher maintains one go routine per instance it manages. These go routines +// exist regardless of the state of the underlying instance, i.e., there is +// generally one go routine running per instance, regardless of whether that +// instance is pending, exited or running. +// +// The instance go routines serve 3 main purposes: +// +// 1. They accept and process commands from the server go routine down their +// command channel. These commands typically come from the ssntp server, +// although there are some occasions where the commands originate from inside +// ciao-launcher itself. +// 2. 
They monitor the running state of VMs. +// 3. They manage the collection of instance statistics, which they report +// to the overseer. +// +// The nice thing about this design is that almost all instance related work can be +// performed in parallel. Stats can be computed for one instance at the same time +// as a separate instance is being powered down. ciao-Launcher can process any number +// of commands to start new instances in parallel. There is no locking required +// apart from a synchronised access to the overseer map made by the server go +// routine when the command is first received and a small check related to the +// image from which the instance will be launched, in the case where instances are +// being started. An additional synchronisation point is required for docker +// instances to ensure that the relevant docker network has been created +// before the container. +// +// Note that although commands submitted to different instances are in executed in +// parallel, the instance go routines serialise commands issue against a single +// instance. This is necessary to avoid instance corruption. +// +// Right now the command channels that the server go routine uses to send command +// to instances are not buffered. This might need to change as currently it could +// be possible for a SSNTP server to kill ciao-launcher's parallelism by repetively +// sending commands to the same instance over and over again. +// +// The code for the instance go routines is in instance.go. However, the code that +// executes most of the commands has been placed in separate files named after the +// commands themselves, e.g., start.go, delete.go, restart.go. It should be noted +// that the code in these files runs in the context of an instance go routine. +// Finally, some of the code used to process instance commands is in payloads.go. +// This is for legacy reasons and in the future this file will probably go away and +// its contents will be redistributed. 
It should not be assumed as of the time of +// writing that all the code in payloads.go runs in an instance go routine. +// payloads.go needs cleaning up (https://github.com/01org/ciao/issues/10). +// +// The virtualizer +// +// The instance go routines need to talk to qemu and docker to manage their VMs +// and containers. However, they do not do so directly. Rather they do so via +// a virtualizer interface. +// +// The virtualizer interface is designed to isolate ciao-launcher, and in particular, +// functions that run in the instance go routine, from the underlying virtualisation +// technologies used to launch and manage VMs and containers. +// All the methods on the virtualizer interface will be called serially by the instance +// go routine. Therefore there is no need to synchronised data between the virtualizers +// methods. +// +// qemu.go contains functions to start and stop a VM, to create a qcow2 image, to +// collect statistics about the instance running in the hypervisor, such as its +// memory and cpu usage, and to monitor the instance, i.e., to determine whether +// the VM is actually running or not. +// +// docker.go contains methods to manage docker containers. +// +// For more information about the virtualizer API, please see the comments +// in https://github.com/01org/ciao/blob/master/ciao-launcher/virtualizer.go +// +package main diff --git a/ciao-launcher/docker.go b/ciao-launcher/docker.go new file mode 100644 index 000000000..5fd9dc318 --- /dev/null +++ b/ciao-launcher/docker.go @@ -0,0 +1,493 @@ +/* +// Copyright (c) 2016 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +*/ + +package main + +import ( + "encoding/json" + "fmt" + "io" + "io/ioutil" + "path" + "sync" + "time" + + "gopkg.in/yaml.v2" + + "github.com/docker/docker/pkg/jsonmessage" + "github.com/docker/docker/pkg/version" + "github.com/docker/engine-api/client" + "github.com/docker/engine-api/types" + "github.com/docker/engine-api/types/container" + "github.com/docker/engine-api/types/filters" + "github.com/docker/engine-api/types/network" + "github.com/golang/glog" + "golang.org/x/net/context" +) + +var dockerClient struct { + sync.Mutex + cli *client.Client +} + +type docker struct { + cfg *vmConfig + instanceDir string + dockerID string + prevCPUTime int64 + prevSampleTime time.Time + pid int +} + +// It's not entirely clear that it's safe to call a client.Client object from +// multiple go routines simulataneously. The code looks like it is re-entrant +// but this doesn't seem to be documented anywhere. Need to check this. + +// There's no real way to return an error from init at the moment, so we'll +// try to retrieve the client object at each new invocation of the virtualizer. 
+ +// BUG(markus): We shouldn't report ssh ports for docker instances + +func getDockerClient() (cli *client.Client, err error) { + dockerClient.Lock() + if dockerClient.cli == nil { + defaultHeaders := map[string]string{"User-Agent": "ciao-1.0"} + dockerClient.cli, err = client.NewClient("unix:///var/run/docker.sock", + "v1.22", nil, defaultHeaders) + } + cli = dockerClient.cli + dockerClient.Unlock() + return cli, err +} + +func (d *docker) init(cfg *vmConfig, instanceDir string) { + d.cfg = cfg + d.instanceDir = instanceDir +} + +func (d *docker) checkBackingImage() error { + glog.Infof("Checking backing docker image %s", d.cfg.Image) + + cli, err := getDockerClient() + if err != nil { + return err + } + + args := filters.NewArgs() + images, err := cli.ImageList(context.Background(), + types.ImageListOptions{ + MatchName: d.cfg.Image, + All: false, + Filters: args, + }) + + if err != nil { + glog.Infof("Called to ImageList for %s failed: %v", d.cfg.Image, err) + return err + } + + if len(images) == 0 { + glog.Infof("Docker Image not found %s", d.cfg.Image) + return errImageNotFound + } + + glog.Infof("Docker Image %s is present on node", d.cfg.Image) + + return nil +} + +func (d *docker) downloadBackingImage() error { + glog.Infof("Downloading backing docker image %s", d.cfg.Image) + + cli, err := getDockerClient() + if err != nil { + return err + } + + prog, err := cli.ImagePull(context.Background(), types.ImagePullOptions{ImageID: d.cfg.Image}, nil) + if err != nil { + glog.Errorf("Unable to download image %s: %v\n", d.cfg.Image, err) + return err + + } + defer func() { _ = prog.Close() }() + + dec := json.NewDecoder(prog) + var msg jsonmessage.JSONMessage + err = dec.Decode(&msg) + for err == nil { + if msg.Error != nil { + err = msg.Error + break + } + + err = dec.Decode(&msg) + } + + if err != nil && err != io.EOF { + glog.Errorf("Unable to download image %v\n", err) + return err + } + + return nil +} + +func (d *docker) createImage(bridge string, userData, 
metaData []byte) error { + var hostname string + var cmd []string + + cli, err := getDockerClient() + if err != nil { + return err + } + + md := &struct { + Hostname string `json:"hostname"` + }{} + err = json.Unmarshal(metaData, md) + if err != nil { + glog.Info("Start command does not contain hostname. Setting to instance UUID") + hostname = d.cfg.Instance + } else { + glog.Infof("Found hostname %s", md.Hostname) + hostname = md.Hostname + } + + ud := &struct { + Cmds [][]string `yaml:"runcmd"` + }{} + err = yaml.Unmarshal(userData, ud) + if err != nil { + glog.Info("Start command does not contain a run command") + } else { + if len(ud.Cmds) >= 1 { + cmd = ud.Cmds[0] + if len(ud.Cmds) > 1 { + glog.Warningf("Only one command supported. Found %d in userdata", len(ud.Cmds)) + } + } + } + + config := &container.Config{ + Hostname: hostname, + Image: d.cfg.Image, + Cmd: cmd, + } + + hostConfig := &container.HostConfig{} + networkConfig := &network.NetworkingConfig{} + if bridge != "" { + config.MacAddress = d.cfg.VnicMAC + hostConfig.NetworkMode = container.NetworkMode(bridge) + networkConfig.EndpointsConfig = map[string]*network.EndpointSettings{ + bridge: &network.EndpointSettings{ + IPAMConfig: &network.EndpointIPAMConfig{ + IPv4Address: d.cfg.VnicIP, + }, + }, + } + } + + resp, err := cli.ContainerCreate(context.Background(), config, hostConfig, networkConfig, + d.cfg.Instance) + if err != nil { + glog.Errorf("Unable to create container %v", err) + return err + } + + idPath := path.Join(d.instanceDir, "docker-id") + err = ioutil.WriteFile(idPath, []byte(resp.ID), 0600) + if err != nil { + glog.Errorf("Unable to store docker container ID %v", err) + return err + } + + d.dockerID = resp.ID + + // This value is configurable. Need to figure out how to get it from docker. 
+ + d.cfg.Disk = 10000 + + return nil +} + +func (d *docker) deleteImage() error { + if d.dockerID == "" { + return nil + } + + cli, err := getDockerClient() + if err != nil { + return err + } + + err = cli.ContainerRemove(context.Background(), + types.ContainerRemoveOptions{ + ContainerID: d.dockerID, + Force: true}) + if err != nil { + glog.Warningf("Unable to delete docker instance %s:%s err %v", + d.cfg.Instance, d.dockerID, err) + } + + return err +} + +func (d *docker) startVM(vnicName, ipAddress string) error { + cli, err := getDockerClient() + if err != nil { + return err + } + + err = cli.ContainerStart(context.Background(), d.dockerID) + if err != nil { + glog.Errorf("Unable to start container %v", err) + return err + } + return nil +} + +func dockerConnect(dockerChannel chan string, instance, dockerID string, closedCh chan struct{}, + connectedCh chan struct{}, wg *sync.WaitGroup, boot bool) { + + defer func() { + if closedCh != nil { + close(closedCh) + } + glog.Infof("Monitor function for %s exitting", instance) + wg.Done() + }() + + cli, err := getDockerClient() + if err != nil { + return + } + + // BUG(markus): Need a way to cancel this. 
Can't do this until we have contexts + + con, err := cli.ContainerInspect(context.Background(), dockerID) + if err != nil { + glog.Errorf("Unable to determine status of instance %s:%s: %v", instance, dockerID, err) + return + } + + if !con.State.Running && !con.State.Paused && !con.State.Restarting { + glog.Infof("Docker Instance %s:%s is not running", instance, dockerID) + return + } + + close(connectedCh) + + ctx, cancelFunc := context.WithCancel(context.Background()) + lostContainerCh := make(chan struct{}) + go func() { + defer close(lostContainerCh) + if err != nil { + return + } + ret, err := cli.ContainerWait(ctx, dockerID) + glog.Infof("Instance %s:%s exitted with code %d err %v", + instance, dockerID, ret, err) + }() + +DONE: + for { + select { + case _, _ = <-lostContainerCh: + break DONE + case cmd, ok := <-dockerChannel: + if !ok { + glog.Info("Cancelling Wait") + cancelFunc() + _ = <-lostContainerCh + break DONE + } else if cmd == virtualizerStopCmd { + err := cli.ContainerKill(context.Background(), dockerID, "KILL") + if err != nil { + glog.Errorf("Unable to stop instance %s:%s", instance, dockerID) + } + } + } + } + + glog.Infof("Docker Instance %s:%s shut down", instance, dockerID) +} + +func (d *docker) monitorVM(closedCh chan struct{}, connectedCh chan struct{}, + wg *sync.WaitGroup, boot bool) chan string { + + if d.dockerID == "" { + idPath := path.Join(d.instanceDir, "docker-id") + data, err := ioutil.ReadFile(idPath) + if err != nil { + // We'll return an error later on in dockerConnect + glog.Errorf("Unable to read docker container ID %v", err) + } else { + d.dockerID = string(data) + glog.Infof("Instance UUID %s -> Docker UUID %s", d.cfg.Instance, d.dockerID) + } + } + dockerChannel := make(chan string) + wg.Add(1) + go dockerConnect(dockerChannel, d.cfg.Instance, d.dockerID, closedCh, connectedCh, wg, boot) + return dockerChannel +} + +func (d *docker) computeInstanceDiskspace() int { + if d.dockerID == "" { + return -1 + } + + cli, err := 
getDockerClient() + if err != nil { + return -1 + } + + con, _, err := cli.ContainerInspectWithRaw(context.Background(), d.dockerID, true) + if err != nil { + glog.Errorf("Unable to determine status of instance %s:%s: %v", d.cfg.Instance, + d.dockerID, err) + return -1 + } + + if con.SizeRootFs == nil { + return -1 + } + + return int(*con.SizeRootFs / 1000000) +} + +func (d *docker) stats() (disk, memory, cpu int) { + disk = d.computeInstanceDiskspace() + memory = -1 + cpu = -1 + + if d.pid == 0 { + return + } + + memory = computeProcessMemUsage(d.pid) + if d.cfg == nil { + return + } + + cpuTime := computeProcessCPUTime(d.pid) + now := time.Now() + if d.prevCPUTime != -1 { + cpu = int((100 * (cpuTime - d.prevCPUTime) / + now.Sub(d.prevSampleTime).Nanoseconds())) + if d.cfg.Cpus > 1 { + cpu /= d.cfg.Cpus + } + // if glog.V(1) { + // glog.Infof("cpu %d%%\n", cpu) + // } + } + d.prevCPUTime = cpuTime + d.prevSampleTime = now + + return +} + +func (d *docker) connected() { + d.prevCPUTime = -1 + if d.pid == 0 { + cli, err := getDockerClient() + if err != nil { + return + } + + con, err := cli.ContainerInspect(context.Background(), d.dockerID) + if err != nil { + glog.Errorf("Unable to determine status of instance %s:%s: %v", d.cfg.Instance, + d.dockerID, err) + return + } + if con.State.Pid <= 0 { + return + } + d.pid = con.State.Pid + } +} + +func (d *docker) lostVM() { + d.pid = 0 + d.prevCPUTime = -1 +} + +//BUG(markus): Everything from here onwards should be in a different file. 
It's confusing + +func dockerKillInstance(instanceDir string) []string { + idPath := path.Join(instanceDir, "docker-id") + data, err := ioutil.ReadFile(idPath) + if err != nil { + glog.Errorf("Unable to read docker container ID %v", err) + return nil + } + + cli, err := getDockerClient() + if err != nil { + return nil + } + + dockerID := string(data) + bridges := []string{} + + con, err := cli.ContainerInspect(context.Background(), dockerID) + if err != nil { + glog.Warningf("Unable to determine status of instance %s:%s: %v", instanceDir, dockerID, err) + } else { + bridges = make([]string, 0, len(con.NetworkSettings.Networks)) + for _, v := range con.NetworkSettings.Networks { + if v != nil && v.NetworkID != "" { + bridges = append(bridges, v.NetworkID) + } + } + } + + err = cli.ContainerRemove(context.Background(), + types.ContainerRemoveOptions{ + ContainerID: dockerID, + Force: true}) + if err != nil { + glog.Warningf("Unable to delete docker instance %s err %v", dockerID, err) + } + + return bridges +} + +func checkDockerServerVersion(requiredVersion string, ctx context.Context) error { + + cli, err := getDockerClient() + if err != nil { + return err + } + + ver, err := cli.ServerVersion(ctx) + if err != nil { + glog.Errorf("Unable to retrieve info from docker server err: %v", err) + return err + } + + glog.Infof("Docker server version %s", ver.Version) + + if version.Version(ver.Version).LessThan(version.Version(requiredVersion)) { + return fmt.Errorf("Docker is too old. Required >= %s. Found %s. Some things might not work.", + requiredVersion, ver.Version) + } + + return nil +} diff --git a/ciao-launcher/docker_network.go b/ciao-launcher/docker_network.go new file mode 100644 index 000000000..9ca3f87bc --- /dev/null +++ b/ciao-launcher/docker_network.go @@ -0,0 +1,133 @@ +/* +// Copyright (c) 2016 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +*/ + +package main + +import ( + "sync" + + "github.com/01org/ciao/networking/libsnnet" + "github.com/docker/engine-api/types" + "github.com/docker/engine-api/types/network" + "github.com/golang/glog" + "golang.org/x/net/context" +) + +type dockerNetworkState struct { + done chan struct{} + err error +} + +var dockerNetworkMap struct { + sync.Mutex + networks map[string]*dockerNetworkState +} + +func init() { + dockerNetworkMap.networks = make(map[string]*dockerNetworkState) +} + +func createDockerVnicV2(vnicCfg *libsnnet.VnicConfig) (*libsnnet.Vnic, *libsnnet.SsntpEventInfo, *libsnnet.ContainerInfo, error) { + dockerNetworkMap.Lock() + state := dockerNetworkMap.networks[vnicCfg.SubnetID] + if state != nil { + dockerNetworkMap.Unlock() + glog.Info("Waiting for Docker network creation") + <-state.done + if state.err != nil { + return nil, nil, nil, state.err + } + + return cnNet.CreateVnicV2(vnicCfg) + } + ch := make(chan struct{}) + defer close(ch) + state = &dockerNetworkState{done: ch} + dockerNetworkMap.networks[vnicCfg.SubnetID] = state + dockerNetworkMap.Unlock() + vnic, event, info, err := cnNet.CreateVnicV2(vnicCfg) + state.err = err + if err != nil { + return vnic, event, info, err + } + + if event == nil { + glog.Warning("EVENT information expected") + return vnic, event, info, err + } + + state.err = createDockerNetwork(context.Background(), info) + return vnic, event, info, state.err +} + +func destroyDockerVnicV2(vnicCfg *libsnnet.VnicConfig) (*libsnnet.SsntpEventInfo, error) { + // BUG(markus): We need to pass in a context to 
destroyVnic + + event, info, err := cnNet.DestroyVnicV2(vnicCfg) + if err != nil { + glog.Errorf("cn.DestroyVnic failed %v", err) + return event, err + } + + if info != nil { + destroyDockerNetwork(context.Background(), info.SubnetID) + dockerNetworkMap.Lock() + delete(dockerNetworkMap.networks, vnicCfg.SubnetID) + dockerNetworkMap.Unlock() + } + + return event, err +} + +func createDockerNetwork(ctx context.Context, info *libsnnet.ContainerInfo) error { + cli, err := getDockerClient() + if err != nil { + return err + } + + _, err = cli.NetworkCreate(ctx, types.NetworkCreate{ + Name: info.SubnetID, + Driver: "ciao", + IPAM: network.IPAM{ + Driver: "ciao", + Config: []network.IPAMConfig{{ + Subnet: info.Subnet.String(), + Gateway: info.Gateway.String(), + }}}, + Options: map[string]string{ + "bridge": info.Bridge, + }}) + + if err != nil { + glog.Errorf("Unable to create docker network %s: %v", info.SubnetID, err) + } + + return err +} + +func destroyDockerNetwork(ctx context.Context, bridge string) error { + cli, err := getDockerClient() + if err != nil { + return err + } + + err = cli.NetworkRemove(ctx, bridge) + if err != nil { + glog.Errorf("Unable to remove docker network %s: %v", bridge, err) + } + + return err +} diff --git a/ciao-launcher/image.go b/ciao-launcher/image.go new file mode 100644 index 000000000..77de42156 --- /dev/null +++ b/ciao-launcher/image.go @@ -0,0 +1,69 @@ +/* +// Copyright (c) 2016 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. +*/ + +package main + +import ( + "sync" + + "github.com/golang/glog" +) + +type imageStats struct { + done chan struct{} + minSizeMB int + err error +} + +var imagesMap struct { + sync.Mutex + images map[string]*imageStats +} + +func init() { + imagesMap.images = make(map[string]*imageStats) +} + +// Originally this was supposed to be a generic +// feature which could be used by any virtualisation technology. However, since +// we currently only support QEMU and docker and docker doesn't have a way to +// set disk quotas on the rootfs, this feature is only used by QEMU, hence the +// qemu parameter. In the future we might add imageInfo back to the virtualiser +// interface. + +func getMinImageSize(vm *qemu, imagePath string) (minSizeMB int, err error) { + imagesMap.Lock() + info := imagesMap.images[imagePath] + if info == nil { + info = &imageStats{ + done: make(chan struct{}), + minSizeMB: -1, + } + imagesMap.images[imagePath] = info + imagesMap.Unlock() + + info.minSizeMB, info.err = vm.imageInfo(imagePath) + + glog.Infof("Min image size of %s = %d", imagePath, info.minSizeMB) + close(info.done) + } else { + imagesMap.Unlock() + + <-info.done + } + + return info.minSizeMB, info.err +} diff --git a/ciao-launcher/instance.go b/ciao-launcher/instance.go new file mode 100644 index 000000000..0c783e147 --- /dev/null +++ b/ciao-launcher/instance.go @@ -0,0 +1,261 @@ +/* +// Copyright (c) 2016 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. +*/ + +package main + +import ( + "path" + "sync" + "time" + + "github.com/01org/ciao/payloads" + "github.com/01org/ciao/ssntp" + + "github.com/golang/glog" +) + +type insStartCmd struct { + userData []byte + metaData []byte + frame *ssntp.Frame + cfg *vmConfig +} +type insRestartCmd struct{} +type insDeleteCmd struct { + suicide bool + running ovsRunningState +} +type insStopCmd struct{} +type insMonitorCmd struct{} + +/* +This functions asks the server loop to kill the instance. An instance +needs to request that the server loop kill it if Start fails completly. +As the serverLoop does not wait for the start command to complete, we wouldn't +want to do this, as it would mean all start commands execute in serial, +the serverLoop cannot detect this situation. Thus the instance loop needs +to request it's own death. + +The server loop is the only go routine that can kill the instance. If the +instance kills itself, the serverLoop would lockup if a command came in for +that instance while it was shutting down. The instance go routine cannot +send a command to the serverLoop directly as this could lead to deadlock. +So we must spawn a separate go routine to do this. We also need to handle +the case that this go routine blocks for ever if the serverLoop is quit +by CTRL-C. That's why we select on doneCh as well. In this case, +the command will never be written to the serverLoop, our go routine will +exit, the instance will exit and then finally the overseer will quit. + +There's always the possibility new commands will be received for the +instance while it is waiting to be killed. We'll just fail those. 
+*/ + +func killMe(instance string, doneCh chan struct{}, ac *agentClient, wg *sync.WaitGroup) { + wg.Add(1) + go func() { + cmd := &cmdWrapper{instance, &insDeleteCmd{suicide: true}} + select { + case ac.cmdCh <- cmd: + case <-doneCh: + } + wg.Done() + }() +} + +func instanceLoop(cmdCh chan interface{}, instance string, cfg *vmConfig, wg *sync.WaitGroup, doneCh chan struct{}, ac *agentClient, ovsCh chan<- interface{}) { + var instanceWg sync.WaitGroup + var monitorCh chan string + var connectedCh chan struct{} + var monitorCloseCh chan struct{} + var statsTimer <-chan time.Time + var vm virtualizer + + if simulate == true { + vm = &simulation{} + } else if cfg.Container { + vm = &docker{} + } else { + vm = &qemu{} + } + instanceDir := path.Join(instancesDir, instance) + vm.init(cfg, instanceDir) + shuttingDown := false + + d, m, c := vm.stats() + ovsCh <- &ovsStatsUpdateCmd{instance, m, d, c} + +DONE: + for { + select { + case <-doneCh: + break DONE + case <-statsTimer: + d, m, c := vm.stats() + ovsCh <- &ovsStatsUpdateCmd{instance, m, d, c} + statsTimer = time.After(time.Second * statsPeriod) + case cmd := <-cmdCh: + select { + case <-doneCh: + break DONE + default: + } + + switch cmd := cmd.(type) { + case *insStartCmd: + glog.Info("Found start command") + if monitorCh != nil { + startErr := &startError{nil, payloads.AlreadyRunning} + glog.Errorf("Unable to start instance[%s]", string(startErr.code)) + startErr.send(&ac.ssntpConn, instance) + continue + } + startErr := processStart(cmd, instanceDir, vm, &ac.ssntpConn) + if startErr != nil { + glog.Errorf("Unable to start instance[%s]: %v", string(startErr.code), startErr.err) + startErr.send(&ac.ssntpConn, instance) + + if startErr.code == payloads.LaunchFailure { + ovsCh <- &ovsStateChange{instance, ovsStopped} + } else if startErr.code != payloads.InstanceExists { + glog.Warningf("Unable to create VM instance: %s. 
Killing it", instance) + killMe(instance, doneCh, ac, &instanceWg) + shuttingDown = true + } + continue + } + + connectedCh = make(chan struct{}) + monitorCloseCh = make(chan struct{}) + monitorCh = vm.monitorVM(monitorCloseCh, connectedCh, &instanceWg, false) + ovsCh <- &ovsStatusCmd{} + if cmd.frame != nil && cmd.frame.PathTrace() { + ovsCh <- &ovsTraceFrame{cmd.frame} + } + case *insRestartCmd: + glog.Info("Found restart command") + + if shuttingDown { + restartErr := &restartError{nil, payloads.RestartNoInstance} + glog.Errorf("Unable to restart instance[%s]", string(restartErr.code)) + restartErr.send(&ac.ssntpConn, instance) + continue + } + + if monitorCh != nil { + restartErr := &restartError{nil, payloads.RestartAlreadyRunning} + glog.Errorf("Unable to restart instance[%s]", string(restartErr.code)) + restartErr.send(&ac.ssntpConn, instance) + continue + } + + restartErr := processRestart(instanceDir, vm, &ac.ssntpConn, cfg) + + if restartErr != nil { + glog.Errorf("Unable to restart instance[%s]: %v", string(restartErr.code), + restartErr.err) + restartErr.send(&ac.ssntpConn, instance) + continue + } + + connectedCh = make(chan struct{}) + monitorCloseCh = make(chan struct{}) + monitorCh = vm.monitorVM(monitorCloseCh, connectedCh, &instanceWg, false) + case *insMonitorCmd: + connectedCh = make(chan struct{}) + monitorCloseCh = make(chan struct{}) + monitorCh = vm.monitorVM(monitorCloseCh, connectedCh, &instanceWg, true) + case *insStopCmd: + + if shuttingDown { + stopErr := &stopError{nil, payloads.StopNoInstance} + glog.Errorf("Unable to stop instance[%s]", string(stopErr.code)) + stopErr.send(&ac.ssntpConn, instance) + continue + } + + if monitorCh == nil { + stopErr := &stopError{nil, payloads.StopAlreadyStopped} + glog.Errorf("Unable to stop instance[%s]", string(stopErr.code)) + stopErr.send(&ac.ssntpConn, instance) + continue + } + glog.Infof("Powerdown %s", instance) + monitorCh <- virtualizerStopCmd + case *insDeleteCmd: + + if shuttingDown && 
!cmd.suicide { + deleteErr := &deleteError{nil, payloads.DeleteNoInstance} + glog.Errorf("Unable to delete instance[%s]", string(deleteErr.code)) + deleteErr.send(&ac.ssntpConn, instance) + continue + } + + if monitorCh != nil { + glog.Infof("Powerdown %s before deleting", instance) + monitorCh <- virtualizerStopCmd + vm.lostVM() + } + + _ = processDelete(vm, instanceDir, &ac.ssntpConn, cmd.running) + + if !cmd.suicide { + ovsCh <- &ovsStatusCmd{} + } + + break DONE + default: + glog.Warning("Unknown command") + } + case <-monitorCloseCh: + // Means we've lost VM for now + vm.lostVM() + d, m, c := vm.stats() + ovsCh <- &ovsStatsUpdateCmd{instance, m, d, c} + + glog.Infof("Lost VM instance: %s", instance) + monitorCloseCh = nil + connectedCh = nil + close(monitorCh) + monitorCh = nil + statsTimer = nil + ovsCh <- &ovsStateChange{instance, ovsStopped} + case <-connectedCh: + connectedCh = nil + vm.connected() + ovsCh <- &ovsStateChange{instance, ovsRunning} + d, m, c := vm.stats() + ovsCh <- &ovsStatsUpdateCmd{instance, m, d, c} + statsTimer = time.After(time.Second * statsPeriod) + } + } + + if monitorCh != nil { + close(monitorCh) + } + + glog.Infof("Instance goroutine %s waiting for monitor to exit", instance) + instanceWg.Wait() + glog.Infof("Instance goroutine %s exitted", instance) + wg.Done() +} + +func startInstance(instance string, cfg *vmConfig, wg *sync.WaitGroup, doneCh chan struct{}, + ac *agentClient, ovsCh chan<- interface{}) chan<- interface{} { + cmdCh := make(chan interface{}) + wg.Add(1) + go instanceLoop(cmdCh, instance, cfg, wg, doneCh, ac, ovsCh) + return cmdCh +} diff --git a/ciao-launcher/main.go b/ciao-launcher/main.go new file mode 100644 index 000000000..0f0283793 --- /dev/null +++ b/ciao-launcher/main.go @@ -0,0 +1,635 @@ +/* +// Copyright (c) 2016 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +*/ + +package main + +import ( + "flag" + "fmt" + "log" + "math" + "os" + "os/signal" + "path" + "path/filepath" + "sync" + "syscall" + "time" + + "golang.org/x/net/context" + + "github.com/golang/glog" + + "github.com/01org/ciao/payloads" + "github.com/01org/ciao/ssntp" +) + +var profileFN func() func() + +type networkFlag string + +func (f *networkFlag) String() string { + return string(*f) +} + +func (f *networkFlag) Set(val string) error { + if val != "none" && val != "cn" && val != "nn" { + return fmt.Errorf("none, cn or nn expected") + } + *f = networkFlag(val) + + return nil +} + +func (f *networkFlag) Enabled() bool { + return string(*f) != "none" +} + +func (f *networkFlag) NetworkNode() bool { + return string(*f) == "nn" +} + +type uiFlag string + +func (f *uiFlag) String() string { + return string(*f) +} + +func (f *uiFlag) Set(val string) error { + if val != "none" && val != "nc" && val != "spice" { + return fmt.Errorf("none, nc or spice expected") + } + *f = uiFlag(val) + + return nil +} + +func (f *uiFlag) Enabled() bool { + return string(*f) != "none" +} + +var serverURL string +var serverCertPath string +var clientCertPath string +var computeNet string +var mgmtNet string +var networking networkFlag = "none" +var hardReset bool +var diskLimit bool +var memLimit bool +var simulate bool +var maxInstances = int(math.MaxInt32) + +func init() { + flag.StringVar(&serverURL, "server", "localhost", "URL of SSNTP server") + flag.StringVar(&serverCertPath, "cacert", "/var/lib/ciao/CAcert-server-localhost.pem", "Client certificate") + 
flag.StringVar(&clientCertPath, "cert", "/var/lib/ciao/cert-client-localhost.pem", "CA certificate") + flag.StringVar(&computeNet, "compute-net", "", "Compute Subnet") + flag.StringVar(&mgmtNet, "mgmt-net", "", "Management Subnet") + flag.Var(&networking, "network", "Can be none, cn (compute node) or nn (network node)") + flag.BoolVar(&hardReset, "hard-reset", false, "Kill and delete all instances, reset networking and exit") + flag.BoolVar(&diskLimit, "disk-limit", true, "Use disk usage limits") + flag.BoolVar(&memLimit, "mem-limit", true, "Use memory usage limits") + flag.BoolVar(&simulate, "simulation", false, "Launcher simulation") +} + +const ( + lockDir = "/tmp/lock/ciao" + instancesDir = "/var/lib/ciao/instances" + logDir = "/var/lib/ciao/logs/launcher" + instanceState = "state" + lockFile = "client-agent.lock" + statsPeriod = 30 +) + +type cmdWrapper struct { + instance string + cmd interface{} +} +type statusCmd struct{} + +type ssntpConn struct { + sync.RWMutex + ssntp.Client + connected bool +} + +func (s *ssntpConn) isConnected() bool { + s.RLock() + defer s.RUnlock() + return s.connected +} + +func (s *ssntpConn) setStatus(status bool) { + s.Lock() + s.connected = status + s.Unlock() +} + +type agentClient struct { + ssntpConn + cmdCh chan *cmdWrapper +} + +func (client *agentClient) DisconnectNotify() { + client.setStatus(false) + glog.Warning("disconnected") +} + +func (client *agentClient) ConnectNotify() { + client.setStatus(true) + client.cmdCh <- &cmdWrapper{"", &statusCmd{}} + glog.Info("connected") +} + +func (client *agentClient) StatusNotify(status ssntp.Status, frame *ssntp.Frame) { + glog.Infof("STATUS %s", status) +} + +func (client *agentClient) CommandNotify(cmd ssntp.Command, frame *ssntp.Frame) { + payload := frame.Payload + + switch cmd { + case ssntp.START: + go func() { + start, cn, md := splitYaml(payload) + cfg, payloadErr := parseStartPayload(start) + if payloadErr != nil { + startError := &startError{ + payloadErr.err, + 
payloads.StartFailureReason(payloadErr.code), + } + startError.send(&client.ssntpConn, "") + glog.Errorf("Unable to parse YAML: %v", payloadErr.err) + return + } + client.cmdCh <- &cmdWrapper{cfg.Instance, &insStartCmd{cn, md, frame, cfg}} + }() + case ssntp.RESTART: + go func() { + instance, payloadErr := parseRestartPayload(payload) + if payloadErr != nil { + restartError := &restartError{ + payloadErr.err, + payloads.RestartFailureReason(payloadErr.code), + } + restartError.send(&client.ssntpConn, "") + glog.Errorf("Unable to parse YAML: %v", payloadErr.err) + return + } + client.cmdCh <- &cmdWrapper{instance, &insRestartCmd{}} + }() + case ssntp.STOP: + go func() { + instance, payloadErr := parseStopPayload(payload) + if payloadErr != nil { + stopError := &stopError{ + payloadErr.err, + payloads.StopFailureReason(payloadErr.code), + } + stopError.send(&client.ssntpConn, "") + glog.Errorf("Unable to parse YAML: %s", payloadErr) + return + } + client.cmdCh <- &cmdWrapper{instance, &insStopCmd{}} + }() + case ssntp.DELETE: + go func() { + instance, payloadErr := parseDeletePayload(payload) + if payloadErr != nil { + deleteError := &deleteError{ + payloadErr.err, + payloads.DeleteFailureReason(payloadErr.code), + } + deleteError.send(&client.ssntpConn, "") + glog.Errorf("Unable to parse YAML: %s", payloadErr.err) + return + } + client.cmdCh <- &cmdWrapper{instance, &insDeleteCmd{}} + }() + } +} + +func (client *agentClient) EventNotify(event ssntp.Event, frame *ssntp.Frame) { + glog.Infof("EVENT %s", event) +} + +func (client *agentClient) ErrorNotify(err ssntp.Error, frame *ssntp.Frame) { + glog.Infof("ERROR %d", err) +} + +func insCmdChannel(instance string, ovsCh chan<- interface{}) chan<- interface{} { + targetCh := make(chan ovsGetResult) + ovsCh <- &ovsGetCmd{instance, targetCh} + target := <-targetCh + return target.cmdCh +} + +func insState(instance string, ovsCh chan<- interface{}) ovsGetResult { + targetCh := make(chan ovsGetResult) + ovsCh <- 
&ovsGetCmd{instance, targetCh} + return <-targetCh +} + +func processCommand(client *ssntpConn, cmd *cmdWrapper, ovsCh chan<- interface{}) { + var target chan<- interface{} + var delCmd *insDeleteCmd + + switch insCmd := cmd.cmd.(type) { + case *statusCmd: + ovsCh <- &ovsStatsStatusCmd{} + return + case *insStartCmd: + targetCh := make(chan ovsAddResult) + ovsCh <- &ovsAddCmd{cmd.instance, insCmd.cfg, targetCh} + addResult := <-targetCh + if !addResult.canAdd { + glog.Errorf("Instance will make node full: Disk %d Mem %d CPUs %d", + insCmd.cfg.Disk, insCmd.cfg.Mem, insCmd.cfg.Cpus) + se := startError{nil, payloads.FullComputeNode} + se.send(client, cmd.instance) + return + } + target = addResult.cmdCh + case *insDeleteCmd: + insState := insState(cmd.instance, ovsCh) + target = insState.cmdCh + if target == nil { + glog.Errorf("Instance %s does not exist", cmd.instance) + de := deleteError{nil, payloads.DeleteNoInstance} + de.send(client, cmd.instance) + return + } + delCmd = insCmd + delCmd.running = insState.running + case *insStopCmd: + target = insCmdChannel(cmd.instance, ovsCh) + if target == nil { + glog.Errorf("Instance %s does not exist", cmd.instance) + se := stopError{nil, payloads.StopNoInstance} + se.send(client, cmd.instance) + return + } + case *insRestartCmd: + target = insCmdChannel(cmd.instance, ovsCh) + if target == nil { + glog.Errorf("Instance %s does not exist", cmd.instance) + re := restartError{nil, payloads.RestartNoInstance} + re.send(client, cmd.instance) + return + } + default: + target = insCmdChannel(cmd.instance, ovsCh) + } + + if target == nil { + glog.Errorf("Instance %s does not exist", cmd.instance) + return + } + + target <- cmd.cmd + + if delCmd != nil { + errCh := make(chan error) + ovsCh <- &ovsRemoveCmd{ + cmd.instance, + delCmd.suicide, + errCh} + <-errCh + } +} + +func connectToServer(doneCh chan struct{}, statusCh chan struct{}) { + + defer func() { + statusCh <- struct{}{} + }() + + var wg sync.WaitGroup + + var role uint32 
+ if networking.NetworkNode() { + role = uint32(ssntp.NETAGENT) + } else { + role = uint32(ssntp.AGENT) + } + + cfg := &ssntp.Config{URI: serverURL, CAcert: serverCertPath, Cert: clientCertPath, + Role: uint32(role), Log: ssntp.Log} + client := &agentClient{ + cmdCh: make(chan *cmdWrapper), + } + + ovsCh := startOverseer(&wg, client) + + dialCh := make(chan error) + + go func() { + err := client.Dial(cfg, client) + if err != nil { + glog.Errorf("Unable to connect to server %v", err) + dialCh <- err + return + } + + dialCh <- err + }() + + dialing := true + +DONE: + for { + select { + case err := <-dialCh: + dialing = false + if err != nil { + break DONE + } + case <-doneCh: + client.Close() + if !dialing { + break DONE + } + case cmd := <-client.cmdCh: + /* + Double check we're not quitting here. Otherwise a flood of commands + from the server could block our exit for an arbitrary amount of time, + i.e, doneCh and cmdCh could become available at the same time. + */ + select { + case <-doneCh: + client.Close() + break DONE + default: + } + + processCommand(&client.ssntpConn, cmd, ovsCh) + } + } + + close(ovsCh) + wg.Wait() + glog.Info("Overseer has closed down") +} + +func getLock() error { + err := os.MkdirAll(lockDir, 0777) + if err != nil { + glog.Errorf("Unable to create lockdir %s", lockDir) + return err + } + + /* We're going to let the OS close and unlock this fd */ + lockPath := path.Join(lockDir, lockFile) + fd, err := syscall.Open(lockPath, syscall.O_CREAT, syscall.S_IWUSR|syscall.S_IRUSR) + if err != nil { + glog.Errorf("Unable to open lock file %v", err) + return err + } + + syscall.CloseOnExec(fd) + + if syscall.Flock(fd, syscall.LOCK_EX|syscall.LOCK_NB) != nil { + glog.Error("Launcher is already running. 
Exitting.") + return fmt.Errorf("Unable to lock file %s", lockPath) + } + + return nil +} + +/* Must be called after flag.Parse() */ +func initLogger() error { + logDirFlag := flag.Lookup("log_dir") + if logDirFlag == nil { + return fmt.Errorf("log_dir does not exist") + } + + if logDirFlag.Value.String() == "" { + logDirFlag.Value.Set(logDir) + } + + if err := os.MkdirAll(logDirFlag.Value.String(), 0755); err != nil { + return fmt.Errorf("Unable to create log directory (%s) %v", logDir, err) + } + + return nil +} + +func createMandatoryDirs() error { + if err := os.MkdirAll(instancesDir, 0755); err != nil { + return fmt.Errorf("Unable to create instances directory (%s) %v", + instancesDir, err) + } + + return nil +} + +func purgeLauncherState() { + + glog.Info("======= HARD RESET ======") + + glog.Info("Shutting down running instances") + + toRemove := make([]string, 0, 1024) + dockerBridges := map[string]struct{}{} + networking := false + dockerNetworking := false + + glog.Info("Init networking") + + if err := initNetworkPhase1(); err != nil { + glog.Warningf("Failed to init network: %v\n", err) + } else { + networking = true + defer shutdownNetwork() + if err := initDockerNetworking(context.Background()); err != nil { + glog.Info("Unable to initialise docker networking") + } else { + dockerNetworking = true + } + } + + filepath.Walk(instancesDir, func(path string, info os.FileInfo, err error) error { + if path == instancesDir { + return nil + } + + if !info.IsDir() { + return nil + } + + cfg, err := loadVMConfig(path) + if err != nil { + glog.Warningf("Unable to load config for %s: %v", path, err) + } else { + if cfg.Container { + for _, b := range dockerKillInstance(path) { + dockerBridges[b] = struct{}{} + } + } else { + qemuKillInstance(path) + } + } + toRemove = append(toRemove, path) + return nil + }) + + for _, p := range toRemove { + err := os.RemoveAll(p) + if err != nil { + glog.Warningf("Unable to remove instance dir for %s: %v", p, err) + } + } + + if 
dockerNetworking { + + glog.Info("Reset docker networking") + + for b := range dockerBridges { + glog.Infof("Deleting docker network %s", b) + if err := destroyDockerNetwork(context.Background(), b); err != nil { + glog.Warningf("Unable to delete docker bridge %s", b) + } + } + } + + if !networking { + return + } + + glog.Info("Reset networking") + + err := cnNet.ResetNetwork() + if err != nil { + glog.Warningf("Unable to reset network: %v", err) + } +} + +func setLimits() { + var rlim syscall.Rlimit + err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &rlim) + if err != nil { + glog.Warningf("Getrlimit failed %v", err) + return + } + + glog.Infof("Initial nofile limits: cur %d max %d", rlim.Cur, rlim.Max) + + if rlim.Cur < rlim.Max { + oldCur := rlim.Cur + rlim.Cur = rlim.Max + err := syscall.Setrlimit(syscall.RLIMIT_NOFILE, &rlim) + if err != nil { + glog.Warningf("Setrlimit failed %v", err) + rlim.Cur = oldCur + } + } + + glog.Infof("Updated nofile limits: cur %d max %d", rlim.Cur, rlim.Max) + + maxInstances = int(rlim.Cur / 5) +} + +func main() { + + if getLock() != nil { + os.Exit(1) + } + + flag.Parse() + + if err := initLogger(); err != nil { + log.Fatalf("Unable to initialise logs: %v", err) + } + + if profileFN != nil { + stopProfile := profileFN() + if stopProfile != nil { + defer stopProfile() + } + } + + defer func() { + glog.Flush() + glog.Info("Exit") + }() + + glog.Info("Starting Launcher") + + if hardReset { + purgeLauncherState() + os.Exit(0) + } + + setLimits() + + glog.Infof("Launcher will allow a maximum of %d instances", maxInstances) + + if err := createMandatoryDirs(); err != nil { + glog.Fatalf("Unable to create mandatory dirs: %v", err) + } + + doneCh := make(chan struct{}) + statusCh := make(chan struct{}) + signalCh := make(chan os.Signal, 1) + timeoutCh := make(chan struct{}) + signal.Notify(signalCh, syscall.SIGINT, syscall.SIGTERM, syscall.SIGHUP) + + if networking.Enabled() { + ctx, cancelFunc := context.WithCancel(context.Background()) 
+ ch := initNetworking(ctx) + select { + case <-signalCh: + glog.Info("Received terminating signal. Quitting") + cancelFunc() + os.Exit(1) + case err := <-ch: + if err != nil { + glog.Errorf("Failed to init network: %v\n", err) + os.Exit(1) + } + } + } + + go connectToServer(doneCh, statusCh) + +DONE: + for { + select { + case <-signalCh: + glog.Info("Received terminating signal. Waiting for server loop to quit") + close(doneCh) + go func() { + time.Sleep(time.Second) + timeoutCh <- struct{}{} + }() + case <-statusCh: + glog.Info("Server Loop quit cleanly") + break DONE + case <-timeoutCh: + glog.Warning("Server Loop did not exit within 1 second quitting") + glog.Flush() + + /* We panic here to see which naughty go routines are still running. */ + + panic("Server Loop did not exit within 1 second quitting") + } + } + + if networking.Enabled() { + shutdownNetwork() + } +} diff --git a/ciao-launcher/network.go b/ciao-launcher/network.go new file mode 100644 index 000000000..56a476fd0 --- /dev/null +++ b/ciao-launcher/network.go @@ -0,0 +1,333 @@ +/* +// Copyright (c) 2016 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+*/ + +package main + +import ( + "encoding/binary" + "fmt" + "net" + "os" + + "golang.org/x/net/context" + + "github.com/golang/glog" + + "github.com/01org/ciao/networking/libsnnet" + "github.com/01org/ciao/payloads" + "github.com/01org/ciao/ssntp" +) + +var cnNet *libsnnet.ComputeNode +var hostname string +var nicInfo []*payloads.NetworkStat +var dockerNet *libsnnet.DockerPlugin + +func initNetworkPhase1() error { + + cn := &libsnnet.ComputeNode{} + var mnetList []net.IPNet + var cnetList []net.IPNet + + if computeNet != "" { + _, cnet, _ := net.ParseCIDR(computeNet) + if cnet == nil { + return fmt.Errorf("Unable to Parse CIDR :" + computeNet) + } + cnetList = []net.IPNet{*cnet} + } + + if mgmtNet != "" { + _, mnet, _ := net.ParseCIDR(mgmtNet) + if mnet == nil { + return fmt.Errorf("Unable to Parse CIDR :" + mgmtNet) + } + mnetList = []net.IPNet{*mnet} + } + + cn.NetworkConfig = &libsnnet.NetworkConfig{ + ManagementNet: mnetList, + ComputeNet: cnetList, + Mode: libsnnet.GreTunnel, + } + + if err := cn.Init(); err != nil { + return err + } + + cnNet = cn + + return nil +} + +func initDockerNetworking(ctx context.Context) error { + err := checkDockerServerVersion("1.10.0", ctx) + if err != nil { + glog.Warningf("Docker Version check failed: %v", err) + return err + } + + dockerPlugin := libsnnet.NewDockerPlugin() + if err := dockerPlugin.Init(); err != nil { + glog.Warningf("Docker Init failed: %v", err) + return err + } + + if err := dockerPlugin.Start(); err != nil { + dockerPlugin.Close() + glog.Warningf("Docker start failed: %v ", err) + return err + } + + dockerNet = dockerPlugin + + return nil +} + +func shutdownNetwork() { + if dockerNet == nil { + return + } + + if err := dockerNet.Stop(); err != nil { + glog.Warningf("Docker stop failed: %v", err) + } + + if err := dockerNet.Close(); err != nil { + glog.Warningf("Docker close failed: %v", err) + } + + glog.Infof("Docker networking shutdown successfully") +} + +func initNetwork(ctx context.Context) error { 
+ + if err := initNetworkPhase1(); err != nil { + return err + } + + if err := initDockerNetworking(ctx); err != nil { + glog.Warning("Unable to initialise docker networking") + } + + if err := cnNet.DbRebuild(nil); err != nil { + return err + } + + limit := len(cnNet.ComputeAddr) + if len(cnNet.ComputeLink) < limit { + limit = len(cnNet.ComputeLink) + } + + for i := 0; i < limit; i++ { + nicInfo = append(nicInfo, &payloads.NetworkStat{ + NodeIP: cnNet.ComputeAddr[i].IP.String(), + NodeMAC: cnNet.ComputeLink[i].Attrs().HardwareAddr.String(), + }) + glog.Infof("Network card %d Info", i) + glog.Infof(" IP address of node is %s", nicInfo[i].NodeIP) + glog.Infof(" MAC address of node is %s", nicInfo[i].NodeMAC) + } + + if len(nicInfo) == 0 { + glog.Warning("Unable to determine IP address. Should not happen") + } + + var err error + hostname, err = os.Hostname() + if err == nil { + glog.Infof("Hostname of node is %s", hostname) + } else { + glog.Warning("Unable to determine hostname %s", err) + } + + return nil +} + +func initNetworking(ctx context.Context) chan error { + ch := make(chan error) + go func() { + err := initNetwork(ctx) + ch <- err + }() + return ch +} + +func createCNVnicCfg(cfg *vmConfig) (*libsnnet.VnicConfig, error) { + + glog.Info("Creating CN Vnic CFG") + + mac, err := net.ParseMAC(cfg.VnicMAC) + if err != nil { + return nil, fmt.Errorf("Invalid mac address %v", err) + } + + _, vnet, err := net.ParseCIDR(cfg.SubnetIP) + if err != nil { + return nil, fmt.Errorf("Invalid vnic subnet %v", err) + } + + concIP := net.ParseIP(cfg.ConcIP) + if concIP == nil { + return nil, fmt.Errorf("Invalid concentrator ip %s", cfg.ConcIP) + } + + vnicIP := net.ParseIP(cfg.VnicIP) + if vnicIP == nil { + return nil, fmt.Errorf("Invalid vnicIP ip %s", cfg.VnicIP) + } + + subnetKey := binary.LittleEndian.Uint32(vnet.IP) + var role libsnnet.VnicRole + if cfg.Container { + role = libsnnet.TenantContainer + } else { + role = libsnnet.TenantVM + } + + return 
&libsnnet.VnicConfig{
		VnicRole:   role,
		VnicIP:     vnicIP,
		ConcIP:     concIP,
		VnicMAC:    mac,
		Subnet:     *vnet,
		SubnetKey:  int(subnetKey),
		VnicID:     cfg.VnicUUID,
		InstanceID: cfg.Instance,
		TenantID:   cfg.TennantUUID,
		SubnetID:   cfg.SubnetIP,
		ConcID:     cfg.ConcUUID}, nil
}

// createCNCIVnicCfg builds the VnicConfig for a CNCI (network node) VNIC.
// Only the MAC and identity fields are required for the DataCenter role.
func createCNCIVnicCfg(cfg *vmConfig) (*libsnnet.VnicConfig, error) {

	glog.Info("Creating CNCI Vnic CFG")

	mac, err := net.ParseMAC(cfg.VnicMAC)
	if err != nil {
		return nil, fmt.Errorf("Invalid mac address %v", err)
	}

	return &libsnnet.VnicConfig{
		VnicRole:   libsnnet.DataCenter,
		VnicMAC:    mac,
		VnicID:     cfg.VnicUUID,
		InstanceID: cfg.Instance,
		TenantID:   cfg.TennantUUID}, nil
}

// createVnicCfg dispatches to the CNCI or CN VnicConfig builder depending
// on whether the instance is a network node.
func createVnicCfg(cfg *vmConfig) (*libsnnet.VnicConfig, error) {
	if cfg.NetworkNode {
		return createCNCIVnicCfg(cfg)
	}

	return createCNVnicCfg(cfg)
}

// sendNetworkEvent forwards a libsnnet tunnel add/remove event to the
// scheduler via SSNTP.  It is a no-op when there is no event or the client
// is disconnected; failures are logged, not returned.
func sendNetworkEvent(client *ssntpConn, eventType ssntp.Event,
	event *libsnnet.SsntpEventInfo) {

	if event == nil || !client.isConnected() {
		return
	}

	payload, err := generateNetEventPayload(event, client.UUID())
	if err != nil {
		glog.Warningf("Unable to parse ssntpEvent %s", err)
		return
	}

	_, err = client.SendEvent(eventType, payload)
	if err != nil {
		glog.Warningf("Unable to send %s", event)
	}
}

// createVnic creates the VNIC for an instance and returns the link name and,
// for containers, the docker bridge (subnet) the container should join.  A
// TenantAdded event is sent to the scheduler when the VNIC creation produced
// one.
func createVnic(client *ssntpConn, vnicCfg *libsnnet.VnicConfig) (string, string, error) {
	var name string
	var bridge string

	//BUG(markus): This function needs a context parameter

	if vnicCfg.VnicRole != libsnnet.DataCenter {
		var vnic *libsnnet.Vnic
		var event *libsnnet.SsntpEventInfo
		var info *libsnnet.ContainerInfo
		var err error
		if vnicCfg.VnicRole == libsnnet.TenantContainer {
			vnic, event, info, err = createDockerVnicV2(vnicCfg)
		} else {
			vnic, event, info, err = cnNet.CreateVnicV2(vnicCfg)
		}
		if err != nil {
			glog.Errorf("cn.CreateVnic failed %v", err)
			return "", "", err
		}
		// BUG fix: info was previously dereferenced before the error
		// check, which panics when createDockerVnicV2 fails and
		// returns a nil info.
		if vnicCfg.VnicRole == libsnnet.TenantContainer {
			bridge = info.SubnetID
		}
		sendNetworkEvent(client, ssntp.TenantAdded, event)
		name = vnic.LinkName
		glog.Infoln("CN VNIC created =", name, info, event)
	} else {
		vnic, err := cnNet.CreateCnciVnic(vnicCfg)
		if err != nil {
			glog.Errorf("cn.CreateCnciVnic failed %v", err)
			return "", "", err
		}
		name = vnic.LinkName
		glog.Infoln("CNCI VNIC created =", name)
	}

	return name, bridge, nil
}

// destroyVnic tears down an instance's VNIC, sending a TenantRemoved event
// to the scheduler for tenant VNICs when one is produced.
func destroyVnic(client *ssntpConn, vnicCfg *libsnnet.VnicConfig) error {
	if vnicCfg.VnicRole != libsnnet.DataCenter {
		var event *libsnnet.SsntpEventInfo
		var err error

		// BUG fix: the branches were swapped relative to createVnic:
		// containers must be destroyed via the docker path and VMs via
		// cnNet.DestroyVnicV2.
		if vnicCfg.VnicRole == libsnnet.TenantContainer {
			event, err = destroyDockerVnicV2(vnicCfg)
		} else {
			event, _, err = cnNet.DestroyVnicV2(vnicCfg)
		}
		if err != nil {
			glog.Errorf("cn.DestroyVnic failed %v", err)
			return err
		}

		sendNetworkEvent(client, ssntp.TenantRemoved, event)

		glog.Infoln("CN VNIC Destroyed =", vnicCfg.VnicIP, event)
	} else {
		err := cnNet.DestroyCnciVnic(vnicCfg)
		if err != nil {
			glog.Errorf("cn.DestroyCnciVnic failed %v", err)
			return err
		}

		glog.Infoln("CNCI VNIC Destroyed =", vnicCfg.VnicIP)
	}

	return nil
}

// getNodeIPAddress returns the first compute NIC IP discovered by
// initNetwork, falling back to loopback when none is known.
func getNodeIPAddress() string {
	if len(nicInfo) == 0 {
		return "127.0.0.1"
	}

	return nicInfo[0].NodeIP
}
diff --git a/ciao-launcher/overseer.go b/ciao-launcher/overseer.go
new file mode 100644
index 000000000..878b1a671
--- /dev/null
+++ b/ciao-launcher/overseer.go
@@ -0,0 +1,730 @@
/*
// Copyright (c) 2016 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
+*/ + +package main + +import ( + "bufio" + "container/list" + "fmt" + "os" + "path/filepath" + "regexp" + "strconv" + "sync" + "syscall" + "time" + + "github.com/golang/glog" + + "gopkg.in/yaml.v2" + + "github.com/01org/ciao/payloads" + "github.com/01org/ciao/ssntp" +) + +type ovsAddResult struct { + cmdCh chan<- interface{} + canAdd bool +} + +type ovsAddCmd struct { + instance string + cfg *vmConfig + targetCh chan<- ovsAddResult +} + +type ovsGetResult struct { + cmdCh chan<- interface{} + running ovsRunningState +} + +type ovsGetCmd struct { + instance string + targetCh chan<- ovsGetResult +} + +type ovsRemoveCmd struct { + instance string + suicide bool + errCh chan<- error +} + +type ovsStateChange struct { + instance string + state ovsRunningState +} + +type ovsStatsUpdateCmd struct { + instance string + memoryUsageMB int + diskUsageMB int + CPUUsage int +} + +type ovsTraceFrame struct { + frame *ssntp.Frame +} + +type ovsStatusCmd struct{} +type ovsStatsStatusCmd struct{} + +type ovsRunningState int + +const ( + ovsPending ovsRunningState = iota + ovsRunning + ovsStopped +) + +const ( + diskSpaceHWM = 80 * 1000 + memHWM = 1 * 1000 + diskSpaceLWM = 40 * 1000 + memLWM = 512 +) + +type ovsInstanceState struct { + cmdCh chan<- interface{} + running ovsRunningState + memoryUsageMB int + diskUsageMB int + CPUUsage int + maxDiskUsageMB int + maxVCPUs int + maxMemoryMB int + sshIP string + sshPort int +} + +type overseer struct { + instances map[string]*ovsInstanceState + ovsCh chan interface{} + childDoneCh chan struct{} + parentWg *sync.WaitGroup + childWg *sync.WaitGroup + ac *agentClient + vcpusAllocated int + diskSpaceAllocated int + memoryAllocated int + diskSpaceAvailable int + memoryAvailable int + traceFrames *list.List +} + +type cnStats struct { + totalMemMB int + availableMemMB int + totalDiskMB int + availableDiskMB int + load int + cpusOnline int +} + +var memTotalRegexp *regexp.Regexp +var memFreeRegexp *regexp.Regexp +var memActiveFileRegexp 
*regexp.Regexp +var memInactiveFileRegexp *regexp.Regexp +var cpuStatsRegexp *regexp.Regexp + +func init() { + memTotalRegexp = regexp.MustCompile(`MemTotal:\s+(\d+)`) + memFreeRegexp = regexp.MustCompile(`MemFree:\s+(\d+)`) + memActiveFileRegexp = regexp.MustCompile(`Active\(file\):\s+(\d+)`) + memInactiveFileRegexp = regexp.MustCompile(`Inactive\(file\):\s+(\d+)`) + cpuStatsRegexp = regexp.MustCompile(`^cpu[0-9]+.*$`) +} + +func grabInt(re *regexp.Regexp, line string, val *int) bool { + matches := re.FindStringSubmatch(line) + if matches != nil { + parsedNum, err := strconv.Atoi(matches[1]) + if err == nil { + *val = parsedNum + return true + } + } + return false +} + +func getMemoryInfo() (total, available int) { + + total = -1 + available = -1 + free := -1 + active := -1 + inactive := -1 + + file, err := os.Open("/proc/meminfo") + if err != nil { + return + } + defer func() { + _ = file.Close() + }() + + scanner := bufio.NewScanner(file) + for scanner.Scan() && (total == -1 || free == -1 || active == -1 || + inactive == -1) { + line := scanner.Text() + for _, i := range []struct { + v *int + r *regexp.Regexp + }{ + {&free, memFreeRegexp}, + {&total, memTotalRegexp}, + {&active, memActiveFileRegexp}, + {&inactive, memInactiveFileRegexp}, + } { + if *i.v == -1 { + if grabInt(i.r, line, i.v) { + break + } + } + } + } + + if free != -1 && active != -1 && inactive != -1 { + available = (free + active + inactive) / 1024 + } + + if total != -1 { + total = total / 1024 + } + + return +} + +func getOnlineCPUs() int { + + file, err := os.Open("/proc/stat") + if err != nil { + return -1 + } + defer func() { + _ = file.Close() + }() + + scanner := bufio.NewScanner(file) + if !scanner.Scan() { + return -1 + } + + cpusOnline := 0 + for scanner.Scan() && cpuStatsRegexp.MatchString(scanner.Text()) { + cpusOnline++ + } + + if cpusOnline == 0 { + return -1 + } + + return cpusOnline +} + +func getFSInfo() (total, available int) { + + total = -1 + available = -1 + var buf 
syscall.Statfs_t + + if syscall.Statfs(instancesDir, &buf) != nil { + return + } + + if buf.Bsize <= 0 { + return + } + + total = int((uint64(buf.Bsize) * buf.Blocks) / (1000 * 1000)) + available = int((uint64(buf.Bsize) * buf.Bavail) / (1000 * 1000)) + + return +} + +func getLoadAvg() int { + file, err := os.Open("/proc/loadavg") + if err != nil { + return -1 + } + defer func() { + _ = file.Close() + }() + + scanner := bufio.NewScanner(file) + scanner.Split(bufio.ScanWords) + if !scanner.Scan() { + return -1 + } + + loadFloat, err := strconv.ParseFloat(scanner.Text(), 64) + if err != nil { + return -1 + } + + return int(loadFloat) +} + +func (ovs *overseer) roomAvailable(cfg *vmConfig) bool { + + if len(ovs.instances) >= maxInstances { + glog.Warningf("We're FULL. Too many instances %d", len(ovs.instances)) + return false + } + + diskSpaceAvailable := ovs.diskSpaceAvailable - cfg.Disk + memoryAvailable := ovs.memoryAvailable - cfg.Mem + + glog.Infof("disk Avail %d MemAvail %d", diskSpaceAvailable, memoryAvailable) + + if diskSpaceAvailable < diskSpaceLWM { + if diskLimit == true { + return false + } + } + + if memoryAvailable < memLWM { + if memLimit == true { + return false + } + } + + return true +} + +func (ovs *overseer) updateAvailableResources(cns *cnStats) { + diskSpaceConsumed := 0 + memConsumed := 0 + for _, target := range ovs.instances { + if target.diskUsageMB != -1 { + diskSpaceConsumed += target.diskUsageMB + } + + if target.memoryUsageMB != -1 { + if target.memoryUsageMB < target.maxMemoryMB { + memConsumed += target.memoryUsageMB + } else { + memConsumed += target.maxMemoryMB + } + } + } + + ovs.diskSpaceAvailable = (cns.availableDiskMB + diskSpaceConsumed) - + ovs.diskSpaceAllocated + + ovs.memoryAvailable = (cns.availableMemMB + memConsumed) - + ovs.memoryAllocated + + if glog.V(1) { + glog.Infof("Memory Available: %d Disk space Available %d", + ovs.memoryAvailable, ovs.diskSpaceAvailable) + } +} + +func (ovs *overseer) computeStatus() 
ssntp.Status { + + if len(ovs.instances) >= maxInstances { + return ssntp.FULL + } + + if ovs.diskSpaceAvailable < diskSpaceHWM { + if diskLimit == true { + return ssntp.FULL + } + } + + if ovs.memoryAvailable < memHWM { + if memLimit == true { + return ssntp.FULL + } + } + + return ssntp.READY +} + +func (ovs *overseer) sendStatusCommand(cns *cnStats, status ssntp.Status) { + var s payloads.Ready + + s.Init() + + s.NodeUUID = ovs.ac.ssntpConn.UUID() + s.MemTotalMB, s.MemAvailableMB = cns.totalMemMB, cns.availableMemMB + s.Load = cns.load + s.CpusOnline = cns.cpusOnline + s.DiskTotalMB, s.DiskAvailableMB = cns.totalDiskMB, cns.availableDiskMB + + payload, err := yaml.Marshal(&s) + if err != nil { + glog.Errorf("Unable to Marshall Status %v", err) + return + } + + _, err = ovs.ac.ssntpConn.SendStatus(status, payload) + if err != nil { + glog.Errorf("Failed to send status command %v", err) + return + } +} + +func (ovs *overseer) sendStats(cns *cnStats, status ssntp.Status) { + var s payloads.Stat + + s.Init() + + s.NodeUUID = ovs.ac.ssntpConn.UUID() + s.Status = status.String() + s.MemTotalMB, s.MemAvailableMB = cns.totalMemMB, cns.availableMemMB + s.Load = cns.load + s.CpusOnline = cns.cpusOnline + s.DiskTotalMB, s.DiskAvailableMB = cns.totalDiskMB, cns.availableDiskMB + s.NodeHostName = hostname // global from network.go + s.Networks = make([]payloads.NetworkStat, len(nicInfo)) + for i, nic := range nicInfo { + s.Networks[i] = *nic + } + s.Instances = make([]payloads.InstanceStat, len(ovs.instances)) + i := 0 + for uuid, state := range ovs.instances { + s.Instances[i].InstanceUUID = uuid + if state.running == ovsRunning { + s.Instances[i].State = payloads.Running + } else if state.running == ovsStopped { + s.Instances[i].State = payloads.Exited + } else { + s.Instances[i].State = payloads.Pending + } + s.Instances[i].MemoryUsageMB = state.memoryUsageMB + s.Instances[i].DiskUsageMB = state.diskUsageMB + s.Instances[i].CPUUsage = state.CPUUsage + s.Instances[i].SSHIP 
= state.sshIP + s.Instances[i].SSHPort = state.sshPort + i++ + } + + payload, err := yaml.Marshal(&s) + if err != nil { + glog.Errorf("Unable to Marshall STATS %v", err) + return + } + + _, err = ovs.ac.ssntpConn.SendCommand(ssntp.STATS, payload) + if err != nil { + glog.Errorf("Failed to send stats command %v", err) + return + } +} + +func (ovs *overseer) sendTraceReport() { + var s payloads.Trace + + if ovs.traceFrames.Len() == 0 { + return + } + + for e := ovs.traceFrames.Front(); e != nil; e = e.Next() { + f := e.Value.(*ssntp.Frame) + frameTrace, err := f.DumpTrace() + if err != nil { + glog.Errorf("Unable to dump traced frame %v", err) + continue + } + + s.Frames = append(s.Frames, *frameTrace) + } + + ovs.traceFrames = list.New() + + payload, err := yaml.Marshal(&s) + if err != nil { + glog.Errorf("Unable to Marshall TraceReport %v", err) + return + } + + _, err = ovs.ac.ssntpConn.SendEvent(ssntp.TraceReport, payload) + if err != nil { + glog.Errorf("Failed to send TraceReport event %v", err) + return + } +} + +func getStats() *cnStats { + var s cnStats + + s.totalMemMB, s.availableMemMB = getMemoryInfo() + s.load = getLoadAvg() + s.cpusOnline = getOnlineCPUs() + s.totalDiskMB, s.availableDiskMB = getFSInfo() + + return &s +} + +func (ovs *overseer) sendInstanceDeletedEvent(instance string) { + var event payloads.EventInstanceDeleted + + event.InstanceDeleted.InstanceUUID = instance + + payload, err := yaml.Marshal(&event) + if err != nil { + glog.Errorf("Unable to Marshall STATS %v", err) + return + } + + _, err = ovs.ac.ssntpConn.SendEvent(ssntp.InstanceDeleted, payload) + if err != nil { + glog.Errorf("Failed to send event command %v", err) + return + } +} + +func (ovs *overseer) processCommand(cmd interface{}) { + switch cmd := cmd.(type) { + case *ovsGetCmd: + glog.Infof("Overseer: looking for instance %s", cmd.instance) + var insState ovsGetResult + target := ovs.instances[cmd.instance] + if target != nil { + insState.cmdCh = target.cmdCh + 
insState.running = target.running + } + cmd.targetCh <- insState + case *ovsAddCmd: + glog.Infof("Overseer: adding %s", cmd.instance) + var targetCh chan<- interface{} + target := ovs.instances[cmd.instance] + canAdd := true + cfg := cmd.cfg + if target != nil { + targetCh = target.cmdCh + } else if ovs.roomAvailable(cfg) { + ovs.vcpusAllocated += cfg.Cpus + ovs.diskSpaceAllocated += cfg.Disk + ovs.memoryAllocated += cfg.Mem + targetCh = startInstance(cmd.instance, cfg, ovs.childWg, ovs.childDoneCh, + ovs.ac, ovs.ovsCh) + ovs.instances[cmd.instance] = &ovsInstanceState{ + cmdCh: targetCh, + running: ovsPending, + diskUsageMB: -1, + CPUUsage: -1, + memoryUsageMB: -1, + maxDiskUsageMB: cfg.Disk, + maxVCPUs: cfg.Cpus, + maxMemoryMB: cfg.Mem, + sshIP: cfg.ConcIP, + sshPort: cfg.SSHPort, + } + } else { + canAdd = false + } + cmd.targetCh <- ovsAddResult{targetCh, canAdd} + case *ovsRemoveCmd: + glog.Infof("Overseer: removing %s", cmd.instance) + target := ovs.instances[cmd.instance] + if target == nil { + cmd.errCh <- fmt.Errorf("Instance does not exist") + break + } + + ovs.diskSpaceAllocated -= target.maxDiskUsageMB + if ovs.diskSpaceAllocated < 0 { + ovs.diskSpaceAllocated = 0 + } + + ovs.vcpusAllocated -= target.maxVCPUs + if ovs.vcpusAllocated < 0 { + ovs.vcpusAllocated = 0 + } + + ovs.memoryAllocated -= target.maxMemoryMB + if ovs.memoryAllocated < 0 { + ovs.memoryAllocated = 0 + } + + delete(ovs.instances, cmd.instance) + if !cmd.suicide { + ovs.sendInstanceDeletedEvent(cmd.instance) + } + cmd.errCh <- nil + case *ovsStatusCmd: + glog.Info("Overseer: Recieved Status Command") + if !ovs.ac.ssntpConn.isConnected() { + break + } + cns := getStats() + ovs.updateAvailableResources(cns) + ovs.sendStatusCommand(cns, ovs.computeStatus()) + case *ovsStatsStatusCmd: + glog.Info("Overseer: Recieved StatsStatus Command") + if !ovs.ac.ssntpConn.isConnected() { + break + } + cns := getStats() + ovs.updateAvailableResources(cns) + status := ovs.computeStatus() + 
ovs.sendStatusCommand(cns, status) + ovs.sendStats(cns, status) + case *ovsStateChange: + glog.Infof("Overseer: Recieved State Change %v", *cmd) + target := ovs.instances[cmd.instance] + if target != nil { + target.running = cmd.state + } + case *ovsStatsUpdateCmd: + if glog.V(1) { + glog.Infof("STATS Update for %s: Mem %d Disk %d Cpu %d", + cmd.instance, cmd.memoryUsageMB, + cmd.diskUsageMB, cmd.CPUUsage) + } + target := ovs.instances[cmd.instance] + if target != nil { + target.memoryUsageMB = cmd.memoryUsageMB + target.diskUsageMB = cmd.diskUsageMB + target.CPUUsage = cmd.CPUUsage + } + case *ovsTraceFrame: + cmd.frame.SetEndStamp() + ovs.traceFrames.PushBack(cmd.frame) + default: + panic("Unknown Overseer Command") + } +} + +func (ovs *overseer) runOverseer() { + + statsTimer := time.After(time.Second * statsPeriod) +DONE: + for { + select { + case cmd, ok := <-ovs.ovsCh: + if !ok { + break DONE + } + ovs.processCommand(cmd) + case <-statsTimer: + if !ovs.ac.ssntpConn.isConnected() { + statsTimer = time.After(time.Second * statsPeriod) + continue + } + + cns := getStats() + ovs.updateAvailableResources(cns) + status := ovs.computeStatus() + ovs.sendStatusCommand(cns, status) + ovs.sendStats(cns, status) + ovs.sendTraceReport() + statsTimer = time.After(time.Second * statsPeriod) + if glog.V(1) { + glog.Infof("Consumed: Disk %d Mem %d CPUs %d", + ovs.diskSpaceAllocated, ovs.memoryAllocated, ovs.vcpusAllocated) + } + } + } + + close(ovs.childDoneCh) + ovs.childWg.Wait() + glog.Info("All instance go routines have exitted") + ovs.parentWg.Done() + + glog.Info("Overseer exitting") +} + +func startOverseer(wg *sync.WaitGroup, ac *agentClient) chan<- interface{} { + + instances := make(map[string]*ovsInstanceState) + ovsCh := make(chan interface{}) + toMonitor := make([]chan<- interface{}, 0, 1024) + childDoneCh := make(chan struct{}) + childWg := new(sync.WaitGroup) + + vcpusAllocated := 0 + diskSpaceAllocated := 0 + memoryAllocated := 0 + + 
filepath.Walk(instancesDir, func(path string, info os.FileInfo, err error) error {
		if path == instancesDir {
			return nil
		}

		if !info.IsDir() {
			return nil
		}

		glog.Infof("Reconnecting to existing instance %s", path)
		instance := filepath.Base(path)

		// BUG(markus): We should garbage collect corrupt instances

		cfg, err := loadVMConfig(path)
		if err != nil {
			// BUG fix: was glog.Warning, which does not format its
			// arguments, so "%s"/"%v" were printed literally.
			glog.Warningf("Unable to load state of running instance %s: %v", instance, err)
			return nil
		}

		// Account for the resources this pre-existing instance holds.
		vcpusAllocated += cfg.Cpus
		diskSpaceAllocated += cfg.Disk
		memoryAllocated += cfg.Mem

		target := startInstance(instance, cfg, childWg, childDoneCh, ac, ovsCh)
		instances[instance] = &ovsInstanceState{
			cmdCh:          target,
			running:        ovsPending,
			diskUsageMB:    -1,
			CPUUsage:       -1,
			memoryUsageMB:  -1,
			maxDiskUsageMB: cfg.Disk,
			maxVCPUs:       cfg.Cpus,
			maxMemoryMB:    cfg.Mem,
			sshIP:          cfg.ConcIP,
			sshPort:        cfg.SSHPort,
		}
		toMonitor = append(toMonitor, target)

		// Instance directories have no relevant subdirectories.
		return filepath.SkipDir
	})

	ovs := &overseer{
		instances:          instances,
		ovsCh:              ovsCh,
		parentWg:           wg,
		childWg:            childWg,
		childDoneCh:        childDoneCh,
		ac:                 ac,
		vcpusAllocated:     vcpusAllocated,
		diskSpaceAllocated: diskSpaceAllocated,
		memoryAllocated:    memoryAllocated,
		traceFrames:        list.New(),
	}
	ovs.parentWg.Add(1)
	glog.Info("Starting Overseer")
	glog.Infof("Allocated: Disk %d Mem %d CPUs %d",
		diskSpaceAllocated, memoryAllocated, vcpusAllocated)
	go ovs.runOverseer()
	ovs = nil
	instances = nil

	// I know this looks weird but there is method here. After we launch the overseer go routine
	// we can no longer access instances from this go routine otherwise we will have a data race.
	// For this reason we make a copy of the instance command channels that can be safely used
	// in this go routine. The monitor commands cannot be sent from the overseer as it is not
	// allowed to send information to the instance go routines. Doing so would incur the risk of
	// deadlock. So we copy.
'A little copying is better than a little dependency', and so forth. + + for _, v := range toMonitor { + v <- &insMonitorCmd{} + } + + return ovsCh +} diff --git a/ciao-launcher/payload.go b/ciao-launcher/payload.go new file mode 100644 index 000000000..7b12dbe5a --- /dev/null +++ b/ciao-launcher/payload.go @@ -0,0 +1,497 @@ +/* +// Copyright (c) 2016 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +*/ + +package main + +import ( + "bufio" + "bytes" + "encoding/gob" + "fmt" + "net" + "os" + "path" + "regexp" + "strings" + + "github.com/golang/glog" + + "github.com/01org/ciao/networking/libsnnet" + "github.com/01org/ciao/payloads" + + "gopkg.in/yaml.v2" +) + +type payloadError struct { + err error + code string +} + +type vmConfig struct { + Cpus int + Mem int + Disk int + Instance string + Image string + Legacy bool + Container bool + NetworkNode bool + VnicMAC string + VnicIP string + ConcIP string + SubnetIP string + TennantUUID string + ConcUUID string + VnicUUID string + SSHPort int +} + +type extractedDoc struct { + doc []string + realStart int + realEnd int +} + +var indentedRegexp *regexp.Regexp +var startRegexp *regexp.Regexp +var uuidRegexp *regexp.Regexp + +func init() { + indentedRegexp = regexp.MustCompile("\\s+.*") + startRegexp = regexp.MustCompile("^start\\s*:\\s*$") + uuidRegexp = regexp.MustCompile("^[0-9a-fA-F]+(-[0-9a-fA-F]+)*$") +} + +func printCloudinit(data *payloads.Start) { + start := &data.Start + 
glog.Info("cloud-init file content") + glog.Info("-----------------------") + glog.Infof("Instance UUID: %v", start.InstanceUUID) + glog.Infof("Disk image UUID: %v", start.ImageUUID) + glog.Infof("FW Type: %v", start.FWType) + glog.Infof("VM Type: %v", start.VMType) + glog.Infof("TennantUUID: %v", start.TenantUUID) + net := &start.Networking + glog.Infof("VnicMAC: %v", net.VnicMAC) + glog.Infof("VnicIP: %v", net.PrivateIP) + glog.Infof("ConcIP: %v", net.ConcentratorIP) + glog.Infof("SubnetIP: %v", net.Subnet) + glog.Infof("ConcUUID: %v", net.ConcentratorUUID) + glog.Infof("VnicUUID: %v", net.VnicUUID) + + glog.Info("Requested resources:") + for i := range start.RequestedResources { + glog.Infof("%8s: %v", start.RequestedResources[i].Type, + start.RequestedResources[i].Value) + } +} + +func computeSSHPort(networkNode bool, vnicIP string) int { + if networkNode || vnicIP == "" { + return 0 + } + + ip := net.ParseIP(vnicIP) + if ip == nil { + return 0 + } + + ip = ip.To4() + if ip == nil { + return 0 + } + + port, err := libsnnet.DebugSSHPortForIP(ip) + if err != nil { + return 0 + } + + return port +} + +func parseStartPayload(data []byte) (*vmConfig, *payloadError) { + var clouddata payloads.Start + + err := yaml.Unmarshal(data, &clouddata) + if err != nil { + return nil, &payloadError{err, payloads.InvalidPayload} + } + printCloudinit(&clouddata) + + start := &clouddata.Start + + instance := strings.TrimSpace(start.InstanceUUID) + if !uuidRegexp.MatchString(instance) { + err = fmt.Errorf("Invalid instance id received: %s", instance) + return nil, &payloadError{err, payloads.InvalidData} + } + + fwType := start.FWType + if fwType != "" && fwType != payloads.Legacy && fwType != payloads.EFI { + err = fmt.Errorf("Invalid fwtype received: %s", fwType) + return nil, &payloadError{err, payloads.InvalidData} + } + legacy := fwType == payloads.Legacy + + vmType := start.VMType + if vmType != "" && vmType != payloads.QEMU && vmType != payloads.Docker { + err = 
fmt.Errorf("Invalid vmtype received: %s", vmType) + return nil, &payloadError{err, payloads.InvalidData} + } + + var disk, cpus, mem int + var networkNode bool + var image string + + container := vmType == payloads.Docker + if container { + image = start.DockerImage + } else { + image = start.ImageUUID + } + + for i := range start.RequestedResources { + switch start.RequestedResources[i].Type { + case payloads.VCPUs: + cpus = start.RequestedResources[i].Value + case payloads.MemMB: + mem = start.RequestedResources[i].Value + case payloads.DiskMB: + disk = start.RequestedResources[i].Value + case payloads.NetworkNode: + networkNode = start.RequestedResources[i].Value != 0 + } + } + + net := &start.Networking + vnicIP := strings.TrimSpace(net.PrivateIP) + sshPort := computeSSHPort(networkNode, vnicIP) + + return &vmConfig{Cpus: cpus, + Mem: mem, + Disk: disk, + Instance: instance, + Image: image, + Legacy: legacy, + Container: container, + NetworkNode: networkNode, + VnicMAC: strings.TrimSpace(net.VnicMAC), + VnicIP: vnicIP, + ConcIP: strings.TrimSpace(net.ConcentratorIP), + SubnetIP: strings.TrimSpace(net.Subnet), + TennantUUID: strings.TrimSpace(start.TenantUUID), + ConcUUID: strings.TrimSpace(net.ConcentratorUUID), + VnicUUID: strings.TrimSpace(net.VnicUUID), + SSHPort: sshPort, + }, nil +} + +func generateStartError(instance string, startErr *startError) (out []byte, err error) { + sf := &payloads.ErrorStartFailure{ + InstanceUUID: instance, + Reason: startErr.code, + } + return yaml.Marshal(sf) +} + +func generateStopError(instance string, stopErr *stopError) (out []byte, err error) { + sf := &payloads.ErrorStopFailure{ + InstanceUUID: instance, + Reason: stopErr.code, + } + return yaml.Marshal(sf) +} + +func generateRestartError(instance string, restartErr *restartError) (out []byte, err error) { + rf := &payloads.ErrorRestartFailure{ + InstanceUUID: instance, + Reason: restartErr.code, + } + return yaml.Marshal(rf) +} + +func generateDeleteError(instance 
string, deleteErr *deleteError) (out []byte, err error) { + df := &payloads.ErrorDeleteFailure{ + InstanceUUID: instance, + Reason: deleteErr.code, + } + return yaml.Marshal(df) +} + +func generateNetEventPayload(ssntpEvent *libsnnet.SsntpEventInfo, agentUUID string) ([]byte, error) { + var event interface{} + var eventData *payloads.TenantAddedEvent + + switch ssntpEvent.Event { + case libsnnet.SsntpTunAdd: + add := &payloads.EventTenantAdded{} + event = add + eventData = &add.TenantAdded + case libsnnet.SsntpTunDel: + del := &payloads.EventTenantRemoved{} + event = del + eventData = &del.TenantRemoved + default: + return nil, fmt.Errorf("Unsupported ssntpEventInfo type: %d", + ssntpEvent.Event) + } + + eventData.AgentUUID = agentUUID + eventData.AgentIP = ssntpEvent.CnIP + eventData.TenantUUID = ssntpEvent.TenantID + eventData.TenantSubnet = ssntpEvent.SubnetID + eventData.ConcentratorUUID = ssntpEvent.ConcID + eventData.ConcentratorIP = ssntpEvent.CnciIP + eventData.SubnetKey = ssntpEvent.SubnetKey + + return yaml.Marshal(event) +} + +func parseRestartPayload(data []byte) (string, *payloadError) { + var clouddata payloads.Restart + + err := yaml.Unmarshal(data, &clouddata) + if err != nil { + return "", &payloadError{err, payloads.RestartInvalidPayload} + } + + instance := strings.TrimSpace(clouddata.Restart.InstanceUUID) + if !uuidRegexp.MatchString(instance) { + err = fmt.Errorf("Invalid instance id received: %s", instance) + return "", &payloadError{err, payloads.RestartInvalidData} + } + return instance, nil +} + +func parseDeletePayload(data []byte) (string, *payloadError) { + var clouddata payloads.Delete + + err := yaml.Unmarshal(data, &clouddata) + if err != nil { + return "", &payloadError{err, payloads.DeleteInvalidPayload} + } + + instance := strings.TrimSpace(clouddata.Delete.InstanceUUID) + if !uuidRegexp.MatchString(instance) { + err = fmt.Errorf("Invalid instance id received: %s", instance) + return "", &payloadError{err, 
payloads.DeleteInvalidData} + } + return instance, nil +} + +func parseStopPayload(data []byte) (string, *payloadError) { + var clouddata payloads.Stop + + err := yaml.Unmarshal(data, &clouddata) + if err != nil { + glog.Errorf("YAML error: %v", err) + return "", &payloadError{err, payloads.StopInvalidPayload} + } + + instance := strings.TrimSpace(clouddata.Stop.InstanceUUID) + if !uuidRegexp.MatchString(instance) { + err = fmt.Errorf("Invalid instance id received: %s", instance) + return "", &payloadError{err, payloads.StopInvalidData} + } + return instance, nil +} + +func loadVMConfig(instanceDir string) (*vmConfig, error) { + cfgFilePath := path.Join(instanceDir, instanceState) + cfgFile, err := os.Open(cfgFilePath) + if err != nil { + glog.Errorf("Unable to open instance file %s", cfgFilePath) + return nil, err + } + + dec := gob.NewDecoder(cfgFile) + cfg := &vmConfig{} + err = dec.Decode(cfg) + _ = cfgFile.Close() + + if err != nil { + glog.Error("Unable to retrieve state info") + return nil, err + } + + return cfg, nil +} + +func linesToBytes(doc []string, buf *bytes.Buffer) { + for _, line := range doc { + _, _ = buf.WriteString(line) + _, _ = buf.WriteString("\n") + } +} + +func extractDocument(doc *extractedDoc, buf *bytes.Buffer) { + linesToBytes(doc.doc[doc.realStart:doc.realEnd], buf) +} + +func extractStartYaml(lines []string, start int, s, ci *bytes.Buffer) { + cnStart := 0 + + docStartFound := false + for ; cnStart < start; cnStart++ { + line := lines[cnStart] + if strings.HasPrefix(line, "---") { + docStartFound = true + cnStart++ + break + } + } + + if !docStartFound { + cnStart = 0 + } + + linesToBytes(lines[cnStart:start], ci) + + i := start + if i < len(lines) { + _, _ = s.WriteString(lines[i]) + _, _ = s.WriteString("\n") + i++ + } + for ; i < len(lines) && (indentedRegexp.MatchString(lines[i]) || lines[i] == ""); i++ { + _, _ = s.WriteString(lines[i]) + _, _ = s.WriteString("\n") + } + + if i < len(lines) && !strings.HasPrefix(lines[i], "...") 
{ + linesToBytes(lines[i:], ci) + } +} + +func findDocument(lines []string) (doc *extractedDoc, endOfNextDoc int) { + var realStart int + var realEnd int + docStartFound := false + docEndFound := false + + start := len(lines) - 1 + line := lines[start] + if strings.HasPrefix(line, "...") { + docEndFound = true + realEnd = start + start-- + } + + for ; start >= 0; start-- { + line := lines[start] + if strings.HasPrefix(line, "---") { + docStartFound = true + break + } + if strings.HasPrefix(line, "...") { + start++ + break + } + } + + if docStartFound { + realStart = start + 1 + for start = start - 1; start >= 0; start-- { + line := lines[start] + if !strings.HasPrefix(line, "%") { + break + } + } + start++ + } else { + if start < 0 { + start = 0 + } + realStart = start + } + + if !docEndFound { + realEnd = len(lines) + } + + realStart -= start + realEnd -= start + + return &extractedDoc{lines[start:len(lines)], realStart, realEnd}, start +} + +func splitYaml(data []byte) ([]byte, []byte, []byte) { + + var s bytes.Buffer + var ci bytes.Buffer + var md bytes.Buffer + + foundStart := -1 + lines := make([]string, 0, 256) + docs := make([]*extractedDoc, 0, 3) + + reader := bytes.NewReader(data) + scanner := bufio.NewScanner(reader) + for scanner.Scan() { + line := scanner.Text() + if foundStart == -1 && startRegexp.MatchString(line) { + foundStart = len(lines) + } + lines = append(lines, line) + } + + endOfNextDoc := len(lines) + + for endOfNextDoc > 0 { + var doc *extractedDoc + doc, endOfNextDoc = findDocument(lines[:endOfNextDoc]) + docs = append([]*extractedDoc{doc}, docs...) 
+ } + + if len(docs) == 1 { + if foundStart != -1 { + extractStartYaml(docs[0].doc, foundStart, &s, &ci) + } else { + extractDocument(docs[0], &ci) + } + } else if len(docs) == 2 { + if foundStart != -1 { + if foundStart < len(docs[0].doc) { + extractStartYaml(docs[0].doc, foundStart, &s, &ci) + extractDocument(docs[1], &md) + } else { + extractStartYaml(docs[1].doc, foundStart-len(docs[0].doc), &s, &ci) + extractDocument(docs[0], &md) + } + } else { + extractDocument(docs[0], &ci) + extractDocument(docs[1], &md) + } + } else if foundStart != -1 && foundStart < len(docs[0].doc)+len(docs[1].doc)+len(docs[2].doc) { + notStart := make([]*extractedDoc, 0, 2) + sum := 0 + for i := 0; i < 3; i++ { + newSum := sum + len(docs[i].doc) + if foundStart >= sum && foundStart < newSum { + extractDocument(docs[i], &s) + } else { + notStart = append(notStart, docs[i]) + } + sum = newSum + } + extractDocument(notStart[0], &ci) + extractDocument(notStart[1], &md) + } else { + glog.Warning("Unable to split payload into documents") + } + + return s.Bytes(), ci.Bytes(), md.Bytes() +} diff --git a/ciao-launcher/port_grabber.go b/ciao-launcher/port_grabber.go new file mode 100644 index 000000000..337200407 --- /dev/null +++ b/ciao-launcher/port_grabber.go @@ -0,0 +1,89 @@ +/* +// Copyright (c) 2016 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+*/ + +package main + +import ( + "sync" + + "github.com/golang/glog" +) + +const ( + portGrabberStart = 5900 + portGrabberMax = 6900 +) + +/* +Just because a port is in the free map doesn't mean it's free. It could be +used by some other qemu process or otherwise that is not managed by launcher. +In addition, we have the tricky case of restarting launcher after a crash +where there are already running instances. We could ask those instances for +their spice port and them remove them from the map (and perhaps we will in +the future), but there will still be a race condition, if a new start command +comes in while we query the domain socket of running instances. + +To summarize, we need to try to detect the fact that qemu has failed due to +an in-use port and restart it with a new port. We could try to detect whether +a port was in use or not, a la libvirt, but there'd still be a race condition. +*/ + +type portGrabber struct { + sync.Mutex + free map[int]struct{} +} + +var uiPortGrabber = portGrabber{} + +func init() { + uiPortGrabber.free = make(map[int]struct{}) + for i := portGrabberStart; i < portGrabberMax; i++ { + uiPortGrabber.free[i] = struct{}{} + } +} + +func (pg *portGrabber) grabPort() int { + port := 0 + + pg.Lock() + glog.Infof("Ports available %d", len(pg.free)) + for key := range pg.free { + port = key + break + } + + if port != 0 { + delete(pg.free, port) + glog.Infof("Grabbing port: %d", port) + } + pg.Unlock() + + return port +} + +func (pg *portGrabber) releasePort(port int) { + glog.Infof("Releasing port: %d", port) + + if port < portGrabberStart || port >= portGrabberMax { + glog.Warningf("Unable to release invalid port number %d", port) + return + } + + pg.Lock() + pg.free[port] = struct{}{} + glog.Infof("Ports available %d", len(pg.free)) + pg.Unlock() +} diff --git a/ciao-launcher/process_stats.go b/ciao-launcher/process_stats.go new file mode 100644 index 000000000..4cf182a32 --- /dev/null +++ b/ciao-launcher/process_stats.go @@ -0,0 
+1,102 @@ +/* +// Copyright (c) 2016 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +*/ + +package main + +import ( + "bufio" + "fmt" + "os" + "path" + "strconv" + + "github.com/golang/glog" +) + +func computeProcessMemUsage(pid int) int { + smapsPath := path.Join("/proc", fmt.Sprintf("%d", pid), "smaps") + smaps, err := os.Open(smapsPath) + if err != nil { + if glog.V(1) { + glog.Warning("Unable to open %s: %v", smapsPath, err) + } + return -1 + } + var mem64 int64 + scanner := bufio.NewScanner(smaps) + for scanner.Scan() { + matches := pssRegexp.FindStringSubmatch(scanner.Text()) + if matches == nil || len(matches) < 2 { + continue + } + + sizeInKb, err := strconv.ParseInt(matches[1], 10, 64) + if err != nil { + continue + } + mem64 += sizeInKb + } + mem := int(mem64 / 1024) + _ = smaps.Close() + + return mem +} + +func computeProcessCPUTime(pid int) int64 { + statPath := path.Join("/proc", fmt.Sprintf("%d", pid), "stat") + stat, err := os.Open(statPath) + if err != nil { + if glog.V(1) { + glog.Warning("Unable to open %s: %v", statPath, err) + } + return -1 + } + defer func() { _ = stat.Close() }() + + var userTime int64 = -1 + var sysTime int64 = -1 + scanner := bufio.NewScanner(stat) + scanner.Split(bufio.ScanWords) + i := 0 + for ; i < 13 && scanner.Scan(); i++ { + + } + + if scanner.Scan() { + userTime, _ = strconv.ParseInt(scanner.Text(), 10, 64) + if scanner.Scan() { + sysTime, _ = strconv.ParseInt(scanner.Text(), 10, 64) + } + } 
+ + if userTime == -1 || sysTime == -1 { + if glog.V(1) { + glog.Warningf("Invalid user or systime %d %d", + userTime, sysTime) + } + return -1 + } + + cpuTime := (1000 * 1000 * 1000 * (userTime + sysTime)) / + clockTicksPerSecond + + // if glog.V(1) { + // glog.Infof("PID %d: cpuTime %d userTime %d sysTime %d", + // q.pid, cpuTime, userTime, sysTime) + // } + + return cpuTime +} diff --git a/ciao-launcher/profile.go b/ciao-launcher/profile.go new file mode 100644 index 000000000..29d51e155 --- /dev/null +++ b/ciao-launcher/profile.go @@ -0,0 +1,48 @@ +// Copyright (c) 2016 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// +build profile + +package main + +import ( + "flag" + "os" + "runtime/pprof" + + "github.com/golang/glog" +) + +var cpuProfile string + +func init() { + flag.StringVar(&cpuProfile, "cpuprofile", "", "write profile information to file") + profileFN = func() func() { + if cpuProfile == "" { + return nil + } + + f, err := os.Create(cpuProfile) + if err != nil { + glog.Warning("Unable to create profile file %s: %v", + cpuProfile, err) + return nil + } + pprof.StartCPUProfile(f) + return func() { + pprof.StopCPUProfile() + f.Close() + } + } +} diff --git a/ciao-launcher/qemu.go b/ciao-launcher/qemu.go new file mode 100644 index 000000000..61d165399 --- /dev/null +++ b/ciao-launcher/qemu.go @@ -0,0 +1,762 @@ +/* +// Copyright (c) 2016 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+*/ + +package main + +import ( + "bufio" + "bytes" + "fmt" + "io/ioutil" + "net" + "os" + "os/exec" + "path" + "regexp" + "strconv" + "strings" + "sync" + "time" + + "github.com/01org/ciao/payloads" + "gopkg.in/yaml.v2" + + "github.com/golang/glog" +) + +const ( + qemuEfiFw = "/usr/share/qemu/OVMF.fd" + seedImage = "seed.iso" + ciaoImage = "ciao.iso" + imagesPath = "/var/lib/ciao/images" + vcTries = 10 +) + +var virtualSizeRegexp *regexp.Regexp +var pssRegexp *regexp.Regexp + +func init() { + virtualSizeRegexp = regexp.MustCompile(`virtual size:.*\(([0-9]+) bytes\)`) + pssRegexp = regexp.MustCompile(`^Pss:\s*([0-9]+)`) +} + +type qemu struct { + cfg *vmConfig + instanceDir string + vcPort int + pid int + prevCPUTime int64 + prevSampleTime time.Time + isoPath string + ciaoISOPath string +} + +func (q *qemu) init(cfg *vmConfig, instanceDir string) { + q.cfg = cfg + q.instanceDir = instanceDir + q.isoPath = path.Join(instanceDir, seedImage) + q.ciaoISOPath = path.Join(instanceDir, ciaoImage) +} + +func (q *qemu) imageInfo(imagePath string) (imageSizeMB int, err error) { + imageSizeMB = -1 + + params := make([]string, 0, 8) + params = append(params, "info") + params = append(params, imagePath) + + cmd := exec.Command("qemu-img", params...) 
+ stdout, err := cmd.StdoutPipe() + if err != nil { + glog.Errorf("Unable to read output from qemu-img: %v", err) + return -1, err + } + + err = cmd.Start() + if err != nil { + _ = stdout.Close() + glog.Errorf("Unable start qemu-img: %v", err) + return -1, err + } + + scanner := bufio.NewScanner(stdout) + for scanner.Scan() && imageSizeMB == -1 { + line := scanner.Text() + matches := virtualSizeRegexp.FindStringSubmatch(line) + if matches == nil { + continue + } + + if len(matches) < 2 { + glog.Warningf("Unable to find image size from: %s", + line) + break + } + + sizeInBytes, err := strconv.ParseInt(matches[1], 10, 64) + if err != nil { + glog.Warningf("Unable to parse image size from: %s", + matches[1]) + break + } + + size := sizeInBytes / (1000 * 1000) + if size > int64((^uint(0))>>1) { + glog.Warningf("Unexpectedly large disk size found: %d MB", + size) + break + } + + imageSizeMB = int(size) + if int64(imageSizeMB)*1000*1000 < sizeInBytes { + imageSizeMB++ + } + } + + err = cmd.Wait() + if err != nil { + glog.Warningf("qemu-img returned an error: %v", err) + if imageSizeMB != -1 { + glog.Warning("But we already parsed the image size, so we don't care") + err = nil + } + } + + return imageSizeMB, err +} + +func createCloudInitISO(instanceDir, isoPath string, cfg *vmConfig, userData, metaData []byte) error { + + configDrivePath := path.Join(instanceDir, "clr-cloud-init") + dataDirPath := path.Join(configDrivePath, "openstack", "latest") + metaDataPath := path.Join(dataDirPath, "meta_data.json") + userDataPath := path.Join(dataDirPath, "user_data") + + defer func() { + _ = os.RemoveAll(configDrivePath) + }() + + err := os.MkdirAll(dataDirPath, 0755) + if err != nil { + glog.Errorf("Unable to create config drive directory %s", dataDirPath) + return err + } + + if len(metaData) == 0 { + defaultMeta := fmt.Sprintf("{\n \"uuid\": %q,\n \"hostname\": %[1]q\n}\n", cfg.Instance) + metaData = []byte(defaultMeta) + } + + err = ioutil.WriteFile(metaDataPath, metaData, 
0644) + if err != nil { + glog.Errorf("Unable to create %s", metaDataPath) + return err + } + + err = ioutil.WriteFile(userDataPath, userData, 0644) + if err != nil { + glog.Errorf("Unable to create %s", userDataPath) + return err + } + + cmd := exec.Command("xorriso", "-as", "mkisofs", "-R", "-V", "config-2", "-o", isoPath, + configDrivePath) + err = cmd.Run() + if err != nil { + glog.Errorf("Unable to create cloudinit iso image %v", err) + return err + } + + glog.Infof("ISO image %s created", isoPath) + + return nil +} + +func createCiaoISO(instanceDir, isoPath string) error { + ciaoDrivePath := path.Join(instanceDir, "ciao") + ciaoPath := path.Join(ciaoDrivePath, "ciao.yaml") + + defer func() { + _ = os.RemoveAll(ciaoDrivePath) + }() + + err := os.MkdirAll(ciaoDrivePath, 0755) + if err != nil { + glog.Errorf("Unable to create ciao drive directory %s", ciaoDrivePath) + return err + } + + config := payloads.CNCIInstanceConfig{SchedulerAddr: serverURL} + y, err := yaml.Marshal(&config) + if err != nil { + glog.Errorf("Unable to create yaml ciao file %s", err) + return err + } + + err = ioutil.WriteFile(ciaoPath, y, 0644) + if err != nil { + glog.Errorf("Unable to create %s", ciaoPath) + return err + } + + cmd := exec.Command("xorriso", "-as", "mkisofs", "-R", "-V", "ciao", "-o", isoPath, + ciaoPath) + err = cmd.Run() + if err != nil { + glog.Errorf("Unable to create ciao iso image %v", err) + return err + } + + glog.Infof("Ciao ISO image %s created", isoPath) + + return nil +} + +func (q *qemu) createRootfs() error { + vmImage := path.Join(q.instanceDir, "image.qcow2") + backingImage := path.Join(imagesPath, q.cfg.Image) + glog.Infof("Creating qcow image from %s backing %s", vmImage, backingImage) + + params := make([]string, 0, 32) + params = append(params, "create", "-f", "qcow2", "-o", "backing_file="+backingImage, + vmImage) + if q.cfg.Disk > 0 { + diskSize := fmt.Sprintf("%dM", q.cfg.Disk) + params = append(params, diskSize) + } + + cmd := 
exec.Command("qemu-img", params...) + return cmd.Run() +} + +func (q *qemu) checkBackingImage() error { + backingImage := path.Join(imagesPath, q.cfg.Image) + _, err := os.Stat(backingImage) + if err != nil { + return fmt.Errorf("Backing Image does not exist: %v", err) + } + + if q.cfg.Disk != 0 { + minSizeMB, err := getMinImageSize(q, backingImage) + if err != nil { + return fmt.Errorf("Unable to determine image size: %v", err) + } + + if minSizeMB != -1 && minSizeMB > q.cfg.Disk { + glog.Warningf("Requested disk size (%dM) is smaller than minimum image size (%dM). Defaulting to min size", q.cfg.Disk, minSizeMB) + q.cfg.Disk = minSizeMB + } + } + + return nil +} + +func (q *qemu) downloadBackingImage() error { + return fmt.Errorf("Not supported yet!") +} + +func (q *qemu) createImage(bridge string, userData, metaData []byte) error { + err := createCloudInitISO(q.instanceDir, q.isoPath, q.cfg, userData, metaData) + if err != nil { + glog.Errorf("Unable to create iso image %v", err) + return err + } + + if q.cfg.NetworkNode { + err = createCiaoISO(q.instanceDir, q.ciaoISOPath) + if err != nil { + return err + } + } + + return q.createRootfs() +} + +func (q *qemu) deleteImage() error { + return nil +} + +func computeTapParam(vnicName string, networkNode bool) (string, *os.File, error) { + if !networkNode { + return fmt.Sprintf("tap,ifname=%s,script=no,downscript=no", + vnicName), nil, nil + } + + ifIndexPath := path.Join("/sys/class/net", vnicName, "ifindex") + fip, err := os.Open(ifIndexPath) + if err != nil { + glog.Errorf("Failed to determine tap ifname: %s", err) + return "", nil, err + } + defer fip.Close() + + scan := bufio.NewScanner(fip) + if !scan.Scan() { + glog.Error("Unable to read tap index") + return "", nil, fmt.Errorf("Unable to read tap index") + } + + i, err := strconv.Atoi(scan.Text()) + if err != nil { + glog.Errorf("Failed to determine tap ifname: %s", err) + return "", nil, err + } + + tapDev := fmt.Sprintf("/dev/tap%d", i) + + f, err := 
os.OpenFile(tapDev, os.O_RDWR, 0666) + if err != nil { + glog.Errorf("Failed to open tap device %s: %s", tapDev, err) + return "", nil, err + } + + /* + 3, what do you mean 3. Well, it turns out that files passed to child + processes via cmd.ExtraFiles have different fds in the child and the + parent. In the child the fds are determined by the file's position + in the ExtraFiles array + 3. Since we're only specifying a single + file we end up with an child fd of 3. + */ + + return fmt.Sprintf("tap,fd=%d", 3), f, nil +} + +func launchQemu(params []string, f *os.File) (string, error) { + errStr := "" + cmd := exec.Command("qemu-system-x86_64", params...) + if f != nil { + glog.Infof("Adding extra file %v", f) + cmd.ExtraFiles = []*os.File{f} + } + + var stderr bytes.Buffer + cmd.Stderr = &stderr + glog.Infof("launching qemu with: %v", params) + + err := cmd.Run() + if err != nil { + glog.Errorf("Unable to launch qemu: %v", err) + errStr = stderr.String() + glog.Error(errStr) + } + return errStr, err +} + +func launchQemuWithNC(params []string, f *os.File, ipAddress string) (int, error) { + var err error + + tries := 0 + params = append(params, "-display", "none", "-vga", "none") + params = append(params, "-device", "isa-serial,chardev=gnc0", "-chardev", "") + port := 0 + for ; tries < vcTries; tries++ { + port = uiPortGrabber.grabPort() + if port == 0 { + break + } + ncString := "socket,port=%d,host=%s,server,id=gnc0,server,nowait" + params[len(params)-1] = fmt.Sprintf(ncString, port, ipAddress) + var errStr string + errStr, err = launchQemu(params, f) + if err == nil { + glog.Info("============================================") + glog.Infof("Connect to vm with netcat %s %d", ipAddress, port) + glog.Info("============================================") + break + } + + lowErr := strings.ToLower(errStr) + if !strings.Contains(lowErr, "socket") { + uiPortGrabber.releasePort(port) + break + } + } + + if port == 0 || (err != nil && tries == vcTries) { + 
glog.Warning("Failed to launch qemu due to chardev error. Relaunching without virtual console") + _, err = launchQemu(params[:len(params)-4], f) + } + + return port, err +} + +func launchQemuWithSpice(params []string, f *os.File, ipAddress string) (int, error) { + var err error + + tries := 0 + params = append(params, "-spice", "") + port := 0 + for ; tries < vcTries; tries++ { + port = uiPortGrabber.grabPort() + if port == 0 { + break + } + params[len(params)-1] = fmt.Sprintf("port=%d,addr=%s,disable-ticketing", port, ipAddress) + var errStr string + errStr, err = launchQemu(params, f) + if err == nil { + glog.Info("============================================") + glog.Infof("Connect to vm with spicec -h %s -p %d", ipAddress, port) + glog.Info("============================================") + break + } + + // Not great I know, but it's the only way to figure out if spice is at fault + lowErr := strings.ToLower(errStr) + if !strings.Contains(lowErr, "spice") { + uiPortGrabber.releasePort(port) + break + } + } + + if port == 0 || (err != nil && tries == vcTries) { + glog.Warning("Failed to launch qemu due to spice error. 
Relaunching without virtual console") + params = append(params[:len(params)-2], "-display", "none", "-vga", "none") + _, err = launchQemu(params, f) + } + + return port, err +} + +func (q *qemu) startVM(vnicName, ipAddress string) error { + + var f *os.File + + glog.Info("Launching qemu") + + vmImage := path.Join(q.instanceDir, "image.qcow2") + qmpSocket := path.Join(q.instanceDir, "socket") + fileParam := fmt.Sprintf("file=%s,if=virtio,aio=threads,format=qcow2", vmImage) + //BUG(markus): Should specify media type here + isoParam := fmt.Sprintf("file=%s,if=virtio", q.isoPath) + qmpParam := fmt.Sprintf("unix:%s,server,nowait", qmpSocket) + + params := make([]string, 0, 32) + params = append(params, "-drive", fileParam) + params = append(params, "-drive", isoParam) + if q.cfg.NetworkNode { + ciaoParam := fmt.Sprintf("file=%s,if=virtio", q.ciaoISOPath) + params = append(params, "-drive", ciaoParam) + } + + if vnicName != "" { + net1Param := fmt.Sprintf("nic,model=virtio,macaddr=%s", + q.cfg.VnicMAC) + var err error + var net2Param string + net2Param, f, err = computeTapParam(vnicName, q.cfg.NetworkNode) + if err != nil { + return err + } + if f != nil { + defer f.Close() + } + params = append(params, "-net", net1Param) + params = append(params, "-net", net2Param) + } else { + params = append(params, "-net", "nic,model=virtio") + params = append(params, "-net", "user") + } + + params = append(params, "-enable-kvm") + params = append(params, "-cpu", "host") + params = append(params, "-daemonize") + params = append(params, "-qmp", qmpParam) + + if q.cfg.Mem > 0 { + memoryParam := fmt.Sprintf("%d", q.cfg.Mem) + params = append(params, "-m", memoryParam) + } + if q.cfg.Cpus > 0 { + cpusParam := fmt.Sprintf("cpus=%d", q.cfg.Cpus) + params = append(params, "-smp", cpusParam) + } + + if !q.cfg.Legacy { + params = append(params, "-bios", qemuEfiFw) + } + + var err error + + if !launchWithUI.Enabled() { + params = append(params, "-display", "none", "-vga", "none") + _, err = 
launchQemu(params, f) + } else if launchWithUI.String() == "spice" { + var port int + port, err = launchQemuWithSpice(params, f, ipAddress) + if err == nil { + q.vcPort = port + } + } else { + var port int + port, err = launchQemuWithNC(params, f, ipAddress) + if err == nil { + q.vcPort = port + } + } + + if err != nil { + return err + } + + glog.Info("Launched VM") + + return nil +} + +func (q *qemu) lostVM() { + if launchWithUI.Enabled() { + glog.Infof("Releasing VC Port %d", q.vcPort) + uiPortGrabber.releasePort(q.vcPort) + q.vcPort = 0 + } + q.pid = 0 + q.prevCPUTime = -1 +} + +func qmpConnect(qmpChannel chan string, instance, instanceDir string, closedCh chan struct{}, + connectedCh chan struct{}, wg *sync.WaitGroup, boot bool) { + var conn net.Conn + + defer func() { + if conn != nil { + conn.Close() + } + if closedCh != nil { + close(closedCh) + } + glog.Infof("Monitor function for %s exitting", instance) + wg.Done() + }() + + qmpSocket := path.Join(instanceDir, "socket") + conn, err := net.DialTimeout("unix", qmpSocket, time.Second*30) + if err != nil { + glog.Errorf("Unable to open qmp socket for instance %s: %v", instance, err) + return + } + + scanner := bufio.NewScanner(conn) + _, err = fmt.Fprintln(conn, "{ \"execute\": \"qmp_capabilities\" }") + if err != nil { + glog.Errorf("Unable to send qmp_capabilities to instance %s: %v", instance, err) + return + } + + /* TODO check return value and implement timeout */ + + if !scanner.Scan() { + glog.Errorf("qmp_capabilities failed on instance %s", instance) + return + } + + close(connectedCh) + + eventCh := make(chan string) + go func() { + for scanner.Scan() { + text := scanner.Text() + if glog.V(1) { + glog.Info(text) + } + eventCh <- scanner.Text() + } + glog.Infof("Quitting %s read Loop", instance) + close(eventCh) + }() + + waitForShutdown := false + quitting := false + +DONE: + for { + select { + case cmd, ok := <-qmpChannel: + if !ok { + qmpChannel = nil + if !waitForShutdown { + break DONE + } else { 
+ quitting = true + } + } + if cmd == virtualizerStopCmd { + glog.Info("Sending STOP") + _, err = fmt.Fprintln(conn, "{ \"execute\": \"quit\" }") + if err != nil { + glog.Errorf("Unable to send power down command to %s: %v\n", instance, err) + } else { + waitForShutdown = true + } + } + case event, ok := <-eventCh: + if !ok { + close(closedCh) + closedCh = nil + eventCh = nil + waitForShutdown = false + if quitting { + glog.Info("Lost connection to qemu domain socket") + break DONE + } else { + glog.Warning("Lost connection to qemu domain socket") + } + continue + } + if waitForShutdown == true && strings.Contains(event, "return") { + waitForShutdown = false + if quitting { + break DONE + } + } + } + } + + conn.Close() + conn = nil + + /* Readloop could be blocking on a send */ + + if eventCh != nil { + for _ = range eventCh { + } + } + + glog.Infof("Quitting Monitor Loop for %s\n", instance) +} + +/* closedCh is closed by the monitor go routine when it loses connection to the domain socket, basically, + indicating that the VM instance has shut down. The instance go routine is expected to close the + qmpChannel to force the monitor go routine to exit. + + connectedCh is closed when we successfully connect to the domain socket, inidcating that the + VM instance is running. 
+*/ + +func (q *qemu) monitorVM(closedCh chan struct{}, connectedCh chan struct{}, + wg *sync.WaitGroup, boot bool) chan string { + qmpChannel := make(chan string) + wg.Add(1) + go qmpConnect(qmpChannel, q.cfg.Instance, q.instanceDir, closedCh, connectedCh, wg, boot) + return qmpChannel +} + +func computeInstanceDiskspace(instanceDir string) int { + vmImage := path.Join(instanceDir, "image.qcow2") + fi, err := os.Stat(vmImage) + if err != nil { + return -1 + } + return int(fi.Size() / 1000000) +} + +func (q *qemu) stats() (disk, memory, cpu int) { + disk = computeInstanceDiskspace(q.instanceDir) + memory = -1 + cpu = -1 + + if q.pid == 0 { + return + } + + memory = computeProcessMemUsage(q.pid) + if q.cfg == nil { + return + } + + cpuTime := computeProcessCPUTime(q.pid) + now := time.Now() + if q.prevCPUTime != -1 { + cpu = int((100 * (cpuTime - q.prevCPUTime) / + now.Sub(q.prevSampleTime).Nanoseconds())) + if q.cfg.Cpus > 1 { + cpu /= q.cfg.Cpus + } + // if glog.V(1) { + // glog.Infof("cpu %d%%\n", cpu) + // } + } + q.prevCPUTime = cpuTime + q.prevSampleTime = now + + return +} + +func (q *qemu) connected() { + qmpSocket := path.Join(q.instanceDir, "socket") + var buf bytes.Buffer + cmd := exec.Command("fuser", qmpSocket) + cmd.Stdout = &buf + err := cmd.Run() + if err != nil { + glog.Errorf("Failed to run fuser: %v", err) + return + } + + scanner := bufio.NewScanner(&buf) + for scanner.Scan() { + pidString := strings.TrimSpace(scanner.Text()) + pid, err := strconv.Atoi(pidString) + if err != nil { + continue + } + + if pid != 0 && pid != os.Getpid() { + glog.Infof("PID of qemu for instance %s is %d", q.instanceDir, pid) + q.pid = pid + break + } + } + + if q.pid == 0 { + glog.Errorf("Unable to determine pid for %s", q.instanceDir) + } + q.prevCPUTime = -1 +} + +func qemuKillInstance(instanceDir string) { + var conn net.Conn + + qmpSocket := path.Join(instanceDir, "socket") + conn, err := net.DialTimeout("unix", qmpSocket, time.Second*30) + if err != nil { + 
return + } + + defer conn.Close() + + _, err = fmt.Fprintln(conn, "{ \"execute\": \"qmp_capabilities\" }") + if err != nil { + glog.Errorf("Unable to send qmp_capabilities to instance %s: %v", instanceDir, err) + return + } + + glog.Infof("Powering Down %s", instanceDir) + + _, err = fmt.Fprintln(conn, "{ \"execute\": \"quit\" }") + if err != nil { + glog.Errorf("Unable to send power down command to %s: %v\n", instanceDir, err) + } + + // Keep reading until the socket fails. If we close the socket straight away, qemu does not + // honour our quit command. + + scanner := bufio.NewScanner(conn) + for scanner.Scan() { + } + + return +} diff --git a/ciao-launcher/restart.go b/ciao-launcher/restart.go new file mode 100644 index 000000000..7d88ecc5a --- /dev/null +++ b/ciao-launcher/restart.go @@ -0,0 +1,72 @@ +/* +// Copyright (c) 2016 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+*/ + +package main + +import ( + "github.com/01org/ciao/networking/libsnnet" + "github.com/01org/ciao/payloads" + "github.com/01org/ciao/ssntp" + + "github.com/golang/glog" +) + +type restartError struct { + err error + code payloads.RestartFailureReason +} + +func (re *restartError) send(client *ssntpConn, instance string) { + if !client.isConnected() { + return + } + + payload, err := generateRestartError(instance, re) + if err != nil { + glog.Errorf("Unable to generate payload for restart_failure: %v", err) + return + } + + _, err = client.SendError(ssntp.RestartFailure, payload) + if err != nil { + glog.Errorf("Unable to send restart_failure: %v", err) + } +} + +func processRestart(instanceDir string, vm virtualizer, client *ssntpConn, cfg *vmConfig) *restartError { + var vnicName string + var vnicCfg *libsnnet.VnicConfig + var err error + + if networking.Enabled() { + vnicCfg, err = createVnicCfg(cfg) + if err != nil { + glog.Errorf("Could not create VnicCFG: %s", err) + return &restartError{err, payloads.RestartInstanceCorrupt} + } + vnicName, _, err = createVnic(client, vnicCfg) + if err != nil { + return &restartError{err, payloads.RestartNetworkFailure} + } + } + + err = vm.startVM(vnicName, getNodeIPAddress()) + if err != nil { + return &restartError{err, payloads.RestartLaunchFailure} + } + + return nil +} diff --git a/ciao-launcher/simulation.go b/ciao-launcher/simulation.go new file mode 100644 index 000000000..e426b99b7 --- /dev/null +++ b/ciao-launcher/simulation.go @@ -0,0 +1,139 @@ +/* +// Copyright (c) 2016 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +*/ + +package main + +import ( + "math/rand" + "sync" + "time" + + "github.com/golang/glog" +) + +type simulation struct { + uuid string + instanceDir string + + closedCh chan struct{} + connectedCh chan struct{} + killCh chan struct{} + monitorCh chan string + wg *sync.WaitGroup + + cpus int + mem int + disk int +} + +var simulationMap map[string]simulation + +func init() { + simulationMap = make(map[string]simulation) +} + +func (s *simulation) init(cfg *vmConfig, instanceDir string) { + s.cpus = cfg.Cpus + s.mem = cfg.Mem + s.disk = cfg.Disk + s.instanceDir = instanceDir +} + +func (s *simulation) checkBackingImage() error { + return nil +} + +func (s *simulation) downloadBackingImage() error { + return nil +} + +func (s *simulation) createImage(bridge string, userData, metaData []byte) error { + return nil +} + +func (s *simulation) deleteImage() error { + return nil +} + +func fakeVM(s *simulation) { + glog.Infof("fakeVM started") + source := rand.NewSource(time.Now().UnixNano()) + r := rand.New(source) + + delay := r.Int63n(1000) + delay++ + glog.Infof("Will start in %d milliseconds", delay) + + ticker := time.NewTicker(time.Duration(delay) * time.Millisecond) +VM: + for { + select { + case cmd, ok := <-s.monitorCh: + if !ok { + s.monitorCh = nil + break VM + } + if cmd == virtualizerStopCmd { + break VM + } + case <-s.killCh: + break VM + case <-ticker.C: + ticker.Stop() + close(s.connectedCh) + } + } + + if s.wg != nil { + s.wg.Done() + } + +} + +func (s *simulation) startVM(vnicName, ipAddress string) error { + glog.Infof("startVM\n") + + 
s.killCh = make(chan struct{}) + + simulationMap[s.instanceDir] = *s + + return nil +} + +func (s *simulation) monitorVM(closedCh chan struct{}, connectedCh chan struct{}, wg *sync.WaitGroup, boot bool) chan string { + glog.Infof("monitorVM\n") + s.closedCh = closedCh + s.connectedCh = connectedCh + s.wg = wg + + s.monitorCh = make(chan string) + + go fakeVM(s) + + return s.monitorCh +} + +func (s *simulation) stats() (disk, memory, cpu int) { + return s.disk / 10, s.mem / 10, s.cpus / 10 +} + +func (s *simulation) connected() { + glog.Infof("connected\n") +} + +func (s *simulation) lostVM() { + glog.Infof("simulation: lostVM\n") +} diff --git a/ciao-launcher/start.go b/ciao-launcher/start.go new file mode 100644 index 000000000..f56a44193 --- /dev/null +++ b/ciao-launcher/start.go @@ -0,0 +1,177 @@ +/* +// Copyright (c) 2016 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+*/ + +package main + +import ( + "encoding/gob" + "fmt" + "os" + "path" + + "github.com/01org/ciao/networking/libsnnet" + "github.com/01org/ciao/payloads" + "github.com/01org/ciao/ssntp" + + "github.com/golang/glog" +) + +type startError struct { + err error + code payloads.StartFailureReason +} + +func (se *startError) send(client *ssntpConn, instance string) { + if !client.isConnected() { + return + } + + payload, err := generateStartError(instance, se) + if err != nil { + glog.Errorf("Unable to generate payload for start_failure: %v", err) + return + } + + _, err = client.SendError(ssntp.StartFailure, payload) + if err != nil { + glog.Errorf("Unable to send start_failure: %v", err) + } +} + +func ensureBackingImage(vm virtualizer) error { + + err := vm.checkBackingImage() + if err == errImageNotFound { + glog.Infof("Backing image not found. Trying to download") + err = vm.downloadBackingImage() + if err != nil { + //BUG(markus): Need to change overseer state here to Downloading + glog.Errorf("Unable to download backing image: %v", err) + return err + } + } else if err != nil { + glog.Errorf("Backing image check failed") + return err + } + + return nil +} + +func createInstance(vm virtualizer, instanceDir string, cfg *vmConfig, bridge string, userData, metaData []byte) (err error) { + err = os.MkdirAll(instanceDir, 0755) + if err != nil { + glog.Errorf("Cannot create instance directory for VM: %v", err) + return + } + + var cfgFile *os.File + defer func() { + if r := recover(); r != nil { + err = r.(error) + _ = os.RemoveAll(instanceDir) + if cfgFile != nil { + _ = cfgFile.Close() + } + } + }() + + err = vm.createImage(bridge, userData, metaData) + if err != nil { + glog.Errorf("Unable to create image %v", err) + panic(err) + } + + cfgFilePath := path.Join(instanceDir, instanceState) + cfgFile, err = os.OpenFile(cfgFilePath, os.O_CREATE|os.O_RDWR, 0600) + if err != nil { + glog.Errorf("Unable to create state file %v", err) + panic(err) + } + + enc := 
gob.NewEncoder(cfgFile) + err = enc.Encode(cfg) + if err != nil { + glog.Errorf("Failed to store state information %v", err) + panic(err) + } + + err = cfgFile.Close() + cfgFile = nil + if err != nil { + glog.Errorf("Failed to store state information %v", err) + panic(err) + } + + return +} + +func processStart(cmd *insStartCmd, instanceDir string, vm virtualizer, client *ssntpConn) *startError { + var err error + var vnicName string + var bridge string + var vnicCfg *libsnnet.VnicConfig + + cfg := cmd.cfg + + /* + Need to check to see if the instance exists first. Otherwise + if it does exist but we fail for another reason first, the instance would be + deleted. + */ + + _, err = os.Stat(instanceDir) + if err == nil { + err = fmt.Errorf("Instance %s has already been created", cfg.Instance) + return &startError{err, payloads.InstanceExists} + } + + if cfg.Image == "" { + err = fmt.Errorf("No backing image specified") + return &startError{err, payloads.InvalidData} + } + + if networking.Enabled() { + vnicCfg, err = createVnicCfg(cfg) + if err != nil { + glog.Errorf("Could not create VnicCFG: %s", err) + return &startError{err, payloads.InvalidData} + } + } + + err = ensureBackingImage(vm) + if err != nil { + return &startError{err, payloads.ImageFailure} + } + + if vnicCfg != nil { + vnicName, bridge, err = createVnic(client, vnicCfg) + if err != nil { + return &startError{err, payloads.NetworkFailure} + } + } + + err = createInstance(vm, instanceDir, cfg, bridge, cmd.userData, cmd.metaData) + if err != nil { + return &startError{err, payloads.ImageFailure} + } + + err = vm.startVM(vnicName, getNodeIPAddress()) + if err != nil { + return &startError{err, payloads.LaunchFailure} + } + + return nil +} diff --git a/ciao-launcher/start_test.go b/ciao-launcher/start_test.go new file mode 100644 index 000000000..604591b06 --- /dev/null +++ b/ciao-launcher/start_test.go @@ -0,0 +1,131 @@ +/* +// Copyright (c) 2016 Intel Corporation +// +// Licensed under the Apache 
License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
*/

package main

import "testing"

// Building blocks for the YAML payloads exercised by TestSplitYaml.
// NOTE(review): the interior indentation of these multi-line fixtures was
// lost in extraction and has been reconstructed as standard two-space
// YAML / four-space JSON -- confirm against the original file.
const (
	commentString   = "# Here's a comment\n"
	cloudInitString = `#cloud-config
package_upgrade: true
package_upgrade: false
runcmd:
  - [ ls, -lh, / ]
`
	cloudInitString2 = `groups:
  - cloud-init: [ciao, openstack]
`
	startString = `start:
  requested_resources:
    - type: vcpus
      value: 2
    - type: mem_mb
      value: 256
    - type: disk_mb
      value: 80000
  instance_uuid: 67d86208-b46c-4465-9018-fe14087d415f
  image_uuid: b286cd45-7d0c-4525-a140-4db6c95e41fa
`
	directivesString = `%TAG ! tag:example.com,2000:app/
%YAML 1.2
---
`
	endString = `...
`
	metaData = `{
    "hostname": "test.novalocal",
    "keys": {
        "data": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgQDBqUfVvCSez0/Wfpd8dLLgZXV9GtXQ7hnMN+Z0OWQUyebVEHey1CXuin0uY1cAJMhUq8j98SiW+cU0sU4J3x5l2+xi1bodDm1BtFWVeLIOQINpfV1n8fKjHB+ynPpe1F6tMDvrFGUlJs44t30BrujMXBe8Rq44cCk6wqyjATA3rQ== Generated by Nova\n"
    },
    "uuid": "83679162-1378-4288-a2d4-70e13ec132aa"
}
`
)

// Combinations of the fixtures above, covering documents in various
// orders, with and without YAML directives and document separators.
const (
	testStartOnlyString                 = startString
	testStartOnlyLeadingNLString        = "\n" + startString
	testStartOnlySeparatorString        = "---\n" + startString
	testStartOnlySeparatorCommentString = "---\n" + commentString + startString
	testStartOnlyBadSeparatorString     = "---\nwibble:\n" + startString
	testStartOnlyDirectivesString       = directivesString + startString
	testStartFirstString                = directivesString + startString + "...\n" + cloudInitString
	testMetaFirstString                 = cloudInitString + testStartOnlyDirectivesString
	testCloudFirstNoSepString           = cloudInitString + startString
	testMiddleString                    = cloudInitString + testStartOnlyString + cloudInitString2
	testBlankString                     = ""
	testNoStartString                   = endString + endString + endString
	testThreeDocsString                 = cloudInitString + "...\n" + startString + "...\n" + metaData
	testThreeDocsEndString              = "---\n" + cloudInitString + "...\n---\n" + startString + "...\n---\n" + metaData + "...\n"
	testThreeDocsSuperFirstString       = startString + "...\n" + cloudInitString + "...\n" + metaData
	testThreeDocsSuperLastString        = cloudInitString + "...\n" + metaData + "---\n" + startString
	testThreeDocsDirectivesString       = directivesString + cloudInitString + "...\n" + testStartOnlyDirectivesString + "...\n" + metaData
	testStartFourthString               = "...\n" + "...\n" + "...\n" + startString
	testTwoDocsNoSuperString            = cloudInitString + "...\n" + metaData
	testThreeAllStartString             = directivesString + cloudInitString + "...\n" + testStartOnlyDirectivesString + "...\n---\n" + metaData
	testThreeEmptyString                = directivesString + "...\n---\n...\n---\n...\n"
	testThreeEmptyNoStartString         = "...\n...\n...\n"
	testThreeEmptyNoEndString           = "---\n---\n---\n"
	testEmptyString                     = ""
)

// Test YAML parsing code
//
// Launcher needs to parse the YAML it receives in the START command to
// extract the meta_json.js file and the start section, which must
// not be passed to the VM instance. This test case tests launcher's
// parsing code on a variety of well and badly formed payloads.
//
// Test should pass okay.
func TestSplitYaml(t *testing.T) {
	var start, cn, md []byte

	// Each case: input payload, expected start / cloud-init / meta-data
	// sections, and the case name used in failure reports.
	tests := []struct{ data, s, c, m, n string }{
		{testStartOnlyString, testStartOnlyString, "", "", "testStartOnlyString"},
		{testStartOnlyLeadingNLString, testStartOnlyString, "\n", "", "testStartOnlyLeadingNLString"},
		{testStartOnlySeparatorString, testStartOnlyString, "", "", "testStartOnlySeparatorString"},
		{testStartOnlySeparatorCommentString, testStartOnlyString, commentString, "", "testStartOnlySeparatorCommentString"},
		{testStartOnlyBadSeparatorString, testStartOnlyString, "wibble:\n", "", "testStartOnlyBadSeparatorString"},
		{testStartOnlyDirectivesString, testStartOnlyString, "", "", "testStartOnlyDirectivesString"},
		{testStartFirstString, testStartOnlyString, "", cloudInitString, "testStartFirstString"},
		// NOTE(review): this case's report name does not match its input
		// (testMetaFirstString vs "testCloudFirstString") -- confirm intent.
		{testMetaFirstString, testStartOnlyString, "", cloudInitString, "testCloudFirstString"},
		{testCloudFirstNoSepString, testStartOnlyString, cloudInitString, "", "testCloudFirstNoSepString"},
		{testMiddleString, testStartOnlyString, cloudInitString + cloudInitString2, "", "testMiddleString"},
		{testBlankString, "", "", "", "testBlankString"},
		{testNoStartString, "", "", "", "testNoStartString"},
		{testThreeDocsString, startString, cloudInitString, metaData, "testThreeDocs"},
		{testThreeDocsEndString, startString, cloudInitString, metaData, "testThreeDocsEnd"},
		{testThreeDocsSuperFirstString, startString, cloudInitString, metaData, "testThreeDocsSuperFirst"},
		{testThreeDocsSuperLastString, startString, cloudInitString, metaData, "testThreeDocsSuperLast"},
		{testThreeDocsDirectivesString, testStartOnlyString, cloudInitString, metaData, "testThreeDocsDirectives"},
		{testStartFourthString, "", "", "", "testStartFourth"},
		{testTwoDocsNoSuperString, "", cloudInitString, metaData, "testTwoDocsNoSuper"},
		{testThreeAllStartString, testStartOnlyString, cloudInitString, metaData, "testThreeAllStart"},
		{testThreeEmptyString, "", "", "", "testThreeEmpty"},
		{testThreeEmptyNoStartString, "", "", "", "testThreeEmptyNoStart"},
		{testThreeEmptyNoEndString, "", "", "", "testThreeEmptyNoEnd"},
		{testEmptyString, "", "", "", "testEmpty"},
	}

	for _, s := range tests {
		start, cn, md = splitYaml([]byte(s.data))
		if string(start) != s.s || string(cn) != s.c || string(md) != s.m {
			t.Fatalf("%s FAILED", s.n)
		}
	}

}

/*
// Copyright (c) 2016 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License
+*/ + +package main + +import ( + "github.com/01org/ciao/payloads" + "github.com/01org/ciao/ssntp" + "github.com/golang/glog" +) + +type stopError struct { + err error + code payloads.StopFailureReason +} + +func (se *stopError) send(client *ssntpConn, instance string) { + if !client.isConnected() { + return + } + + payload, err := generateStopError(instance, se) + if err != nil { + glog.Errorf("Unable to generate payload for stop_failure: %v", err) + return + } + + _, err = client.SendError(ssntp.StopFailure, payload) + if err != nil { + glog.Errorf("Unable to send stop_failure: %v", err) + } +} diff --git a/ciao-launcher/system.go b/ciao-launcher/system.go new file mode 100644 index 000000000..5cb848030 --- /dev/null +++ b/ciao-launcher/system.go @@ -0,0 +1,28 @@ +/* +// Copyright (c) 2016 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+*/ + +package main + +/* +#include +*/ +import "C" + +var clockTicksPerSecond int64 + +func init() { + clockTicksPerSecond = int64(C.sysconf(C._SC_CLK_TCK)) +} diff --git a/ciao-launcher/tests/ciao-launcher-server/CAcert-server-localhost.pem b/ciao-launcher/tests/ciao-launcher-server/CAcert-server-localhost.pem new file mode 100644 index 000000000..ceefaac11 --- /dev/null +++ b/ciao-launcher/tests/ciao-launcher-server/CAcert-server-localhost.pem @@ -0,0 +1,19 @@ +-----BEGIN CERTIFICATE----- +MIIDJzCCAg+gAwIBAgIRAI4v0wKgpQge+9MTPyzhbM0wDQYJKoZIhvcNAQELBQAw +GDEWMBQGA1UEChMNSW50ZWwvU1NHL09UQzAeFw0xNTEyMDQxMDM5MTRaFw0xNjEy +MDMxMDM5MTRaMBgxFjAUBgNVBAoTDUludGVsL1NTRy9PVEMwggEiMA0GCSqGSIb3 +DQEBAQUAA4IBDwAwggEKAoIBAQC9X4CZzaM134EEnZdKF80ubc3OD6XXToOnZ69L +KiEggYcOWb4MlHt2by2wcx3cHF1yDqEJCUfN+EmDLB/PMVPLtLNb3YPK9Uhk+8SC +jJKI+xDczCI/P5dCyjz54dFtq1Oek8kBcD9+QxNwuLu8AaNXr9Trynqtwkzm6f75 +4cmpCcl0qUlaBLsy29l7fBsebwe7/7I2v4U48n8GwYRIJF5Bm/2AloabInPWCpx4 +S/NAquIFUKvWLzo9qytFLB3FY5legu7lcIEQWRWJJIznNFG/dpiVyYvcTtvWRouS +5OoaIOr2tCxt+9ZsTO9Rl1hA6K23vSfjJAnJ76cYZIHT9h1pAgMBAAGjbDBqMA4G +A1UdDwEB/wQEAwICpDAPBgNVHSUECDAGBgRVHSUAMA8GA1UdEwEB/wQFMAMBAf8w +NgYDVR0RBC8wLYIJbG9jYWxob3N0gSBzdXBlcm5vdmEtYXJjaEBlY2xpc3RzLmlu +dGVsLmNvbTANBgkqhkiG9w0BAQsFAAOCAQEAOi+ztvvpN6ssMOEDoaxHfWFFlBhZ +W+UPpaQa5V7zW06wYGFXtE34K9YQhrEBp5LbGlVW+Vra1fTXOECtvrnwP8VYDpZF +yz60H1ytzbeGYin8kCQyFVjrhD1DMye9xVzRhABFW0AsC4IdZqE63wpyyZJNgi16 +xITSMe6lg0paj/yuczmUngEX+Yc7m9o9CvZMu++nPmu0LEvD6UxSdX6guV8tezq7 +Z2g1WrJCB7PQ/iD5kJBdRKMW/kylkVWzoFt+hyl5JpmL/0dCTckPP1c4024av395 +0jQxXB5GU4Fn/6jrpsCeFNu7zjD3yKllHtvUtmf9ceYgtoehejHKHC8bSA== +-----END CERTIFICATE----- diff --git a/ciao-launcher/tests/ciao-launcher-server/cert-client-localhost.pem b/ciao-launcher/tests/ciao-launcher-server/cert-client-localhost.pem new file mode 100644 index 000000000..8d3efe08e --- /dev/null +++ b/ciao-launcher/tests/ciao-launcher-server/cert-client-localhost.pem @@ -0,0 +1,46 @@ +-----BEGIN CERTIFICATE----- 
+MIIDIzCCAgugAwIBAgIQGIAHoubb4w4w0dZbj1kcdDANBgkqhkiG9w0BAQsFADAY +MRYwFAYDVQQKEw1JbnRlbC9TU0cvT1RDMB4XDTE1MTIwNDE1MDcxOFoXDTE2MTIw +MzE1MDcxOFowGDEWMBQGA1UEChMNSW50ZWwvU1NHL09UQzCCASIwDQYJKoZIhvcN +AQEBBQADggEPADCCAQoCggEBALcJomwXlG53jbTweDl2uuBKGGZ5mxd8Wpju39CV +HCgCNgsi67Feds8/tGNU3cJRx/Ygj2enIqOGi+xluhlwkyEcmtaaBjltB2Y7E8M0 +xU3AjzMVWPuHe7Pyy95wzUzyK9CrAOqGtzU+WjFdJse+eZgTMNbNUDgb5TdeduLj +Y6gbtklsU9lvwdW6L4813p9EH49yGI6FFWq/YpOeJfKSC8O+coD23fU0PrWA30ya +DocPQYheVyxtmOCJyY6UeEaNT3Wicy9CFWyYcg8YLAx/cjap7zcfABEeGNyoXL0i +lgAzPtCpwhLiAYUE88NKDpXtzFfvoh2f5LAmRKZJix4Xz6MCAwEAAaNpMGcwDgYD +VR0PAQH/BAQDAgKkMA8GA1UdJQQIMAYGBFUdJQAwDAYDVR0TAQH/BAIwADA2BgNV +HREELzAtgglsb2NhbGhvc3SBIHN1cGVybm92YS1hcmNoQGVjbGlzdHMuaW50ZWwu +Y29tMA0GCSqGSIb3DQEBCwUAA4IBAQAWQ05Ooatle/zAGPWWMtj2y5fgb7FpenNi +8pC9rwKbORxGjyd36WF9CXiYXL5h+xEpGn1SkJM1omf8yoe922qnm6d7LNRamJjH +kV8oPMI6RAuvpilCcZESMgZvYcYdSo1nPhE27Aw67650PiF42ecmpvu6lUTyub4w +COXV0d38tbBwjUskvyMLF7fbed3OZ9kIo4IaV6e6bO7OmujlR1p76OJcv9d3pkXZ +Gc3rtDbhXAdowZ0pIU9CO8ei/rs8+1w980xlRHSHrpX8Nqx17FFgGbHKOevP6pDK +p+9kn6luLhP20g2TPi3wfrCxXvBBky17Ei5hH2bazbMon/Wpvo+x +-----END CERTIFICATE----- +-----BEGIN RSA PRIVATE KEY----- +MIIEogIBAAKCAQEAtwmibBeUbneNtPB4OXa64EoYZnmbF3xamO7f0JUcKAI2CyLr +sV52zz+0Y1TdwlHH9iCPZ6cio4aL7GW6GXCTIRya1poGOW0HZjsTwzTFTcCPMxVY ++4d7s/LL3nDNTPIr0KsA6oa3NT5aMV0mx755mBMw1s1QOBvlN1524uNjqBu2SWxT +2W/B1bovjzXen0Qfj3IYjoUVar9ik54l8pILw75ygPbd9TQ+tYDfTJoOhw9BiF5X +LG2Y4InJjpR4Ro1PdaJzL0IVbJhyDxgsDH9yNqnvNx8AER4Y3KhcvSKWADM+0KnC +EuIBhQTzw0oOle3MV++iHZ/ksCZEpkmLHhfPowIDAQABAoIBACRqn00gN+DumlwI +h/Lu42lidI5W3XuDu0a4KAGI2SxAnF4E2Z3VbI/5XbtxlBloSf93JPD7eNHOICKt +bqRHGI668TeHN3vPbGxzjpAE/MhmaldAhQ6uOvdndBZSnRYVDJCcpTcEDIlkWW9+ +2AtfozOVhd8GxFIVgrpMahzkHtzAKHzEauRIz0BKvJwVzvw4hkn2Zsg3hKcouF1S +LIZYTsDiGA7AObsfnNOML5QgifB2mCUefvVaAuamao/0d2VkXaASB5T5UL0wckMN +aLhM2QEEmx3AR8M/qnyryrvIyJuuOlBmoZPNguNOyGSjdbXi1XcJIls12eCQYd5m +etOjKEkCgYEA4zESpnb/88/HFT1GgHCVMpdj+pBRtgPw7aYfUoWCrqVgedlCfvw1 
+5nbgCVMRUvqdFxt+umMSNA97kOE/c2g3EGZj5kDkPj1a2zeQcMM0BPQ1bvXJnXbr +RCEZiKjnryBUXRyF5QWFjOYwvxlcLV7G5XC+0Q52bX0obh8mDfbMJpcCgYEAzj9D +/TM+b5Wlkon154l/WAo94+e78fin3egf+tv993P9ydUccC8szF+DlF+WFP+V3fBs +oz+6N9zeXuBb2DSNR0cCjsHH9vsSK8BjjTtnFZRVhE/z4CXInpO8Y763aMKPqTS9 +b7mQZFkO6lzZ8eqrTYKEuN015FBChtwa7JwsbNUCgYAu7z1eHGlmEf+b2KvO7vMz +n2jtI5Ft28STI+WGo8RKlimnIoYFO89azCYeZwXGMbXwRyLYGI1zKduqI457JgVy +IuTydWdxoDU74m+5i+ocN+MuCCLdn+qciYX/4Hz0heZH0hOrK4fagteBMxkC2niW +f8M75xP6LIgEr/Spn9mvAwKBgAsWRY5f6dRPxBXLiseGkazm6ArPU0AfsM5yAv4W +Q2HA/3yREzVIo+zcmZMpTEUWtDS5Du37qAQGwIFzKBzIPmv9ejq4WgqB1qk5b2CR +N+pmDe7N6Xs8LLuxj2gBQZTFGLns1t4hqIxsaSvaD/E8Pzcsc/wIEDSUx+69+EiN +4Kl9AoGAX8m4Dxr5uPuqXaF0ag1yfq1b4+5IzqMYWq4OPwEhC5qU1beizLbiZj3w +KcNPur1QBFqiAlvOjcSzlHgh9vjezGjZj54RMqoq8lAAKyovDC3BUe/h0CWKrRCK +1zLH+Lrcm34pM/V8I8Hb9Ha1arZQf8wxXgx3JZfWz4CTRowmqBQ= +-----END RSA PRIVATE KEY----- diff --git a/ciao-launcher/tests/ciao-launcher-server/cert-server-localhost.pem b/ciao-launcher/tests/ciao-launcher-server/cert-server-localhost.pem new file mode 100644 index 000000000..bea1d8ee7 --- /dev/null +++ b/ciao-launcher/tests/ciao-launcher-server/cert-server-localhost.pem @@ -0,0 +1,46 @@ +-----BEGIN CERTIFICATE----- +MIIDJzCCAg+gAwIBAgIRAI4v0wKgpQge+9MTPyzhbM0wDQYJKoZIhvcNAQELBQAw +GDEWMBQGA1UEChMNSW50ZWwvU1NHL09UQzAeFw0xNTEyMDQxMDM5MTRaFw0xNjEy +MDMxMDM5MTRaMBgxFjAUBgNVBAoTDUludGVsL1NTRy9PVEMwggEiMA0GCSqGSIb3 +DQEBAQUAA4IBDwAwggEKAoIBAQC9X4CZzaM134EEnZdKF80ubc3OD6XXToOnZ69L +KiEggYcOWb4MlHt2by2wcx3cHF1yDqEJCUfN+EmDLB/PMVPLtLNb3YPK9Uhk+8SC +jJKI+xDczCI/P5dCyjz54dFtq1Oek8kBcD9+QxNwuLu8AaNXr9Trynqtwkzm6f75 +4cmpCcl0qUlaBLsy29l7fBsebwe7/7I2v4U48n8GwYRIJF5Bm/2AloabInPWCpx4 +S/NAquIFUKvWLzo9qytFLB3FY5legu7lcIEQWRWJJIznNFG/dpiVyYvcTtvWRouS +5OoaIOr2tCxt+9ZsTO9Rl1hA6K23vSfjJAnJ76cYZIHT9h1pAgMBAAGjbDBqMA4G +A1UdDwEB/wQEAwICpDAPBgNVHSUECDAGBgRVHSUAMA8GA1UdEwEB/wQFMAMBAf8w +NgYDVR0RBC8wLYIJbG9jYWxob3N0gSBzdXBlcm5vdmEtYXJjaEBlY2xpc3RzLmlu +dGVsLmNvbTANBgkqhkiG9w0BAQsFAAOCAQEAOi+ztvvpN6ssMOEDoaxHfWFFlBhZ 
+W+UPpaQa5V7zW06wYGFXtE34K9YQhrEBp5LbGlVW+Vra1fTXOECtvrnwP8VYDpZF +yz60H1ytzbeGYin8kCQyFVjrhD1DMye9xVzRhABFW0AsC4IdZqE63wpyyZJNgi16 +xITSMe6lg0paj/yuczmUngEX+Yc7m9o9CvZMu++nPmu0LEvD6UxSdX6guV8tezq7 +Z2g1WrJCB7PQ/iD5kJBdRKMW/kylkVWzoFt+hyl5JpmL/0dCTckPP1c4024av395 +0jQxXB5GU4Fn/6jrpsCeFNu7zjD3yKllHtvUtmf9ceYgtoehejHKHC8bSA== +-----END CERTIFICATE----- +-----BEGIN RSA PRIVATE KEY----- +MIIEowIBAAKCAQEAvV+Amc2jNd+BBJ2XShfNLm3Nzg+l106Dp2evSyohIIGHDlm+ +DJR7dm8tsHMd3Bxdcg6hCQlHzfhJgywfzzFTy7SzW92DyvVIZPvEgoySiPsQ3Mwi +Pz+XQso8+eHRbatTnpPJAXA/fkMTcLi7vAGjV6/U68p6rcJM5un++eHJqQnJdKlJ +WgS7MtvZe3wbHm8Hu/+yNr+FOPJ/BsGESCReQZv9gJaGmyJz1gqceEvzQKriBVCr +1i86PasrRSwdxWOZXoLu5XCBEFkViSSM5zRRv3aYlcmL3E7b1kaLkuTqGiDq9rQs +bfvWbEzvUZdYQOitt70n4yQJye+nGGSB0/YdaQIDAQABAoIBADOnXRiWsrH7KOHt +alrXaywC32QZeI7l55s9+N5zu112dzQ+3kvoJFUFtlyuybJEOmjl/J7OXTudQc/5 +tSN6vPSPcWzZWx19EiYZVlyVo1KhyNOxCpZd7hBVwZQXOFzFoqVagjVWPLtf6GmS +ugJqH/fHCjjWnefPW9KQePAH2k202YJ6AZ4Z5qANcDS/m98sqjvpf9K+13ns8/lv +beIgPJNtM4BdfedUk2qLmhSx+F6Bp3x00jhL2nalNkFUatim3jq1OOP41PEmnQ0T +YLHoY6ByMN+FY710OrR/pu7rnB9VI1R3xLKsqbjYlLJGGjaC7DDvJyu4q9dJJeuN +4arT0VECgYEA1LRRB5WJWBYkwHyFhtA3TpDBV9Tg1wZsdB5fS2/wqkpciMcpc1eC +KWMl1q9Y9NIOMK3TgsBHmFFdU/8A1yEVKyo8cr2qZCLAaDsFAf0wCI9+PGcAAiKQ +8PErs2R+Jl/KAO1TreCDj9yGKEX4/88DkXit7tpVASFumQMk+Wa39hsCgYEA4+tt +y029e19VxO4Ciu2v8PRtVE6G8pU3iXhYfl1MaSiLJI/wR361C58SwyVKG1p6Afd6 +M2FunDdZoOAqh1tcIyPmbSFGs44rHo8iRFKLKqSwG1A1Wyut4DrTE4wjWwdeJq1H +KIj7cv30+7SQlylbl1nDUhVbJ9IxPeeYl14ZQssCgYAljEM1lIcvPpF0TUcA+maY +/uKyj4i1TJP+l5H47iTM2t2k44uEqfD3P2JhmIiahy4sF7NbxHefAtX3yu8qCtQC +z6zmEGcw0BWpEKocpMZC7Jm7Le+JcPKnJduyCk5eldBCA6Me8aAn2CTynQ+MkM2v +c18459/TmEQ+zJhNUn2s+wKBgBO3GL8+UUarv3qScAvedZqtOgvR4yB2weAVVepQ +Xby28hpOjSVSE40WzIOu5RMqp2UmKB7rraq7qvgci/E7eOPdRiBogXoksRwqm3c3 +gKxGpKkoLgVd6ON9kyact643G/DSilY6Sy3uQ9PGEb/RpweXjyq5iDIQn4vQTcXC +MQMzAoGBAJ+lLhPoe0X3fSoaHD8hiMUFzNGd6X0wQKKcgXcXHxO479f5oddY6XyE +JOLZsPVotYcNHUt1OzSDUpeIByb674KkpJuLU59vN51TBWog6la6F/qQjHTy4izs 
+X5xubbrZSAP/afJVqLBdfARxHIpXx/Osr7I8fRM03EXmSsVjejuA +-----END RSA PRIVATE KEY----- diff --git a/ciao-launcher/tests/ciao-launcher-server/default-certs.go b/ciao-launcher/tests/ciao-launcher-server/default-certs.go new file mode 100644 index 000000000..fe504fcbe --- /dev/null +++ b/ciao-launcher/tests/ciao-launcher-server/default-certs.go @@ -0,0 +1,86 @@ +/* +// Copyright (c) 2016 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +*/ + +package main + +const caCertServer = ` +-----BEGIN CERTIFICATE----- +MIIDJzCCAg+gAwIBAgIRAI4v0wKgpQge+9MTPyzhbM0wDQYJKoZIhvcNAQELBQAw +GDEWMBQGA1UEChMNSW50ZWwvU1NHL09UQzAeFw0xNTEyMDQxMDM5MTRaFw0xNjEy +MDMxMDM5MTRaMBgxFjAUBgNVBAoTDUludGVsL1NTRy9PVEMwggEiMA0GCSqGSIb3 +DQEBAQUAA4IBDwAwggEKAoIBAQC9X4CZzaM134EEnZdKF80ubc3OD6XXToOnZ69L +KiEggYcOWb4MlHt2by2wcx3cHF1yDqEJCUfN+EmDLB/PMVPLtLNb3YPK9Uhk+8SC +jJKI+xDczCI/P5dCyjz54dFtq1Oek8kBcD9+QxNwuLu8AaNXr9Trynqtwkzm6f75 +4cmpCcl0qUlaBLsy29l7fBsebwe7/7I2v4U48n8GwYRIJF5Bm/2AloabInPWCpx4 +S/NAquIFUKvWLzo9qytFLB3FY5legu7lcIEQWRWJJIznNFG/dpiVyYvcTtvWRouS +5OoaIOr2tCxt+9ZsTO9Rl1hA6K23vSfjJAnJ76cYZIHT9h1pAgMBAAGjbDBqMA4G +A1UdDwEB/wQEAwICpDAPBgNVHSUECDAGBgRVHSUAMA8GA1UdEwEB/wQFMAMBAf8w +NgYDVR0RBC8wLYIJbG9jYWxob3N0gSBzdXBlcm5vdmEtYXJjaEBlY2xpc3RzLmlu +dGVsLmNvbTANBgkqhkiG9w0BAQsFAAOCAQEAOi+ztvvpN6ssMOEDoaxHfWFFlBhZ +W+UPpaQa5V7zW06wYGFXtE34K9YQhrEBp5LbGlVW+Vra1fTXOECtvrnwP8VYDpZF +yz60H1ytzbeGYin8kCQyFVjrhD1DMye9xVzRhABFW0AsC4IdZqE63wpyyZJNgi16 
+xITSMe6lg0paj/yuczmUngEX+Yc7m9o9CvZMu++nPmu0LEvD6UxSdX6guV8tezq7 +Z2g1WrJCB7PQ/iD5kJBdRKMW/kylkVWzoFt+hyl5JpmL/0dCTckPP1c4024av395 +0jQxXB5GU4Fn/6jrpsCeFNu7zjD3yKllHtvUtmf9ceYgtoehejHKHC8bSA== +-----END CERTIFICATE-----` + +const certServer = ` +-----BEGIN CERTIFICATE----- +MIIDJzCCAg+gAwIBAgIRAI4v0wKgpQge+9MTPyzhbM0wDQYJKoZIhvcNAQELBQAw +GDEWMBQGA1UEChMNSW50ZWwvU1NHL09UQzAeFw0xNTEyMDQxMDM5MTRaFw0xNjEy +MDMxMDM5MTRaMBgxFjAUBgNVBAoTDUludGVsL1NTRy9PVEMwggEiMA0GCSqGSIb3 +DQEBAQUAA4IBDwAwggEKAoIBAQC9X4CZzaM134EEnZdKF80ubc3OD6XXToOnZ69L +KiEggYcOWb4MlHt2by2wcx3cHF1yDqEJCUfN+EmDLB/PMVPLtLNb3YPK9Uhk+8SC +jJKI+xDczCI/P5dCyjz54dFtq1Oek8kBcD9+QxNwuLu8AaNXr9Trynqtwkzm6f75 +4cmpCcl0qUlaBLsy29l7fBsebwe7/7I2v4U48n8GwYRIJF5Bm/2AloabInPWCpx4 +S/NAquIFUKvWLzo9qytFLB3FY5legu7lcIEQWRWJJIznNFG/dpiVyYvcTtvWRouS +5OoaIOr2tCxt+9ZsTO9Rl1hA6K23vSfjJAnJ76cYZIHT9h1pAgMBAAGjbDBqMA4G +A1UdDwEB/wQEAwICpDAPBgNVHSUECDAGBgRVHSUAMA8GA1UdEwEB/wQFMAMBAf8w +NgYDVR0RBC8wLYIJbG9jYWxob3N0gSBzdXBlcm5vdmEtYXJjaEBlY2xpc3RzLmlu +dGVsLmNvbTANBgkqhkiG9w0BAQsFAAOCAQEAOi+ztvvpN6ssMOEDoaxHfWFFlBhZ +W+UPpaQa5V7zW06wYGFXtE34K9YQhrEBp5LbGlVW+Vra1fTXOECtvrnwP8VYDpZF +yz60H1ytzbeGYin8kCQyFVjrhD1DMye9xVzRhABFW0AsC4IdZqE63wpyyZJNgi16 +xITSMe6lg0paj/yuczmUngEX+Yc7m9o9CvZMu++nPmu0LEvD6UxSdX6guV8tezq7 +Z2g1WrJCB7PQ/iD5kJBdRKMW/kylkVWzoFt+hyl5JpmL/0dCTckPP1c4024av395 +0jQxXB5GU4Fn/6jrpsCeFNu7zjD3yKllHtvUtmf9ceYgtoehejHKHC8bSA== +-----END CERTIFICATE----- +-----BEGIN RSA PRIVATE KEY----- +MIIEowIBAAKCAQEAvV+Amc2jNd+BBJ2XShfNLm3Nzg+l106Dp2evSyohIIGHDlm+ +DJR7dm8tsHMd3Bxdcg6hCQlHzfhJgywfzzFTy7SzW92DyvVIZPvEgoySiPsQ3Mwi +Pz+XQso8+eHRbatTnpPJAXA/fkMTcLi7vAGjV6/U68p6rcJM5un++eHJqQnJdKlJ +WgS7MtvZe3wbHm8Hu/+yNr+FOPJ/BsGESCReQZv9gJaGmyJz1gqceEvzQKriBVCr +1i86PasrRSwdxWOZXoLu5XCBEFkViSSM5zRRv3aYlcmL3E7b1kaLkuTqGiDq9rQs +bfvWbEzvUZdYQOitt70n4yQJye+nGGSB0/YdaQIDAQABAoIBADOnXRiWsrH7KOHt +alrXaywC32QZeI7l55s9+N5zu112dzQ+3kvoJFUFtlyuybJEOmjl/J7OXTudQc/5 +tSN6vPSPcWzZWx19EiYZVlyVo1KhyNOxCpZd7hBVwZQXOFzFoqVagjVWPLtf6GmS 
+ugJqH/fHCjjWnefPW9KQePAH2k202YJ6AZ4Z5qANcDS/m98sqjvpf9K+13ns8/lv +beIgPJNtM4BdfedUk2qLmhSx+F6Bp3x00jhL2nalNkFUatim3jq1OOP41PEmnQ0T +YLHoY6ByMN+FY710OrR/pu7rnB9VI1R3xLKsqbjYlLJGGjaC7DDvJyu4q9dJJeuN +4arT0VECgYEA1LRRB5WJWBYkwHyFhtA3TpDBV9Tg1wZsdB5fS2/wqkpciMcpc1eC +KWMl1q9Y9NIOMK3TgsBHmFFdU/8A1yEVKyo8cr2qZCLAaDsFAf0wCI9+PGcAAiKQ +8PErs2R+Jl/KAO1TreCDj9yGKEX4/88DkXit7tpVASFumQMk+Wa39hsCgYEA4+tt +y029e19VxO4Ciu2v8PRtVE6G8pU3iXhYfl1MaSiLJI/wR361C58SwyVKG1p6Afd6 +M2FunDdZoOAqh1tcIyPmbSFGs44rHo8iRFKLKqSwG1A1Wyut4DrTE4wjWwdeJq1H +KIj7cv30+7SQlylbl1nDUhVbJ9IxPeeYl14ZQssCgYAljEM1lIcvPpF0TUcA+maY +/uKyj4i1TJP+l5H47iTM2t2k44uEqfD3P2JhmIiahy4sF7NbxHefAtX3yu8qCtQC +z6zmEGcw0BWpEKocpMZC7Jm7Le+JcPKnJduyCk5eldBCA6Me8aAn2CTynQ+MkM2v +c18459/TmEQ+zJhNUn2s+wKBgBO3GL8+UUarv3qScAvedZqtOgvR4yB2weAVVepQ +Xby28hpOjSVSE40WzIOu5RMqp2UmKB7rraq7qvgci/E7eOPdRiBogXoksRwqm3c3 +gKxGpKkoLgVd6ON9kyact643G/DSilY6Sy3uQ9PGEb/RpweXjyq5iDIQn4vQTcXC +MQMzAoGBAJ+lLhPoe0X3fSoaHD8hiMUFzNGd6X0wQKKcgXcXHxO479f5oddY6XyE +JOLZsPVotYcNHUt1OzSDUpeIByb674KkpJuLU59vN51TBWog6la6F/qQjHTy4izs +X5xubbrZSAP/afJVqLBdfARxHIpXx/Osr7I8fRM03EXmSsVjejuA +-----END RSA PRIVATE KEY-----` diff --git a/ciao-launcher/tests/ciao-launcher-server/server.go b/ciao-launcher/tests/ciao-launcher-server/server.go new file mode 100644 index 000000000..151f38f15 --- /dev/null +++ b/ciao-launcher/tests/ciao-launcher-server/server.go @@ -0,0 +1,501 @@ +/* +// Copyright (c) 2016 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
// See the License for the specific language governing permissions and
// limitations under the License.
*/

package main

import (
	"flag"
	"fmt"
	"io/ioutil"
	"net"
	"net/http"
	"os"
	"os/signal"
	"path"
	"sync"
	"syscall"
	"time"

	"gopkg.in/yaml.v2"

	"github.com/01org/ciao/payloads"
	"github.com/01org/ciao/ssntp"
)

// Command-line configuration and the ssntp server instance shared by the
// HTTP handlers below.
var caServerPath string
var serverPath string
var serverURL string
var traceCommands bool
var ssntpServer = &ssntp.Server{}

func init() {
	flag.StringVar(&caServerPath, "ca-server-cert", "", "Path to CAServer certificate")
	flag.StringVar(&serverPath, "server-cert", "", "Path to server certificate")
	flag.BoolVar(&traceCommands, "trace", false, "Turn on ssntp command tracing")
	flag.StringVar(&serverURL, "server", "127.0.0.1:9000", "IP port of server")
}

// client caches the latest STATS payload, the latest READY status and
// any decoded error/event payloads received from one connected agent.
type client struct {
	stats  *payloads.Stat
	status *payloads.Ready
	events []interface{}
}

// server tracks the currently connected launcher agents, keyed by their
// ssntp uuid.
var server = struct {
	sync.Mutex // Protects Map
	clients map[string]*client
}{
	clients: make(map[string]*client),
}

// testServer implements the ssntp server notification interface.
type testServer struct{}

// ConnectNotify registers a newly connected AGENT.  Connections with any
// other role, or reconnections of a known uuid, are ignored.
func (ts *testServer) ConnectNotify(uuid string, role uint32) {
	server.Lock()
	defer server.Unlock()

	if role != ssntp.AGENT {
		return
	}

	if _, exists := server.clients[uuid]; exists {
		return
	}

	server.clients[uuid] = new(client)
}

// DisconnectNotify drops all state held for a disconnecting client.
func (ts *testServer) DisconnectNotify(uuid string) {
	server.Lock()
	defer server.Unlock()

	if _, exists := server.clients[uuid]; exists {
		delete(server.clients, uuid)
	}
}

// StatusNotify caches the latest READY status frame for the client.
// Malformed payloads are silently dropped.
func (ts *testServer) StatusNotify(uuid string, status ssntp.Status, frame *ssntp.Frame) {
	var ready payloads.Ready
	err := yaml.Unmarshal(frame.Payload, &ready)
	if err == nil {
		server.Lock()
		if server.clients[uuid] != nil {
			server.clients[uuid].status = &ready
		}
		server.Unlock()
	}
}

// CommandNotify caches the latest STATS frame for the client.  Other
// commands and malformed payloads are silently ignored.
func (ts *testServer) CommandNotify(uuid string, command ssntp.Command, frame *ssntp.Frame) {
	switch command {
	case ssntp.STATS:
		var stats payloads.Stat
		err := yaml.Unmarshal(frame.Payload, &stats)
		if err == nil {
			server.Lock()
			if server.clients[uuid] != nil {
				server.clients[uuid].stats = &stats
			}
			server.Unlock()
		}
	}
}

// ErrorNotify decodes the error frame into the matching payload type and
// appends it to the client's event log.  An unrecognised error code or a
// malformed payload is appended as nil.
func (ts *testServer) ErrorNotify(uuid string, err ssntp.Error, frame *ssntp.Frame) {
	server.Lock()
	defer server.Unlock()

	c := server.clients[uuid]
	if c == nil {
		return
	}

	if c.events == nil {
		c.events = make([]interface{}, 0, 32)
	}

	// TODO is there a better way to do this with reflection?

	var e interface{}
	switch err {
	case ssntp.StartFailure:
		payload := payloads.ErrorStartFailure{}
		err := yaml.Unmarshal(frame.Payload, &payload)
		if err == nil {
			e = &payload
		}
	case ssntp.StopFailure:
		payload := payloads.ErrorStopFailure{}
		err := yaml.Unmarshal(frame.Payload, &payload)
		if err == nil {
			e = &payload
		}
	case ssntp.RestartFailure:
		payload := payloads.ErrorRestartFailure{}
		err := yaml.Unmarshal(frame.Payload, &payload)
		if err == nil {
			e = &payload
		}
	case ssntp.DeleteFailure:
		payload := payloads.ErrorDeleteFailure{}
		err := yaml.Unmarshal(frame.Payload, &payload)
		if err == nil {
			e = &payload
		}
	}

	c.events = append(c.events, e)
}

// EventNotify decodes the event frame into the matching payload type and
// appends it to the client's event log.  An unrecognised event or a
// malformed payload is appended as nil.
func (ts *testServer) EventNotify(uuid string, event ssntp.Event, frame *ssntp.Frame) {
	server.Lock()
	defer server.Unlock()

	c := server.clients[uuid]
	if c == nil {
		return
	}

	if c.events == nil {
		c.events = make([]interface{}, 0, 32)
	}

	var e interface{}

	switch event {
	case ssntp.TenantAdded:
		payload := payloads.EventTenantAdded{}
		err := yaml.Unmarshal(frame.Payload, &payload)
		if err == nil {
			e = &payload
		}
	case ssntp.TenantRemoved:
		payload := payloads.EventTenantRemoved{}
		err := yaml.Unmarshal(frame.Payload, &payload)
		if err == nil {
			e = &payload
		}
	case ssntp.InstanceDeleted:
		payload := payloads.EventInstanceDeleted{}
		err := yaml.Unmarshal(frame.Payload, &payload)
		if err == nil {
			e = &payload
		}
	}

	c.events =
append(c.events, e) +} + +func getCertPaths(tmpDir string) (string, string) { + + var caPath, sPath string + + caPath = path.Join(tmpDir, "CACertServer") + sPath = path.Join(tmpDir, "CertServer") + + for _, s := range []struct{ path, data string }{{caPath, caCertServer}, {sPath, certServer}} { + err := ioutil.WriteFile(s.path, []byte(s.data), 0755) + if err != nil { + fmt.Fprintf(os.Stderr, "Unable to create certfile %s %v\n", s.path, err) + os.Exit(1) + } + } + + return caPath, sPath +} + +func dumpYaml(w http.ResponseWriter, data interface{}) { + payload, err := yaml.Marshal(data) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + w.Write(payload) +} + +func clients(w http.ResponseWriter, r *http.Request) { + if r.Method != "" && r.Method != "GET" { + http.Error(w, "GET expected", http.StatusBadRequest) + return + } + server.Lock() + defer server.Unlock() + i := 0 + clients := make([]string, len(server.clients)) + for k := range server.clients { + clients[i] = k + i++ + } + + dumpYaml(w, &clients) +} + +func getClient(clientP string) string { + if len(server.clients) == 0 { + return "" + } else if len(server.clients) == 1 && clientP == "" { + for k := range server.clients { + clientP = k + } + } else { + if clientP == "" { + return "" + } + } + + return clientP +} + +func yamlCommand(w http.ResponseWriter, r *http.Request, command ssntp.Command) { + if r.Method != "POST" || r.Body == nil { + http.Error(w, "POST expected", http.StatusBadRequest) + return + } + + values := r.URL.Query() + clientP := values.Get("client") + server.Lock() + defer server.Unlock() + + clientP = getClient(clientP) + c := server.clients[clientP] + if c == nil { + http.Error(w, "Invalid client", http.StatusBadRequest) + return + } + + data, err := ioutil.ReadAll(r.Body) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + _, err = ssntpServer.SendCommand(clientP, command, data) + if err != nil { + 
http.Error(w, err.Error(), http.StatusInternalServerError) + return + } +} + +func instances(w http.ResponseWriter, r *http.Request) { + if r.Method != "" && r.Method != "GET" { + http.Error(w, "GET expected", http.StatusBadRequest) + return + } + + values := r.URL.Query() + clientP := values.Get("client") + filterP := values.Get("filter") + server.Lock() + defer server.Unlock() + + clientP = getClient(clientP) + c := server.clients[clientP] + if c == nil { + http.Error(w, "Invalid client", http.StatusBadRequest) + return + } + + var instances []string + if c.stats != nil { + instances = make([]string, 0, len(c.stats.Instances)) + for _, i := range c.stats.Instances { + if filterP == "" || (filterP == i.State) { + instances = append(instances, i.InstanceUUID) + } + } + } else { + instances = []string{} + } + + dumpYaml(w, instances) +} + +func stats(w http.ResponseWriter, r *http.Request) { + if r.Method != "" && r.Method != "GET" { + http.Error(w, "GET expected", http.StatusBadRequest) + return + } + + values := r.URL.Query() + clientP := values.Get("client") + filterP := values.Get("filter") + server.Lock() + defer server.Unlock() + + clientP = getClient(clientP) + c := server.clients[clientP] + if c == nil { + http.Error(w, "Invalid client", http.StatusBadRequest) + return + } + + if c.stats == nil { + http.Error(w, "Stats not available", http.StatusNotFound) + return + } + + var stats *payloads.Stat + + if filterP == "" { + stats = c.stats + } else { + tmpStats := *c.stats + counter := 0 + for _, i := range c.stats.Instances { + if filterP == "" || (filterP == i.State) { + tmpStats.Instances[counter] = i + counter++ + } + } + tmpStats.Instances = tmpStats.Instances[:counter] + stats = &tmpStats + } + + dumpYaml(w, stats) +} + +func status(w http.ResponseWriter, r *http.Request) { + if r.Method != "" && r.Method != "GET" { + http.Error(w, "GET expected", http.StatusBadRequest) + return + } + + values := r.URL.Query() + clientP := values.Get("client") + 
server.Lock() + defer server.Unlock() + + clientP = getClient(clientP) + c := server.clients[clientP] + if c == nil { + http.Error(w, "Invalid client", http.StatusBadRequest) + return + } + + if c.status == nil { + http.Error(w, "Statuss not available", http.StatusNotFound) + return + } + + dumpYaml(w, c.status) +} + +func drain(w http.ResponseWriter, r *http.Request) { + if r.Method != "" && r.Method != "GET" { + http.Error(w, "GET expected", http.StatusBadRequest) + return + } + + values := r.URL.Query() + clientP := values.Get("client") + server.Lock() + defer server.Unlock() + + clientP = getClient(clientP) + c := server.clients[clientP] + if c == nil { + http.Error(w, "Invalid client", http.StatusBadRequest) + return + } + + dumpYaml(w, c.events) + c.events = nil +} + +func serve(done chan os.Signal) { + listener, err := net.Listen("tcp", serverURL) + if err != nil { + fmt.Fprintf(os.Stderr, "Unable to create listener: %v\n", err) + return + } + + var wg sync.WaitGroup + wg.Add(1) + go func() { + http.HandleFunc("/clients", clients) + http.HandleFunc("/instances", instances) + http.HandleFunc("/start", + func(w http.ResponseWriter, r *http.Request) { + yamlCommand(w, r, ssntp.START) + }) + http.HandleFunc("/stop", + func(w http.ResponseWriter, r *http.Request) { + yamlCommand(w, r, ssntp.STOP) + }) + + http.HandleFunc("/restart", + func(w http.ResponseWriter, r *http.Request) { + yamlCommand(w, r, ssntp.RESTART) + }) + http.HandleFunc("/delete", + func(w http.ResponseWriter, r *http.Request) { + yamlCommand(w, r, ssntp.DELETE) + }) + http.HandleFunc("/stats", stats) + http.HandleFunc("/status", status) + http.HandleFunc("/drain", drain) + if err := http.Serve(listener, nil); err != nil { + fmt.Fprintf(os.Stderr, "Failed to listen: %v\n", err) + } + wg.Done() + }() + <-done + listener.Close() + wg.Wait() +} + +func main() { + flag.Parse() + + cfg := new(ssntp.Config) + + if (caServerPath == "" && serverPath != "") || (caServerPath != "" && serverPath == "") { + 
fmt.Fprintln(os.Stderr, "Either both or neither certificate paths must be defined") + os.Exit(1) + } else if caServerPath == "" && serverPath == "" { + tmpDir, err := ioutil.TempDir("", "launcher-server") + if err != nil { + fmt.Fprintf(os.Stderr, "Unable to create temporary Dir %v\n", err) + os.Exit(1) + } + defer func() { + _ = os.RemoveAll(tmpDir) + }() + cfg.CAcert, cfg.Cert = getCertPaths(tmpDir) + } else { + cfg.CAcert, cfg.Cert = caServerPath, serverPath + } + cfg.Trace = &ssntp.TraceConfig{PathTrace: traceCommands, Start: time.Now()} + + signalCh := make(chan os.Signal, 1) + signal.Notify(signalCh, syscall.SIGINT, syscall.SIGTERM, syscall.SIGHUP) + var wg sync.WaitGroup + + wg.Add(1) + go func() { + err := ssntpServer.Serve(cfg, &testServer{}) + if err != nil { + fmt.Fprintf(os.Stderr, "Unable to start server %v\n", err) + os.Exit(1) + } + wg.Done() + }() + + serve(signalCh) + ssntpServer.Stop() + wg.Wait() +} diff --git a/ciao-launcher/tests/ciaolc/ciaolc.go b/ciao-launcher/tests/ciaolc/ciaolc.go new file mode 100644 index 000000000..6914c32ba --- /dev/null +++ b/ciao-launcher/tests/ciaolc/ciaolc.go @@ -0,0 +1,454 @@ +/* +// Copyright (c) 2016 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+*/ + +package main + +import ( + "bytes" + "flag" + "fmt" + "io/ioutil" + "net/http" + "net/url" + "os" + "text/tabwriter" + + "github.com/01org/ciao/payloads" + + "gopkg.in/yaml.v2" +) + +var serverURL string + +func init() { + flag.StringVar(&serverURL, "server", "127.0.0.1:9000", "IP port of server") + flag.Usage = func() { + fmt.Fprintln(os.Stderr, "ciaolc is a command line tool for testing ciao-launcher") + fmt.Fprintln(os.Stderr, "") + fmt.Fprintln(os.Stderr, "Usage:") + fmt.Fprintln(os.Stderr, "\tciaolc command") + fmt.Fprintln(os.Stderr, "") + fmt.Fprintln(os.Stderr, "Where commands are:") + fmt.Fprintln(os.Stderr, "\tstartf") + fmt.Fprintln(os.Stderr, "\tdelete") + fmt.Fprintln(os.Stderr, "\tstop") + fmt.Fprintln(os.Stderr, "\trestart") + fmt.Fprintln(os.Stderr, "\tdrain") + fmt.Fprintln(os.Stderr, "\tstats") + fmt.Fprintln(os.Stderr, "\tistats") + fmt.Fprintln(os.Stderr, "\tstatus") + fmt.Fprintln(os.Stderr, "") + fmt.Fprintln(os.Stderr, "Flags:") + flag.PrintDefaults() + } +} + +type filter string + +func (f *filter) String() string { + return string(*f) +} + +func (f *filter) Set(val string) error { + if val != "none" && val != "exited" && val != "pending" && + val != "running" { + return fmt.Errorf("exited, pending, running expected") + } + *f = filter(val) + + return nil +} + +func grabBody(url string) ([]byte, error) { + resp, err := http.Get(url) + if err != nil { + return nil, err + } + defer resp.Body.Close() + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, err + } + return body, nil +} + +func clients(host string) error { + u := url.URL{ + Scheme: "http", + Host: host, + Path: "clients", + } + + body, err := grabBody(u.String()) + if err != nil { + return err + } + clients := []string{} + err = yaml.Unmarshal(body, &clients) + if err != nil { + return err + } + + for _, c := range clients { + fmt.Println(c) + } + + return nil +} + +func instances(host string) error { + u, err := queryStatsURL("instances", "instances", 
host) + if err != nil { + return err + } + body, err := grabBody(u) + if err != nil { + return err + } + clients := []string{} + err = yaml.Unmarshal(body, &clients) + if err != nil { + return err + } + + for _, c := range clients { + fmt.Println(c) + } + + return nil +} + +func queryURL(host string, cmd string, c string, f string) string { + u := url.URL{ + Scheme: "http", + Host: host, + Path: cmd, + } + + q := u.Query() + if c != "" { + q.Set("client", c) + } + if f != "" { + q.Set("filter", f) + } + u.RawQuery = q.Encode() + return u.String() +} + +func getStats(u string) (*payloads.Stat, error) { + body, err := grabBody(u) + if err != nil { + return nil, err + } + + var stats payloads.Stat + err = yaml.Unmarshal(body, &stats) + if err != nil { + return nil, err + } + + return &stats, nil +} + +func queryStatsURL(cmd, remoteCmd, host string) (string, error) { + fs := flag.NewFlagSet(cmd, flag.ExitOnError) + fp := filter("none") + fs.Var(&fp, "filter", "Can be exited, pending or running") + cp := "" + fs.StringVar(&cp, "client", "", "UUID of client") + + if err := fs.Parse(flag.Args()[1:]); err != nil { + return "", err + } + + f := "" + if fp != filter("none") { + f = fp.String() + } + + return queryURL(host, remoteCmd, cp, f), nil +} + +func istats(host string) error { + u, err := queryStatsURL("istats", "stats", host) + if err != nil { + return err + } + stats, err := getStats(u) + if err != nil { + return err + } + + if len(stats.Instances) == 0 { + return nil + } + + w := new(tabwriter.Writer) + + w.Init(os.Stdout, 0, 8, 0, '\t', 0) + fmt.Fprintln(w, "UUID\tStatus\tSSH\tMem\tDisk\tCPU") + for _, i := range stats.Instances { + fmt.Fprintf(w, "%s\t%s\t%s:%d\t%d MB\t%d MB\t%d%%\n", + i.InstanceUUID, + i.State, + i.SSHIP, i.SSHPort, + i.MemoryUsageMB, + i.DiskUsageMB, + i.CPUUsage) + } + w.Flush() + + return nil +} + +func stats(host string) error { + fs := flag.NewFlagSet("stats", flag.ExitOnError) + cp := "" + fs.StringVar(&cp, "client", "", "UUID of client") 
+ + if err := fs.Parse(flag.Args()[1:]); err != nil { + return err + } + + u := queryURL(host, "stats", cp, "") + stats, err := getStats(u) + if err != nil { + return err + } + + if len(stats.Instances) == 0 { + return nil + } + + w := new(tabwriter.Writer) + + running := 0 + pending := 0 + exited := 0 + unknown := 0 + + for i := range stats.Instances { + switch stats.Instances[i].State { + case payloads.Pending: + pending++ + case payloads.Running: + running++ + case payloads.Exited: + exited++ + default: + unknown++ + } + } + + w.Init(os.Stdout, 0, 8, 0, '\t', 0) + fmt.Fprintf(w, "NodeUUID:\t %s\n", stats.NodeUUID) + fmt.Fprintf(w, "Status:\t %s\n", stats.Status) + fmt.Fprintf(w, "MemTotal:\t %d MB\n", stats.MemTotalMB) + fmt.Fprintf(w, "MemAvailable:\t %d MB\n", stats.MemAvailableMB) + fmt.Fprintf(w, "DiskTotal:\t %d MB\n", stats.DiskTotalMB) + fmt.Fprintf(w, "DiskAvailable:\t %d MB\n", stats.DiskAvailableMB) + fmt.Fprintf(w, "Load:\t %d\n", stats.Load) + fmt.Fprintf(w, "CpusOnline:\t %d\n", stats.CpusOnline) + fmt.Fprintf(w, "NodeHostName:\t %s\n", stats.NodeHostName) + if len(stats.Networks) == 1 { + fmt.Fprintf(w, "NodeIP:\t %s\n", stats.Networks[0].NodeIP) + fmt.Fprintf(w, "NodeMAC:\t %s\n", stats.Networks[0].NodeMAC) + } + for i, n := range stats.Networks { + fmt.Fprintf(w, "NodeIP-%d:\t %s\n", i+1, n.NodeIP) + fmt.Fprintf(w, "NodeMAC-%d:\t %s\n", i+1, n.NodeMAC) + } + if unknown == 0 { + fmt.Fprintf(w, "Instances:\t %d (%d running %d exited %d pending)\n", + len(stats.Instances), running, exited, pending) + } else { + fmt.Fprintf(w, "Instances:\t %d (%d running %d exited %d pending %d other)\n", + len(stats.Instances), running, exited, pending, unknown) + } + w.Flush() + + return nil +} + +func status(host string) error { + fs := flag.NewFlagSet("status", flag.ExitOnError) + cp := "" + fs.StringVar(&cp, "client", "", "UUID of client") + + if err := fs.Parse(flag.Args()[1:]); err != nil { + return err + } + u := queryURL(host, "status", cp, "") + + body, 
err := grabBody(u) + if err != nil { + return err + } + + var status payloads.Ready + err = yaml.Unmarshal(body, &status) + if err != nil { + return err + } + + w := new(tabwriter.Writer) + + w.Init(os.Stdout, 0, 8, 0, '\t', 0) + fmt.Fprintf(w, "NodeUUID:\t %s\n", status.NodeUUID) + fmt.Fprintf(w, "MemTotal:\t %d MB\n", status.MemTotalMB) + fmt.Fprintf(w, "MemAvailable:\t %d MB\n", status.MemAvailableMB) + fmt.Fprintf(w, "DiskTotal:\t %d MB\n", status.DiskTotalMB) + fmt.Fprintf(w, "DiskAvailable:\t %d MB\n", status.DiskAvailableMB) + fmt.Fprintf(w, "Load:\t %d\n", status.Load) + fmt.Fprintf(w, "CpusOnline:\t %d\n", status.CpusOnline) + w.Flush() + + return nil +} + +func drain(host string) error { + fs := flag.NewFlagSet("drain", flag.ExitOnError) + cp := "" + fs.StringVar(&cp, "client", "", "UUID of client") + + if err := fs.Parse(flag.Args()[1:]); err != nil { + return err + } + u := queryURL(host, "drain", cp, "") + + body, err := grabBody(u) + if err != nil { + return err + } + + fmt.Println(string(body)) + return nil +} + +func getSimplePostArgs(cmd string) (string, string, error) { + fs := flag.NewFlagSet(cmd, flag.ExitOnError) + cp := "" + fs.StringVar(&cp, "client", "", "UUID of client") + + if err := fs.Parse(flag.Args()[1:]); err != nil { + return "", "", err + } + + instance := fs.Arg(0) + if instance == "" { + return "", "", fmt.Errorf("Missing instance-uuid") + } + + return cp, instance, nil +} + +func postYaml(host, cmd, client string, data interface{}) error { + u := queryURL(host, cmd, client, "") + payload, err := yaml.Marshal(data) + if err != nil { + return err + } + resp, err := http.Post(u, "text/yaml", bytes.NewBuffer(payload)) + resp.Body.Close() + return err +} + +func startf(host string) error { + client, path, err := getSimplePostArgs("start") + if err != nil { + return err + } + + payload, err := ioutil.ReadFile(path) + if err != nil { + return err + } + + u := queryURL(host, "start", client, "") + resp, err := http.Post(u, "text/yaml", 
bytes.NewBuffer(payload)) + resp.Body.Close() + return err +} + +func stop(host string) error { + var stop payloads.Stop + + client, instance, err := getSimplePostArgs("stop") + if err != nil { + return err + } + + stop.Stop.InstanceUUID = instance + return postYaml(host, "stop", client, &stop) +} + +func restart(host string) error { + var restart payloads.Restart + + client, instance, err := getSimplePostArgs("restart") + if err != nil { + return err + } + + restart.Restart.InstanceUUID = instance + return postYaml(host, "restart", client, &restart) +} + +func del(host string) error { + var del payloads.Delete + + client, instance, err := getSimplePostArgs("delete") + if err != nil { + return err + } + + del.Delete.InstanceUUID = instance + return postYaml(host, "delete", client, &del) +} + +func main() { + + flag.Parse() + if len(flag.Args()) < 1 { + flag.Usage() + os.Exit(1) + } + + cmdMap := map[string]func(string) error{ + "clients": clients, + "instances": instances, + "istats": istats, + "stats": stats, + "status": status, + "stop": stop, + "restart": restart, + "delete": del, + "drain": drain, + "startf": startf, + } + + cmd := cmdMap[os.Args[1]] + if cmd == nil { + fmt.Fprintf(os.Stderr, "Unknown command %s\n", os.Args[1]) + os.Exit(1) + } + + if err := cmd(serverURL); err != nil { + fmt.Fprintf(os.Stderr, "Error: %v\n", err) + os.Exit(1) + } +} diff --git a/ciao-launcher/tests/ciaolc/start_container.yaml b/ciao-launcher/tests/ciaolc/start_container.yaml new file mode 100644 index 000000000..0fcdec8f5 --- /dev/null +++ b/ciao-launcher/tests/ciaolc/start_container.yaml @@ -0,0 +1,30 @@ +--- +start: + requested_resources: + - type: vcpus + value: 1 + - type: mem_mb + value: 16 + instance_uuid: 67d86208-b46c-4465-9018-e14087d415f + tenant_uuid: 67d86208-000-4465-9018-fe14087d415f + docker_image: ubuntu:latest + vm_type: docker + networking: + vnic_mac: CA:FE:00:02:02:03 + vnic_uuid: 67d86208-b46c-0000-9018-fe14087d415f + concentrator_ip: 192.168.200.200 + 
concentrator_uuid: 67d86208-b46c-4465-0000-fe14087d415f + subnet: 192.168.111.0/24 + private_ip: 192.168.111.102 +... +--- +#cloud-config +runcmd: + - [ /usr/bin/python3, -m, http.server] +... +--- +{ + "uuid": "67d86208-b46c-4465-0000-fe14087d415f", + "hostname": "ciao" +} +... diff --git a/ciao-launcher/tests/examples/delete_legacy.yaml b/ciao-launcher/tests/examples/delete_legacy.yaml new file mode 100644 index 000000000..05f6ea40e --- /dev/null +++ b/ciao-launcher/tests/examples/delete_legacy.yaml @@ -0,0 +1,2 @@ +delete: + instance_uuid: d7d86208-b46c-4465-9018-fe14087d415f diff --git a/ciao-launcher/tests/examples/restart_legacy.yaml b/ciao-launcher/tests/examples/restart_legacy.yaml new file mode 100644 index 000000000..a47b4ce83 --- /dev/null +++ b/ciao-launcher/tests/examples/restart_legacy.yaml @@ -0,0 +1,2 @@ +restart: + instance_uuid: d7d86208-b46c-4465-9018-fe14087d415f diff --git a/ciao-launcher/tests/examples/start_efi.yaml b/ciao-launcher/tests/examples/start_efi.yaml new file mode 100644 index 000000000..5bbed7be4 --- /dev/null +++ b/ciao-launcher/tests/examples/start_efi.yaml @@ -0,0 +1,39 @@ +--- +start: + requested_resources: + - type: vcpus + value: 2 + - type: mem_mb + value: 256 + - type: disk_mb + value: 80000 + instance_uuid: 67d86208-b46c-4465-9018-fe14087d415 + tenant_uuid: 67d86208-000-4465-9018-fe14087d415f + image_uuid: clear-1ff6bf3883708a56446d863f20c810c99b3aea6f + networking: + vnic_mac: 02:00:fa:69:71:d0 + vnic_uuid: 00d86208-b46c-0000-9018-fe14087d415f + concentrator_ip: 192.168.42.21 + concentrator_uuid: 67d86208-b46c-4465-0000-fe14087d415f + subnet: 192.168.8.0/21 + private_ip: 192.168.8.3 +... 
+--- +#cloud-config +runcmd: + - [ touch, "/etc/bootdone" ] +users: + - name: ciao + gecos: CIAO Rules + lock-passwd: false + passwd: \\$1\\$vzmNmLLD\\$04bivxcjdXRzZLUd.enRl1 + sudo: ciao ALL=(ALL) NOPASSWD:ALL + ssh-authorized-keys: + - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDerQfD+qkb0V0XdQs8SBWqy4sQmqYFP96n/kI4Cq162w4UE8pTxy0ozAPldOvBJjljMvgaNKSAddknkhGcrNUvvJsUcZFm2qkafi32WyBdGFvIc45A+8O7vsxPXgHEsS9E3ylEALXAC3D0eX7pPtRiAbasLlY+VcACRqr3bPDSZTfpCmIkV2334uZD9iwOvTVeR+FjGDqsfju4DyzoAIqpPasE0+wk4Vbog7osP+qvn1gj5kQyusmr62+t0wx+bs2dF5QemksnFOswUrv9PGLhZgSMmDQrRYuvEfIAC7IdN/hfjTn0OokzljBiuWQ4WIIba/7xTYLVujJV65qH3heaSMxJJD7eH9QZs9RdbbdTXMFuJFsHV2OF6wZRp18tTNZZJMqiHZZSndC5WP1WrUo3Au/9a+ighSaOiVddHsPG07C/TOEnr3IrwU7c9yIHeeRFHmcQs9K0+n9XtrmrQxDQ9/mLkfje80Ko25VJ/QpAQPzCKh2KfQ4RD+/PxBUScx/lHIHOIhTSCh57ic629zWgk0coSQDi4MKSa5guDr3cuDvt4RihGviDM6V68ewsl0gh6Z9c0Hw7hU0vky4oxak5AiySiPz0FtsOnAzIL0UON+yMuKzrJgLjTKodwLQ0wlBXu43cD+P8VXwQYeqNSzfrhBnHqsrMf4lTLtc7kDDTcw== ciao@ciao +... +--- +{ + "uuid": "ciao", + "hostname": "ciao" +} +... diff --git a/ciao-launcher/tests/examples/start_legacy.yaml b/ciao-launcher/tests/examples/start_legacy.yaml new file mode 100644 index 000000000..a134bfc86 --- /dev/null +++ b/ciao-launcher/tests/examples/start_legacy.yaml @@ -0,0 +1,41 @@ +--- +start: + requested_resources: + - type: vcpus + value: 2 + - type: mem_mb + value: 370 + - type: disk_mb + value: 8000 + instance_uuid: d7d86208-b46c-4465-9018-fe14087d415f + tenant_uuid: 67d86208-000-4465-9018-fe14087d415f + image_uuid: b286cd45-7d0c-4525-a140-4db6c95e41fa + fw_type: legacy + networking: + vnic_mac: 02:00:e6:f5:af:f9 + vnic_uuid: 67d86208-b46c-0000-9018-fe14087d415f + concentrator_ip: 192.168.42.21 + concentrator_uuid: 67d86208-b46c-4465-0000-fe14087d415f + subnet: 192.168.8.0/21 + private_ip: 192.168.8.2 +... 
+--- +#cloud-config +runcmd: + - [ touch, "/etc/bootdone" ] +users: + - name: ciao + gecos: CIAO Rules + lock-passwd: false + passwd: \\$1\\$vzmNmLLD\\$04bivxcjdXRzZLUd.enRl1 + sudo: ciao ALL=(ALL) NOPASSWD:ALL + ssh-authorized-keys: + - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDerQfD+qkb0V0XdQs8SBWqy4sQmqYFP96n/kI4Cq162w4UE8pTxy0ozAPldOvBJjljMvgaNKSAddknkhGcrNUvvJsUcZFm2qkafi32WyBdGFvIc45A+8O7vsxPXgHEsS9E3ylEALXAC3D0eX7pPtRiAbasLlY+VcACRqr3bPDSZTfpCmIkV2334uZD9iwOvTVeR+FjGDqsfju4DyzoAIqpPasE0+wk4Vbog7osP+qvn1gj5kQyusmr62+t0wx+bs2dF5QemksnFOswUrv9PGLhZgSMmDQrRYuvEfIAC7IdN/hfjTn0OokzljBiuWQ4WIIba/7xTYLVujJV65qH3heaSMxJJD7eH9QZs9RdbbdTXMFuJFsHV2OF6wZRp18tTNZZJMqiHZZSndC5WP1WrUo3Au/9a+ighSaOiVddHsPG07C/TOEnr3IrwU7c9yIHeeRFHmcQs9K0+n9XtrmrQxDQ9/mLkfje80Ko25VJ/QpAQPzCKh2KfQ4RD+/PxBUScx/lHIHOIhTSCh57ic629zWgk0coSQDi4MKSa5guDr3cuDvt4RihGviDM6V68ewsl0gh6Z9c0Hw7hU0vky4oxak5AiySiPz0FtsOnAzIL0UON+yMuKzrJgLjTKodwLQ0wlBXu43cD+P8VXwQYeqNSzfrhBnHqsrMf4lTLtc7kDDTcw== ciao@ciao +s +... +--- +{ + "uuid": "ciao", + "hostname": "ciao" +} +... diff --git a/ciao-launcher/tests/examples/start_nn.yaml b/ciao-launcher/tests/examples/start_nn.yaml new file mode 100644 index 000000000..8cc1bfdc1 --- /dev/null +++ b/ciao-launcher/tests/examples/start_nn.yaml @@ -0,0 +1,28 @@ +--- +#cloud-config +runcmd: + - [ touch, "/etc/bootdone" ] +start: + requested_resources: + - type: vcpus + value: 2 + - type: mem_mb + value: 256 + - type: disk_mb + value: 80000 + - type: network_node + value: 1 + instance_uuid: 67d86208-b46c-4465-0000-fe14087d415f + tenant_uuid: 67d86208-0000-0000-9018-fe14087d415f + image_uuid: b286cd45-7d0c-4525-a140-4db6c95e41fa + fw_type: legacy + networking: + vnic_mac: 02:00:e6:f5:af:f9 + vnic_uuid: 67d86208-b46c-0000-0000-fe14087d415f +... +--- +{ + "uuid": "ciao", + "hostname": "ciao" +} +... 
\ No newline at end of file diff --git a/ciao-launcher/tests/examples/stop_legacy.yaml b/ciao-launcher/tests/examples/stop_legacy.yaml new file mode 100644 index 000000000..4e1ec9c94 --- /dev/null +++ b/ciao-launcher/tests/examples/stop_legacy.yaml @@ -0,0 +1,2 @@ +stop: + instance_uuid: d7d86208-b46c-4465-9018-fe14087d415f diff --git a/ciao-launcher/tests/examples/stress/loop.sh b/ciao-launcher/tests/examples/stress/loop.sh new file mode 100755 index 000000000..778e5dbd7 --- /dev/null +++ b/ciao-launcher/tests/examples/stress/loop.sh @@ -0,0 +1,8 @@ +#!/bin/bash + +for i in $(seq -f "%g" 0 9) +do + ciaolc startf start_container-$i.yaml +done + + diff --git a/ciao-launcher/tests/examples/stress/start_container-0.yaml b/ciao-launcher/tests/examples/stress/start_container-0.yaml new file mode 100644 index 000000000..84ca892a2 --- /dev/null +++ b/ciao-launcher/tests/examples/stress/start_container-0.yaml @@ -0,0 +1,30 @@ +--- +start: + requested_resources: + - type: vcpus + value: 1 + - type: mem_mb + value: 16 + instance_uuid: 67d86208-b46c-4465-9018-e14187d415f + tenant_uuid: 67d86208-000-4465-9018-fe14087d415f + docker_image: ubuntu:latest + vm_type: docker + networking: + vnic_mac: CA:FE:00:02:02:03 + vnic_uuid: 67d86208-b46c-0000-9018-fe14087d415f + concentrator_ip: 192.168.200.200 + concentrator_uuid: 67d86208-b46c-4465-0000-fe14087d415f + subnet: 192.168.111.0/24 + private_ip: 192.168.111.102 +... +--- +#cloud-config +runcmd: + - [ /usr/bin/python3, -m, http.server] +... +--- +{ + "uuid": "67d86208-b46c-4465-0000-fe14087d415f", + "hostname": "ciao" +} +... 
diff --git a/ciao-launcher/tests/examples/stress/start_container-1.yaml b/ciao-launcher/tests/examples/stress/start_container-1.yaml new file mode 100644 index 000000000..d40f2df39 --- /dev/null +++ b/ciao-launcher/tests/examples/stress/start_container-1.yaml @@ -0,0 +1,30 @@ +--- +start: + requested_resources: + - type: vcpus + value: 1 + - type: mem_mb + value: 16 + instance_uuid: 67d86208-b46c-4465-9018-e14287d415f + tenant_uuid: 67d86208-000-4465-9018-fe14087d415f + docker_image: ubuntu:latest + vm_type: docker + networking: + vnic_mac: CA:FE:00:02:02:03 + vnic_uuid: 67d86208-b46c-0000-9018-fe14087d415f + concentrator_ip: 192.168.200.200 + concentrator_uuid: 67d86208-b46c-4465-0000-fe14087d415f + subnet: 192.168.111.0/24 + private_ip: 192.168.111.103 +... +--- +#cloud-config +runcmd: + - [ /usr/bin/python3, -m, http.server] +... +--- +{ + "uuid": "67d86208-b46c-4465-0000-fe14087d415f", + "hostname": "ciao" +} +... diff --git a/ciao-launcher/tests/examples/stress/start_container-2.yaml b/ciao-launcher/tests/examples/stress/start_container-2.yaml new file mode 100644 index 000000000..f8138666f --- /dev/null +++ b/ciao-launcher/tests/examples/stress/start_container-2.yaml @@ -0,0 +1,30 @@ +--- +start: + requested_resources: + - type: vcpus + value: 1 + - type: mem_mb + value: 16 + instance_uuid: 67d86208-b46c-4465-9018-e14387d415f + tenant_uuid: 67d86208-000-4465-9018-fe14087d415f + docker_image: ubuntu:latest + vm_type: docker + networking: + vnic_mac: CA:FE:00:02:02:03 + vnic_uuid: 67d86208-b46c-0000-9018-fe14087d415f + concentrator_ip: 192.168.200.200 + concentrator_uuid: 67d86208-b46c-4465-0000-fe14087d415f + subnet: 192.168.111.0/24 + private_ip: 192.168.111.104 +... +--- +#cloud-config +runcmd: + - [ /usr/bin/python3, -m, http.server] +... +--- +{ + "uuid": "67d86208-b46c-4465-0000-fe14087d415f", + "hostname": "ciao" +} +... 
diff --git a/ciao-launcher/tests/examples/stress/start_container-3.yaml b/ciao-launcher/tests/examples/stress/start_container-3.yaml new file mode 100644 index 000000000..9bc7ac249 --- /dev/null +++ b/ciao-launcher/tests/examples/stress/start_container-3.yaml @@ -0,0 +1,30 @@ +--- +start: + requested_resources: + - type: vcpus + value: 1 + - type: mem_mb + value: 16 + instance_uuid: 67d86208-b46c-4465-9018-e14487d415f + tenant_uuid: 67d86208-000-4465-9018-fe14087d415f + docker_image: ubuntu:latest + vm_type: docker + networking: + vnic_mac: CA:FE:00:02:02:03 + vnic_uuid: 67d86208-b46c-0000-9018-fe14087d415f + concentrator_ip: 192.168.200.200 + concentrator_uuid: 67d86208-b46c-4465-0000-fe14087d415f + subnet: 192.168.111.0/24 + private_ip: 192.168.111.105 +... +--- +#cloud-config +runcmd: + - [ /usr/bin/python3, -m, http.server] +... +--- +{ + "uuid": "67d86208-b46c-4465-0000-fe14087d415f", + "hostname": "ciao" +} +... diff --git a/ciao-launcher/tests/examples/stress/start_container-4.yaml b/ciao-launcher/tests/examples/stress/start_container-4.yaml new file mode 100644 index 000000000..f549a3ad0 --- /dev/null +++ b/ciao-launcher/tests/examples/stress/start_container-4.yaml @@ -0,0 +1,30 @@ +--- +start: + requested_resources: + - type: vcpus + value: 1 + - type: mem_mb + value: 16 + instance_uuid: 67d86208-b46c-4465-9018-e14587d415f + tenant_uuid: 67d86208-000-4465-9018-fe14087d415f + docker_image: ubuntu:latest + vm_type: docker + networking: + vnic_mac: CA:FE:00:02:02:03 + vnic_uuid: 67d86208-b46c-0000-9018-fe14087d415f + concentrator_ip: 192.168.200.200 + concentrator_uuid: 67d86208-b46c-4465-0000-fe14087d415f + subnet: 192.168.111.0/24 + private_ip: 192.168.111.106 +... +--- +#cloud-config +runcmd: + - [ /usr/bin/python3, -m, http.server] +... +--- +{ + "uuid": "67d86208-b46c-4465-0000-fe14087d415f", + "hostname": "ciao" +} +... 
diff --git a/ciao-launcher/tests/examples/stress/start_container-5.yaml b/ciao-launcher/tests/examples/stress/start_container-5.yaml new file mode 100644 index 000000000..b283e5580 --- /dev/null +++ b/ciao-launcher/tests/examples/stress/start_container-5.yaml @@ -0,0 +1,30 @@ +--- +start: + requested_resources: + - type: vcpus + value: 1 + - type: mem_mb + value: 16 + instance_uuid: 67d86208-b46c-4465-9018-e14687d415f + tenant_uuid: 67d86208-000-4465-9018-fe14087d415f + docker_image: ubuntu:latest + vm_type: docker + networking: + vnic_mac: CA:FE:00:02:02:03 + vnic_uuid: 67d86208-b46c-0000-9018-fe14087d415f + concentrator_ip: 192.168.200.200 + concentrator_uuid: 67d86208-b46c-4465-0000-fe14087d415f + subnet: 192.168.111.0/24 + private_ip: 192.168.111.107 +... +--- +#cloud-config +runcmd: + - [ /usr/bin/python3, -m, http.server] +... +--- +{ + "uuid": "67d86208-b46c-4465-0000-fe14087d415f", + "hostname": "ciao" +} +... diff --git a/ciao-launcher/tests/examples/stress/start_container-6.yaml b/ciao-launcher/tests/examples/stress/start_container-6.yaml new file mode 100644 index 000000000..407a7b6f4 --- /dev/null +++ b/ciao-launcher/tests/examples/stress/start_container-6.yaml @@ -0,0 +1,30 @@ +--- +start: + requested_resources: + - type: vcpus + value: 1 + - type: mem_mb + value: 16 + instance_uuid: 67d86208-b46c-4465-9018-e14787d415f + tenant_uuid: 67d86208-000-4465-9018-fe14087d415f + docker_image: ubuntu:latest + vm_type: docker + networking: + vnic_mac: CA:FE:00:02:02:03 + vnic_uuid: 67d86208-b46c-0000-9018-fe14087d415f + concentrator_ip: 192.168.200.200 + concentrator_uuid: 67d86208-b46c-4465-0000-fe14087d415f + subnet: 192.168.111.0/24 + private_ip: 192.168.111.108 +... +--- +#cloud-config +runcmd: + - [ /usr/bin/python3, -m, http.server] +... +--- +{ + "uuid": "67d86208-b46c-4465-0000-fe14087d415f", + "hostname": "ciao" +} +... 
diff --git a/ciao-launcher/tests/examples/stress/start_container-7.yaml b/ciao-launcher/tests/examples/stress/start_container-7.yaml new file mode 100644 index 000000000..3fb50e1a4 --- /dev/null +++ b/ciao-launcher/tests/examples/stress/start_container-7.yaml @@ -0,0 +1,30 @@ +--- +start: + requested_resources: + - type: vcpus + value: 1 + - type: mem_mb + value: 16 + instance_uuid: 67d86208-b46c-4465-9018-e14887d415f + tenant_uuid: 67d86208-000-4465-9018-fe14087d415f + docker_image: ubuntu:latest + vm_type: docker + networking: + vnic_mac: CA:FE:00:02:02:03 + vnic_uuid: 67d86208-b46c-0000-9018-fe14087d415f + concentrator_ip: 192.168.200.200 + concentrator_uuid: 67d86208-b46c-4465-0000-fe14087d415f + subnet: 192.168.111.0/24 + private_ip: 192.168.111.109 +... +--- +#cloud-config +runcmd: + - [ /usr/bin/python3, -m, http.server] +... +--- +{ + "uuid": "67d86208-b46c-4465-0000-fe14087d415f", + "hostname": "ciao" +} +... diff --git a/ciao-launcher/tests/examples/stress/start_container-8.yaml b/ciao-launcher/tests/examples/stress/start_container-8.yaml new file mode 100644 index 000000000..470ab6a29 --- /dev/null +++ b/ciao-launcher/tests/examples/stress/start_container-8.yaml @@ -0,0 +1,30 @@ +--- +start: + requested_resources: + - type: vcpus + value: 1 + - type: mem_mb + value: 16 + instance_uuid: 67d86208-b46c-4465-9018-e14987d415f + tenant_uuid: 67d86208-000-4465-9018-fe14087d415f + docker_image: ubuntu:latest + vm_type: docker + networking: + vnic_mac: CA:FE:00:02:02:03 + vnic_uuid: 67d86208-b46c-0000-9018-fe14087d415f + concentrator_ip: 192.168.200.200 + concentrator_uuid: 67d86208-b46c-4465-0000-fe14087d415f + subnet: 192.168.111.0/24 + private_ip: 192.168.111.110 +... +--- +#cloud-config +runcmd: + - [ /usr/bin/python3, -m, http.server] +... +--- +{ + "uuid": "67d86208-b46c-4465-0000-fe14087d415f", + "hostname": "ciao" +} +... 
diff --git a/ciao-launcher/tests/examples/stress/start_container-9.yaml b/ciao-launcher/tests/examples/stress/start_container-9.yaml new file mode 100644 index 000000000..fee323172 --- /dev/null +++ b/ciao-launcher/tests/examples/stress/start_container-9.yaml @@ -0,0 +1,30 @@ +--- +start: + requested_resources: + - type: vcpus + value: 1 + - type: mem_mb + value: 16 + instance_uuid: 67d86208-b46c-4465-9018-e14997d415f + tenant_uuid: 67d86208-000-4465-9018-fe14087d415f + docker_image: ubuntu:latest + vm_type: docker + networking: + vnic_mac: CA:FE:00:02:02:03 + vnic_uuid: 67d86208-b46c-0000-9018-fe14087d415f + concentrator_ip: 192.168.200.200 + concentrator_uuid: 67d86208-b46c-4465-0000-fe14087d415f + subnet: 192.168.111.0/24 + private_ip: 192.168.111.111 +... +--- +#cloud-config +runcmd: + - [ /usr/bin/python3, -m, http.server] +... +--- +{ + "uuid": "67d86208-b46c-4465-0000-fe14087d415f", + "hostname": "ciao" +} +... diff --git a/ciao-launcher/ui_debug.go b/ciao-launcher/ui_debug.go new file mode 100644 index 000000000..d0e15501e --- /dev/null +++ b/ciao-launcher/ui_debug.go @@ -0,0 +1,25 @@ +// Copyright (c) 2016 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// +build debug + +package main + +import "flag" + +var launchWithUI uiFlag = "nc" + +func init() { + flag.Var(&launchWithUI, "with-ui", "Enables virtual consoles on VM instances. 
Can be 'none', 'spice', 'nc'") +} diff --git a/ciao-launcher/ui_release.go b/ciao-launcher/ui_release.go new file mode 100644 index 000000000..e938bea39 --- /dev/null +++ b/ciao-launcher/ui_release.go @@ -0,0 +1,19 @@ +// Copyright (c) 2016 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// +build !debug + +package main + +var launchWithUI uiFlag = "none" diff --git a/ciao-launcher/virtualizer.go b/ciao-launcher/virtualizer.go new file mode 100644 index 000000000..3e5fb6dd7 --- /dev/null +++ b/ciao-launcher/virtualizer.go @@ -0,0 +1,126 @@ +/* +// Copyright (c) 2016 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+*/ + +package main + +import ( + "errors" + "sync" +) + +const ( + virtualizerStartCmd = "START" + virtualizerStopCmd = "STOP" +) + +var errImageNotFound = errors.New("Image Not Found") + +//BUG(markus): These methods need to be cancellable +//BUG(markus): How do we deal with locally cached images getting stale? + +// The virtualizer interface is designed to isolate launcher, and in particular, +// functions that run in the instance go routine, from the underlying virtualisation +// technologies used to launch and manage VMs and containers. +// All the methods on the virtualizer interface will be called serially by the instance +// go routine. Therefore there is no need to synchronise data between the virtualizer's +// methods. +type virtualizer interface { + // Initialise the virtualizer. cfg contains the configuration information from + // the START payload that originally created the instance. InstanceDir is the + // directory launcher has assigned to this instance. The virtualizer is free + // to store any data it likes in this directory. + init(cfg *vmConfig, instanceDir string) + + // Checks to see if the backing image is present on the node and whether it + // is capable of serving as a backing image for the new instance. A return + // value of nil means everything is okay, errImageNotFound indicates that the + // image is not present on the node, and any other error means that the + // image is present but cannot serve as the backing image for the current + // request. + checkBackingImage() error + + // Download backing image + downloadBackingImage() error + + // Creates the rootfs, and any supporting images, for a new instance + // bridge: name of the bridge if any. Needed for docker containers + // userData: cloudinit userdata payload + // metaData: cloudinit metaData payload + createImage(bridge string, userData, metaData []byte) error + + // Deletes any state related to the instance that is not stored in the + // instance directory.
State stored in the instance directory will be automatically + // deleted by the instance go routine. + deleteImage() error + + // Boots a VM. This method is called by both START and RESTART. + startVM(vnicName, ipAddress string) error + + //BUG(markus): Need to use context rather than the monitor channel to + //detect when we need to quit. + + // Monitors a newly started VM. It is intended to start one or more + // go routines to monitor the instance and return immediately with a + // channel that the instance go routine can use to communicate with the + // monitored instance. + // + // closedCh: should be closed by this method, or a go routine that it spawns, + // when it is determined that the VM or container that is being monitored is + // not running. + // connectedCh: Should be closed by this method, or a go routine that it spawns, + // when it is determined that the VM or container that is being monitored is + // running. + // wg: wg.Add should be called before any go routines started by this method + // are launched. wg.Done should be called by these go routines before they + // exit. The instance go routine will use this wg to wait until all go routines + // launched by this method have closed down before it itself exits. + // boot: indicates whether monitorVM has been called during launcher startup, + // in which case it's true. I'm not sure this is needed. It might get removed + // shortly. + // + // Returns a channel. The instance go routine uses this channel for two purposes: + // 1. It sends commands down the channel, e.g., stop VM. + // 2. It closes the channel when it is itself asked to shutdown. When the channel is + // closed, any go routines returned by monitor vm should shutdown. + monitorVM(closedCh chan struct{}, connectedCh chan struct{}, + wg *sync.WaitGroup, boot bool) chan string + + // Returns current statistics for the instance. + // disk: Size of the VM/container rootfs in GB or -1 if not known.
+ // memory: Amount of memory used by the VM or container process, in MB + // cpu: Normalized CPU time of VM or container process + stats() (disk, memory, cpu int) + + // connected is called by the instance go routine to inform the virtualizer that + // the VM is running. The virtualizer can use this notification to perform some + // bookkeeping, for example determine the pid of the underlying process. It may + // seem slightly odd that this function exists. After all, it's a goroutine + // spawned by the monitorVM function that initially informs the instance go + // routine that the VM is connected. The problem is that all virtualizer methods + // need to be called by the instance go routine. If the virtualizer were to modify + // its own state directly from a go routine spawned by monitorVM, mutexes would + // be needed. Perhaps this would be a better design. However, for the time being + // connected exists and is called. + connected() + + // Similar to connected. This function is called by the instance go routine when + // it detects that the VM or container has stopped running. As with connected, it + // is a go routine spawned by monitorVM that originally detects that the VM has gone + // down and signals the instance go routine of this fact by closing the closedCh. + // The instance go routine then calls lostVM so that the virtualizer can update + // its internal state. + lostVM() +} diff --git a/ciao-scheduler/README.md b/ciao-scheduler/README.md new file mode 100644 index 000000000..5b31b237a --- /dev/null +++ b/ciao-scheduler/README.md @@ -0,0 +1,4 @@ +The ciao scheduler implements an ssntp server to receive workloads from +the ciao controller system(s), to receive status changes regarding ciao +compute node (cn) resources and launched workload instances, and to +reply to nodes who've checked in by giving them work.
diff --git a/ciao-scheduler/fake_agent.go b/ciao-scheduler/fake_agent.go new file mode 100644 index 000000000..0e4f192fe --- /dev/null +++ b/ciao-scheduler/fake_agent.go @@ -0,0 +1,215 @@ +// Copyright (c) 2016 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// +build ignore + +package main + +import ( + "flag" + "fmt" + "github.com/01org/ciao/payloads" + "github.com/01org/ciao/ssntp" + "gopkg.in/yaml.v2" + "math/rand" + "os" + "runtime/pprof" + "sync" + "time" +) + +type ssntpClient struct { + ssntp ssntp.Client + name string + nCommands int +} + +func (client *ssntpClient) ConnectNotify() { + fmt.Printf("%s connected\n", client.name) +} + +func (client *ssntpClient) DisconnectNotify() { + fmt.Printf("%s disconnected\n", client.name) +} + +func (client *ssntpClient) StatusNotify(status ssntp.Status, frame *ssntp.Frame) { +} + +func (client *ssntpClient) CommandNotify(command ssntp.Command, frame *ssntp.Frame) { + payload := frame.Payload + client.nCommands++ + + if command == ssntp.START { + var cmd payloads.Start + var memReqMB int + err := yaml.Unmarshal(payload, &cmd) + if err != nil { + fmt.Println("bad START workload yaml from Controller") + return + } + statsMutex.Lock() + for idx := range cmd.Start.RequestedResources { + if cmd.Start.RequestedResources[idx].Type == payloads.MemMB { + memReqMB = cmd.Start.RequestedResources[idx].Value + } + } + stats.MemAvailableMB -= memReqMB + statsMutex.Unlock() + } +} + +func (client 
*ssntpClient) EventNotify(event ssntp.Event, frame *ssntp.Frame) { +} + +func (client *ssntpClient) ErrorNotify(error ssntp.Error, frame *ssntp.Frame) { + fmt.Printf("ERROR for %s\n", client.name) +} + +var stats payloads.Stat +var statsMutex sync.Mutex + +func fakeStatisticsThread(config *ssntp.Config, nFrames int, wg *sync.WaitGroup) { + defer wg.Done() + + client := &ssntpClient{ + name: "Ciao fake Agent", + nCommands: 0, + } + + fmt.Printf("----- Client [%s] delay [random] frames [%d] -----\n", client.ssntp.UUID()[:8], nFrames) + + if client.ssntp.Dial(config, client) != nil { + fmt.Printf("Could not connect to an SSNTP server\n") + return + } + fmt.Printf("Client [%s]: Connected\n", client.ssntp.UUID()[:8]) + + //pretend it takes some time to start up a node + time.Sleep(time.Duration(1) * time.Second) + fmt.Printf("...warming up\n") + time.Sleep(time.Duration(3) * time.Second) + + //dummy initial stats + statsMutex.Lock() + stats.Init() + stats.NodeUUID = client.ssntp.UUID() + stats.MemTotalMB = 3896 // 4096 - 200 overhead + stats.MemAvailableMB = 3896 // start with "all" + stats.Load = 0 + payload, err := yaml.Marshal(&stats) + statsMutex.Unlock() + if err != nil { + fmt.Printf("Could not create STATS yaml: %s\n", err) + return + } + time.Sleep(time.Duration(1) * time.Second) + + //and now we're READY + sentFrames := 0 + _, err = client.ssntp.SendStatus(ssntp.READY, payload) + if err != nil { + fmt.Printf("Could not send READY: %s\n", err) + return + } else { + sentFrames++ + } + + for i := 0; i < nFrames; i++ { + // 1..~2 seconds delay between commands + delay := rand.Intn(2000) + time.Sleep(time.Duration(delay) * time.Millisecond) + fmt.Printf("Client [%s]: delay %d\n", client.ssntp.UUID()[:8], delay) + + statsMutex.Lock() + payload, err = yaml.Marshal(&stats) + statsMutex.Unlock() + if err != nil { + fmt.Printf("Could not create READY yaml: %s\n", err) + } + + fmt.Printf("payload[%d]:%s\n", len(payload), payload) + + _, err = 
client.ssntp.SendStatus(ssntp.READY, payload) + if err != nil { + fmt.Printf("Could not send READY: %s\n", err) + } else { + sentFrames++ + } + + // "adjust" mem & load stats + memdelta := rand.Intn(100) + loaddelta := rand.Intn(10) + statsMutex.Lock() + if rand.Intn(2) == 0 { + stats.MemAvailableMB -= memdelta + } else { + stats.MemAvailableMB += memdelta + } + if stats.MemAvailableMB > stats.MemTotalMB { + stats.MemAvailableMB = stats.MemTotalMB + } + if rand.Intn(2) == 0 { + stats.Load -= loaddelta + } else { + stats.Load += loaddelta + } + if stats.Load < 0 { + stats.Load = 0 + } + statsMutex.Unlock() + } + + fmt.Printf("Client [%s]: Done\n", client.ssntp.UUID()[:8]) + + time.Sleep(time.Duration(2000) * time.Millisecond) + + client.ssntp.Close() + + fmt.Printf("Sent %d commands, received %d\n", sentFrames, client.nCommands) +} + +func main() { + var serverURL = flag.String("ur", "localhost", "Server URL") + var cert = flag.String("cert", "/etc/pki/ciao/cert-client-localhost.pem", "Client certificate") + var CAcert = flag.String("cacert", "/etc/pki/ciao/CAcert-server-localhost.pem", "CA certificate") + var nFrames = flag.Int("frames", 100, "Number of frames to send") + var cpuprofile = flag.String("cpuprofile", "", "Write cpu profile to file") + var role ssntp.Role = ssntp.AGENT + var config ssntp.Config + var wg sync.WaitGroup + + flag.Var(&role, "role", "Agent client role") + flag.Parse() + + config.URI = *serverURL + config.CAcert = *CAcert + config.Cert = *cert + config.Role = uint32(role) + // config.Trace = os.Stdout + // config.Error = os.Stdout + + if len(*cpuprofile) != 0 { + f, err := os.Create(*cpuprofile) + if err != nil { + fmt.Print(err) + } + pprof.StartCPUProfile(f) + defer pprof.StopCPUProfile() + } + + wg.Add(1) + go fakeStatisticsThread(&config, *nFrames, &wg) + + wg.Wait() +} diff --git a/ciao-scheduler/fake_controller.go b/ciao-scheduler/fake_controller.go new file mode 100644 index 000000000..44cbbaade --- /dev/null +++ 
b/ciao-scheduler/fake_controller.go @@ -0,0 +1,209 @@ +// Copyright (c) 2016 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// +build ignore + +package main + +import ( + "flag" + "fmt" + "github.com/01org/ciao/payloads" + "github.com/01org/ciao/ssntp" + "gopkg.in/yaml.v2" + "math/rand" + "os" + "runtime/pprof" + "sync" + "time" +) + +type ssntpClient struct { + ssntp ssntp.Client + name string + nCommands int +} + +func (client *ssntpClient) ConnectNotify() { + fmt.Printf("%s connected\n", client.name) +} + +func (client *ssntpClient) DisconnectNotify() { + fmt.Printf("%s disconnected\n", client.name) +} + +func (client *ssntpClient) StatusNotify(status ssntp.Status, payload *ssntp.Frame) { + fmt.Printf("STATUS %s for %s\n", status, client.name) +} + +func (client *ssntpClient) CommandNotify(command ssntp.Command, payload *ssntp.Frame) { + client.nCommands++ +} + +func (client *ssntpClient) EventNotify(event ssntp.Event, payload *ssntp.Frame) { +} + +func (client *ssntpClient) ErrorNotify(error ssntp.Error, payload *ssntp.Frame) { + fmt.Printf("ERROR for %s\n", client.name) +} + +func fakeControllerCommandSenderThread(config *ssntp.Config, n int, nFrames int, wg *sync.WaitGroup) { + defer wg.Done() + + client := &ssntpClient{ + name: "Ciao fake Controller command sender", + nCommands: 0, + } + + // set up a dummy START command + reqVcpus := payloads.RequestedResource{ + Type: "vcpus", + Value: 2, + Mandatory: true, + } + reqMem := 
payloads.RequestedResource{ + Type: "mem_mb", + Value: 256, + Mandatory: true, + } + reqDisk := payloads.RequestedResource{ + Type: "disk_mb", + Value: 10000, + Mandatory: true, + } + estVcpus := payloads.EstimatedResource{ + Type: "vcpus", + Value: 1, + } + estMem := payloads.EstimatedResource{ + Type: "mem_mb", + Value: 128, + } + estDisk := payloads.EstimatedResource{ + Type: "disk_mb", + Value: 4096, + } + var cmd payloads.Start + cmd.Start.InstanceUUID = "c73322e8-d5fe-4d57-874c-dcee4fd368cd" + cmd.Start.ImageUUID = "b265f62b-e957-47fd-a0a2-6dc261c7315c" + cmd.Start.RequestedResources = append(cmd.Start.RequestedResources, reqVcpus) + cmd.Start.RequestedResources = append(cmd.Start.RequestedResources, reqMem) + cmd.Start.RequestedResources = append(cmd.Start.RequestedResources, reqDisk) + cmd.Start.EstimatedResources = append(cmd.Start.EstimatedResources, estVcpus) + cmd.Start.EstimatedResources = append(cmd.Start.EstimatedResources, estMem) + cmd.Start.EstimatedResources = append(cmd.Start.EstimatedResources, estDisk) + cmd.Start.FWType = payloads.EFI + cmd.Start.InstancePersistence = payloads.Host + + payload, err := yaml.Marshal(&cmd) + if err != nil { + fmt.Printf("Could not create START workload yaml: %s\n", err) + } + + if client.ssntp.Dial(config, client) != nil { + fmt.Printf("Could not connect to an SSNTP server\n") + return + } + fmt.Printf("Client [%d]: Connected\n", n) + + sentFrames := 0 + for i := 0; i < nFrames; i++ { + // 1..10 seconds delay between commands + delay := rand.Intn(10) + fmt.Printf("Client [%d]: delay %d\n", n, delay) + time.Sleep(time.Duration(delay) * time.Second) + + _, err := client.ssntp.SendCommand(ssntp.START, payload) + if err != nil { + fmt.Printf("Could not send START command: %s\n", err) + } else { + fmt.Printf("Client [%d]: sent START command\n", n) + } + if err == nil { + sentFrames++ + } + } + + fmt.Printf("Client [%d]: Done\n", n) + + client.ssntp.Close() + + fmt.Printf("Sent %d commands, received %d\n", sentFrames, 
client.nCommands) +} +func fakeControllerStatusReceiverThread(config *ssntp.Config, n int, nFrames int, wg *sync.WaitGroup) { + defer wg.Done() + + client := &ssntpClient{ + name: "Ciao fake Controller status receiver", + nCommands: 0, + } + + if client.ssntp.Dial(config, client) != nil { + fmt.Printf("Could not connect to an SSNTP server\n") + return + } + fmt.Printf("Controller Status Receiver Therad Client [%d]: Connected\n", n) + + //TODO: how to receive a forwarded STATS command + for { + //TODO: validate a STATS command was received + + //...for now do nothing + time.Sleep(time.Duration(1) * time.Second) + } + + fmt.Printf("Controller Status Receiver Therad Client [%d]: Done\n", n) + + client.ssntp.Close() + + //fmt.Printf("Sent %d commands, received %d\n", sentFrames, client.nCommands) +} + +func main() { + var serverURL = flag.String("url", "localhost", "Server URL") + var cert = flag.String("cert", "/etc/pki/ciao/cert-client-localhost.pem", "Client certificate") + var CAcert = flag.String("cacert", "/etc/pki/ciao/CAcert-server-localhost.pem", "CA certificate") + var nFrames = flag.Int("frames", 10, "Number of frames to send") + var cpuprofile = flag.String("cpuprofile", "", "Write cpu profile to file") + var role ssntp.Role = ssntp.Controller + var config ssntp.Config + var wg sync.WaitGroup + + flag.Var(&role, "role", "Controller client role") + flag.Parse() + + config.URI = *serverURL + config.CAcert = *CAcert + config.Cert = *cert + config.Role = uint32(role) + // config.Trace = os.Stdout + // config.Error = os.Stdout + + if len(*cpuprofile) != 0 { + f, err := os.Create(*cpuprofile) + if err != nil { + fmt.Print(err) + } + pprof.StartCPUProfile(f) + defer pprof.StopCPUProfile() + } + + wg.Add(1) + go fakeControllerCommandSenderThread(&config, 1, *nFrames, &wg) + + // wg.Add(1) + // go fakeControllerStatusReceiverThread(&config, i, *nFrames, &wg) + + wg.Wait() +} diff --git a/ciao-scheduler/scheduler.go b/ciao-scheduler/scheduler.go new file mode 100644 
index 000000000..12da29a2e --- /dev/null +++ b/ciao-scheduler/scheduler.go @@ -0,0 +1,763 @@ +// +// Copyright (c) 2016 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package main + +import ( + "flag" + "fmt" + "github.com/01org/ciao/payloads" + "github.com/01org/ciao/ssntp" + "github.com/golang/glog" + "gopkg.in/yaml.v2" + "log" + "os" + "runtime/pprof" + "sync" + "time" +) + +type ssntpSchedulerServer struct { + ssntp ssntp.Server + name string + // Command & Status Reporting node(s) + controllerMap map[string]controllerStat + controllerMutex sync.RWMutex + // Compute Nodes + cnMap map[string]*nodeStat + cnList []*nodeStat + cnMutex sync.RWMutex + cnMRU *nodeStat + cnMRUIndex int + //cnInactiveMap map[string]nodeStat + // Network Nodes + nnMap map[string]*nodeStat + nnMutex sync.RWMutex + nnMRU string + nConnections int + nCommands int + nStatuses int + nErrors int + nEvents int +} + +func newSsntpSchedulerServer() *ssntpSchedulerServer { + return &ssntpSchedulerServer{ + name: "Ciao Scheduler Server", + controllerMap: make(map[string]controllerStat), + cnMap: make(map[string]*nodeStat), + cnMRUIndex: -1, + nnMap: make(map[string]*nodeStat), + nConnections: 0, + nCommands: 0, + nStatuses: 0, + nErrors: 0, + nEvents: 0, + } +} + +type nodeStat struct { + status ssntp.Status + uuid string + memTotalMB int + memAvailMB int + load int + cpus int +} + +type controllerStatus uint8 + +const ( + controllerMaster controllerStatus = iota + 
controllerBackup +) + +type controllerStat struct { + status controllerStatus + uuid string +} + +func (sched *ssntpSchedulerServer) ConnectNotify(uuid string, role uint32) { + switch role { + case ssntp.Controller: + sched.controllerMutex.Lock() + defer sched.controllerMutex.Unlock() + + if sched.controllerMap[uuid].uuid != "" { + glog.Warningf("Unexpected reconnect from controller %s\n", uuid) + return + } + controller := new(controllerStat) + + // TODO: smarter clustering than "assume master, unless another is master" + controller.status = controllerMaster + for _, c := range sched.controllerMap { + if c.status == controllerMaster { + controller.status = controllerBackup + break + } + } + + controller.uuid = uuid + sched.controllerMap[uuid] = *controller + case ssntp.AGENT: + sched.cnMutex.RLock() + defer sched.cnMutex.RUnlock() + + if sched.cnMap[uuid] != nil { + glog.Warningf("Unexpected reconnect from compute node %s\n", uuid) + return + } + + var node nodeStat + node.status = ssntp.CONNECTED + node.uuid = uuid + sched.cnList = append(sched.cnList, &node) + sched.cnMap[uuid] = &node + case ssntp.NETAGENT: + sched.nnMutex.Lock() + defer sched.nnMutex.Unlock() + + if sched.nnMap[uuid] != nil { + glog.Warningf("Unexpected reconnect from network compute node %s\n", uuid) + return + } + + var node nodeStat + node.status = ssntp.CONNECTED + node.uuid = uuid + sched.nnMap[uuid] = &node + } + + sched.nConnections++ + glog.V(2).Infof("Connect (role 0x%x, uuid=%s)\n", role, uuid) +} + +func (sched *ssntpSchedulerServer) DisconnectNotify(uuid string) { + + sched.controllerMutex.Lock() + defer sched.controllerMutex.Unlock() + if sched.controllerMap[uuid].uuid != "" { + if sched.controllerMap[uuid].status == controllerMaster { + // promote a new master + for _, c := range sched.controllerMap { + if c.status == controllerBackup { + c.status = controllerMaster + sched.controllerMap[c.uuid] = c + //TODO: inform the Controller it is master + break + } + } + } + 
delete(sched.controllerMap, uuid) + + sched.nConnections-- + glog.V(2).Infof("Disconnect controller (uuid=%s)\n", uuid) + return + } + + sched.cnMutex.Lock() + defer sched.cnMutex.Unlock() + if sched.cnMap[uuid] != nil { + //TODO: consider moving to cnInactiveMap? + node := sched.cnMap[uuid] + if node != nil { + for i, n := range sched.cnList { + if n != node { + continue + } + + sched.cnList = append(sched.cnList[:i], sched.cnList[i+1:]...) + } + } + + if node == sched.cnMRU { + sched.cnMRU = nil + sched.cnMRUIndex = -1 + } + + delete(sched.cnMap, uuid) + + sched.nConnections-- + glog.V(2).Infof("Disconnect cn (uuid=%s)\n", uuid) + return + } + + sched.nnMutex.Lock() + defer sched.nnMutex.Unlock() + if sched.nnMap[uuid] != nil { + //TODO: consider moving to nnInactiveMap? + delete(sched.nnMap, uuid) + + sched.nConnections-- + glog.V(2).Infof("Disconnect nn (uuid=%s)\n", uuid) + return + } + + glog.Warningf("Disconnect error: no ssntp client with uuid=%s\n", uuid) + return +} + +func (sched *ssntpSchedulerServer) StatusNotify(uuid string, status ssntp.Status, frame *ssntp.Frame) { + payload := frame.Payload + + // for now only pay attention to READY status + + sched.nStatuses++ + glog.V(2).Infof("STATUS (#%d) %v from %s\n", sched.nStatuses, status, uuid) + + sched.controllerMutex.RLock() + defer sched.controllerMutex.RUnlock() + if sched.controllerMap[uuid].uuid != "" { + glog.Warningf("Ignoring STATUS change from Controller uuid=%s\n", uuid) + return + } + + sched.cnMutex.RLock() + defer sched.cnMutex.RUnlock() + + sched.nnMutex.RLock() + defer sched.nnMutex.RUnlock() + + var node *nodeStat + if sched.cnMap[uuid] != nil { + node = sched.cnMap[uuid] + } else if sched.nnMap[uuid] != nil { + node = sched.nnMap[uuid] + } else { + glog.Warningf("STATUS error: no connected ssntp client with uuid=%s\n", uuid) + return + } + + node.status = status + switch node.status { + case ssntp.READY: + //pull in client's READY status frame transmitted statistics + var stats 
payloads.Ready + err := yaml.Unmarshal(payload, &stats) + if err != nil { + glog.Errorf("Bad READY yaml for node %s\n", uuid) + return + } + node.memTotalMB = stats.MemTotalMB + node.memAvailMB = stats.MemAvailableMB + node.load = stats.Load + node.cpus = stats.CpusOnline + //TODO pull in other types of payloads.Ready struct data + } +} + +type workResources struct { + instanceUUID string + memReqMB int + networkNode int +} + +func (sched *ssntpSchedulerServer) getWorkloadResources(work *payloads.Start) (workload workResources, err error) { + // loop the array to find resources + for idx := range work.Start.RequestedResources { + // memory: + if work.Start.RequestedResources[idx].Type == payloads.MemMB { + workload.memReqMB = work.Start.RequestedResources[idx].Value + } + + // network node + if work.Start.RequestedResources[idx].Type == payloads.NetworkNode { + workload.networkNode = work.Start.RequestedResources[idx].Value + } + + // etc... + } + + // validate the found resources + if workload.memReqMB <= 0 { + return workload, fmt.Errorf("invalid start payload resource demand: mem_mb (%d) <= 0, must be > 0", workload.memReqMB) + } + if workload.networkNode != 0 && workload.networkNode != 1 { + return workload, fmt.Errorf("invalid start payload resource demand: network_node (%d) is not 0 or 1", workload.networkNode) + } + + return workload, nil +} + +func (sched *ssntpSchedulerServer) workloadFits(node *nodeStat, workload *workResources) bool { + // simple scheduling policy == first memory fit + if node.memAvailMB >= workload.memReqMB && + node.status == ssntp.READY { + return true + } + return false +} + +func (sched *ssntpSchedulerServer) sendStartFailureError(clientUUID string, instanceUUID string, reason payloads.StartFailureReason) { + error := payloads.ErrorStartFailure{ + InstanceUUID: instanceUUID, + Reason: reason, + } + + payload, err := yaml.Marshal(&error) + if err != nil { + glog.Errorf("Unable to Marshall Status %v", err) + return + } + + 
glog.Errorf("Unable to dispatch: %v\n", reason) + sched.ssntp.SendError(clientUUID, ssntp.StartFailure, payload) +} +func (sched *ssntpSchedulerServer) getConcentratorUUID(event ssntp.Event, payload []byte) (string, error) { + switch event { + default: + return "", fmt.Errorf("unsupported ssntp.Event type \"%s\"", event) + case ssntp.TenantAdded: + var ev payloads.EventTenantAdded + err := yaml.Unmarshal(payload, &ev) + return ev.TenantAdded.ConcentratorUUID, err + case ssntp.TenantRemoved: + var ev payloads.EventTenantRemoved + err := yaml.Unmarshal(payload, &ev) + return ev.TenantRemoved.ConcentratorUUID, err + case ssntp.PublicIPAssigned: + var ev payloads.EventPublicIPAssigned + err := yaml.Unmarshal(payload, &ev) + return ev.AssignedIP.ConcentratorUUID, err + } +} + +func (sched *ssntpSchedulerServer) fwdEventToCNCI(event ssntp.Event, payload []byte) (dest ssntp.ForwardDestination) { + // since the scheduler is the primary ssntp server, it needs to + // unwrap event payloads and forward them to the approriate recipient + + concentratorUUID, err := sched.getConcentratorUUID(event, payload) + if err != nil || concentratorUUID == "" { + glog.Errorf("Bad %s event yaml from, concentratorUUID == %s\n", event, concentratorUUID) + dest.SetDecision(ssntp.Discard) + return + } + + glog.V(2).Infof("Forwarding %s to %s\n", event.String(), concentratorUUID) + dest.AddRecipient(concentratorUUID) + + return dest +} + +func (sched *ssntpSchedulerServer) getWorkloadAgentUUID(command ssntp.Command, payload []byte) (string, string, error) { + switch command { + default: + return "", "", fmt.Errorf("unsupported ssntp.Command type \"%s\"", command) + case ssntp.RESTART: + var cmd payloads.Restart + err := yaml.Unmarshal(payload, &cmd) + return cmd.Restart.InstanceUUID, cmd.Restart.WorkloadAgentUUID, err + case ssntp.STOP: + var cmd payloads.Stop + err := yaml.Unmarshal(payload, &cmd) + return cmd.Stop.InstanceUUID, cmd.Stop.WorkloadAgentUUID, err + case ssntp.DELETE: + var cmd 
payloads.Delete + err := yaml.Unmarshal(payload, &cmd) + return cmd.Delete.InstanceUUID, cmd.Delete.WorkloadAgentUUID, err + case ssntp.EVACUATE: + var cmd payloads.Evacuate + err := yaml.Unmarshal(payload, &cmd) + return "", cmd.Evacuate.WorkloadAgentUUID, err + } +} + +func (sched *ssntpSchedulerServer) fwdCmdToComputeNode(command ssntp.Command, payload []byte) (dest ssntp.ForwardDestination, instanceUUID string) { + // some commands require no scheduling choice, rather the specified + // agent/launcher needs the command instead of the scheduler + instanceUUID, cnDestUUID, err := sched.getWorkloadAgentUUID(command, payload) + if err != nil || cnDestUUID == "" { + glog.Errorf("Bad %s command yaml from Controller, WorkloadAgentUUID == %s\n", command.String(), cnDestUUID) + dest.SetDecision(ssntp.Discard) + return + } + + glog.V(2).Infof("Forwarding controller %s command to %s\n", command.String(), cnDestUUID) + dest.AddRecipient(cnDestUUID) + + return +} + +func (sched *ssntpSchedulerServer) decrementResourceUsage(node *nodeStat, workload *workResources) { + node.memAvailMB -= workload.memReqMB +} + +func (sched *ssntpSchedulerServer) pickComputeNode(controllerUUID string, workload *workResources) (node *nodeStat) { + sched.cnMutex.Lock() + defer sched.cnMutex.Unlock() + + if len(sched.cnList) == 0 { + sched.sendStartFailureError(controllerUUID, workload.instanceUUID, payloads.NoComputeNodes) + return + } + + /* Shortcut for 1 nodes cluster */ + if len(sched.cnList) == 1 { + if sched.workloadFits(sched.cnList[0], workload) == true { + return sched.cnList[0] + } + } + + /* First try nodes after the MRU */ + if sched.cnMRUIndex != -1 && sched.cnMRUIndex < len(sched.cnList)-1 { + for i, n := range sched.cnList[sched.cnMRUIndex+1:] { + if n == sched.cnMRU { + continue + } + + if sched.workloadFits(n, workload) == true { + sched.cnMRUIndex = sched.cnMRUIndex + 1 + i + sched.cnMRU = n + return n + } + } + } + + /* Then try the whole list, including the MRU */ + for i, n 
:= range sched.cnList { + if sched.workloadFits(n, workload) == true { + sched.cnMRUIndex = i + sched.cnMRU = n + return n + } + } + + sched.sendStartFailureError(controllerUUID, workload.instanceUUID, payloads.FullCloud) + return nil +} + +func (sched *ssntpSchedulerServer) pickNetworkNode(controllerUUID string, workload *workResources) (node *nodeStat) { + sched.nnMutex.RLock() + defer sched.nnMutex.RUnlock() + + if len(sched.nnMap) == 0 { + sched.sendStartFailureError(controllerUUID, workload.instanceUUID, payloads.NoNetworkNodes) + return + } + + // with more than one node MRU gives simplistic spread + for _, node = range sched.nnMap { + if (len(sched.nnMap) <= 1 || ((len(sched.nnMap) > 1) && (node.uuid != sched.nnMRU))) && + sched.workloadFits(node, workload) { + sched.nnMRU = node.uuid + return node + } + } + + sched.sendStartFailureError(controllerUUID, workload.instanceUUID, payloads.NoNetworkNodes) + return nil +} + +func (sched *ssntpSchedulerServer) startWorkload(controllerUUID string, payload []byte) (dest ssntp.ForwardDestination, instanceUUID string) { + var work payloads.Start + err := yaml.Unmarshal(payload, &work) + if err != nil { + glog.Errorf("Bad START workload yaml from Controller %s: %s\n", controllerUUID, err) + dest.SetDecision(ssntp.Discard) + return dest, "" + } + + workload, err := sched.getWorkloadResources(&work) + if err != nil { + glog.Errorf("Bad START workload resource list from Controller %s: %s\n", controllerUUID, err) + dest.SetDecision(ssntp.Discard) + return dest, "" + } + + instanceUUID = workload.instanceUUID + + var targetNode *nodeStat + + if workload.networkNode == 0 { + targetNode = sched.pickComputeNode(controllerUUID, &workload) + } else { //workload.network_node == 1 + targetNode = sched.pickNetworkNode(controllerUUID, &workload) + } + + if targetNode != nil { + //TODO: mark the targetNode as unavailable until next stats / READY checkin? + // or is subtracting mem demand sufficiently speculative enough? 
+ // Goal is to have spread, not schedule "too many" workloads back + // to back on the same targetNode, but also not add latency to dispatch and + // hopefully not queue when all nodes have just started a workload. + sched.decrementResourceUsage(targetNode, &workload) + + dest.AddRecipient(targetNode.uuid) + } else { + // TODO Queue the frame ? + dest.SetDecision(ssntp.Discard) + } + + return dest, instanceUUID +} + +func (sched *ssntpSchedulerServer) CommandForward(controllerUUID string, command ssntp.Command, frame *ssntp.Frame) (dest ssntp.ForwardDestination) { + payload := frame.Payload + instanceUUID := "" + + sched.controllerMutex.RLock() + defer sched.controllerMutex.RUnlock() + if sched.controllerMap[controllerUUID].status != controllerMaster { + glog.Warningf("Ignoring %s command from non-master Controller %s\n", command, controllerUUID) + dest.SetDecision(ssntp.Discard) + return + } + + start := time.Now() + + glog.V(2).Infof("Command %s from %s\n", command, controllerUUID) + + switch command { + // the main command with scheduler processing + case ssntp.START: + dest, instanceUUID = sched.startWorkload(controllerUUID, payload) + case ssntp.RESTART: + fallthrough + case ssntp.STOP: + fallthrough + case ssntp.DELETE: + fallthrough + case ssntp.EVACUATE: + dest, instanceUUID = sched.fwdCmdToComputeNode(command, payload) + default: + dest.SetDecision(ssntp.Discard) + } + + elapsed := time.Since(start) + glog.V(2).Infof("%s command processed for instance %s in %s\n", command, instanceUUID, elapsed) + + return +} + +func (sched *ssntpSchedulerServer) CommandNotify(uuid string, command ssntp.Command, frame *ssntp.Frame) { + // Currently all commands are handled by CommandForward, the SSNTP command forwader, + // or directly by role defined forwarding rules. 
+ sched.nCommands++ + glog.V(2).Infof("COMMAND (#%d) %v from %s\n", sched.nCommands, command, uuid) +} + +func (sched *ssntpSchedulerServer) EventForward(uuid string, event ssntp.Event, frame *ssntp.Frame) (dest ssntp.ForwardDestination) { + payload := frame.Payload + + start := time.Now() + + switch event { + case ssntp.TenantAdded: + fallthrough + case ssntp.TenantRemoved: + fallthrough + case ssntp.PublicIPAssigned: + dest = sched.fwdEventToCNCI(event, payload) + } + + elapsed := time.Since(start) + glog.V(2).Infof("%s event processed for instance %s in %s\n", event.String(), uuid, elapsed) + + return dest +} + +func (sched *ssntpSchedulerServer) EventNotify(uuid string, event ssntp.Event, frame *ssntp.Frame) { + // Currently all events are handled by EventForward, the SSNTP command forwader, + // or directly by role defined forwarding rules. + sched.nEvents++ + glog.V(2).Infof("EVENT (#%d) %v from %s\n", sched.nEvents, event, uuid) +} + +func (sched *ssntpSchedulerServer) ErrorNotify(uuid string, error ssntp.Error, frame *ssntp.Frame) { + sched.nErrors++ + glog.V(2).Infof("ERROR (#%d) %v from %s\n", sched.nErrors, error, uuid) +} + +func heartBeat(sched *ssntpSchedulerServer) { + iter := 0 + for { + var beatTxt string + + time.Sleep(time.Duration(1) * time.Second) + + sched.controllerMutex.RLock() + sched.cnMutex.RLock() + + if len(sched.controllerMap) == 0 && len(sched.cnMap) == 0 { + beatTxt += "** idle / disconnected **" + } else { + //output a column indication occasionally + iter++ + if iter%22 == 0 { + log.Printf("Controllers\t\t\t\t\tCompute Nodes\n") + } + + i := 0 + // show the first two controller's + controllerMax := 2 + for _, controller := range sched.controllerMap { + beatTxt += fmt.Sprintf("controller-%s:", controller.uuid[:8]) + if controller.status == controllerMaster { + beatTxt += "master" + } else { + beatTxt += "backup" + } + i++ + if i == controllerMax { + break + } + if i <= controllerMax && len(sched.controllerMap) > i { + beatTxt += ", 
" + } else { + beatTxt += "\t" + } + } + if i == 0 { + beatTxt += " -no Controller- \t\t\t\t\t" + } else if i < controllerMax { + beatTxt += "\t\t\t" + } else { + beatTxt += "\t" + } + i = 0 + // show the first four compute nodes + cnMax := 4 + for _, node := range sched.cnMap { + if node.uuid == "" { + beatTxt += fmt.Sprintf("node-UNKNOWN:") + } else { + beatTxt += fmt.Sprintf("node-%s:", node.uuid[:8]) + } + beatTxt += node.status.String() + if node == sched.cnMRU { + beatTxt += "*" + } + beatTxt += ":" + + fmt.Sprintf("%d/%d,%d", + node.memAvailMB, + node.memTotalMB, + node.load) + i++ + if i == cnMax { + break + } + if i <= cnMax && len(sched.cnMap) > i { + beatTxt += ", " + } + } + if i == 0 { + beatTxt += " -no Compute Nodes-" + } + } + sched.controllerMutex.RUnlock() + sched.cnMutex.RUnlock() + log.Printf("%s\n", beatTxt) + } +} + +func main() { + var cert = flag.String("cert", "/etc/pki/ciao/cert-server-localhost.pem", "Server certificate") + var CAcert = flag.String("cacert", "/etc/pki/ciao/CAcert-server-localhost.pem", "CA certificate") + var cpuprofile = flag.String("cpuprofile", "", "Write cpu profile to file") + var heartbeat = flag.Bool("heartbeat", false, "Emit status heartbeat text") + + flag.Parse() + sched := newSsntpSchedulerServer() + + if len(*cpuprofile) != 0 { + f, err := os.Create(*cpuprofile) + if err != nil { + log.Print(err) + } + pprof.StartCPUProfile(f) + defer pprof.StopCPUProfile() + } + + //config.Trace = os.Stdout + //config.Error = os.Stdout + //config.DebugInterface = false + + config := &ssntp.Config{ + CAcert: *CAcert, + Cert: *cert, + Role: ssntp.SCHEDULER, + } + + config.ForwardRules = []ssntp.FrameForwardRule{ + { // all STATS commands go to all Controllers + Operand: ssntp.STATS, + Dest: ssntp.Controller, + }, + { // all TraceReport events go to all Controllers + Operand: ssntp.TraceReport, + Dest: ssntp.Controller, + }, + { // all InstanceDeleted events go to all Controllers + Operand: ssntp.InstanceDeleted, + Dest: 
ssntp.Controller, + }, + { // all ConcentratorInstanceAdded events go to all Controllers + Operand: ssntp.ConcentratorInstanceAdded, + Dest: ssntp.Controller, + }, + { // all StartFailure events go to all Controllers + Operand: ssntp.StartFailure, + Dest: ssntp.Controller, + }, + { // all StopFailure events go to all Controllers + Operand: ssntp.StopFailure, + Dest: ssntp.Controller, + }, + { // all RestartFailure events go to all Controllers + Operand: ssntp.RestartFailure, + Dest: ssntp.Controller, + }, + { // all START command are processed by the Command forwarder + Operand: ssntp.START, + CommandForward: sched, + }, + { // all RESTART command are processed by the Command forwarder + Operand: ssntp.RESTART, + CommandForward: sched, + }, + { // all STOP command are processed by the Command forwarder + Operand: ssntp.STOP, + CommandForward: sched, + }, + { // all DELETE command are processed by the Command forwarder + Operand: ssntp.DELETE, + CommandForward: sched, + }, + { // all EVACUATE command are processed by the Command forwarder + Operand: ssntp.EVACUATE, + CommandForward: sched, + }, + { // all TenantAdded events are processed by the Event forwarder + Operand: ssntp.TenantAdded, + EventForward: sched, + }, + { // all TenantRemoved events are processed by the Event forwarder + Operand: ssntp.TenantRemoved, + EventForward: sched, + }, + { // all PublicIPAssigned events are processed by the Event forwarder + Operand: ssntp.PublicIPAssigned, + EventForward: sched, + }, + } + + if *heartbeat { + go heartBeat(sched) + } + + sched.ssntp.Serve(config, sched) +} diff --git a/networking/cnci_agent/README.md b/networking/cnci_agent/README.md new file mode 100644 index 000000000..a89ac31db --- /dev/null +++ b/networking/cnci_agent/README.md @@ -0,0 +1,68 @@ +# CNCI Agent # + +## Overview ## + +Compute Node Concentrators or CNCIs are Virtual Machines running on +Network Nodes which handle subsets of traffic belonging to a single tenant. 
+A single network node can run multiple CNCIs limited by the Compute and
+Network needs of the CNCIs. All tenant level switching and routing for
+a given tenant is handled (isolated) from other tenants using the CNCIs.
+
+The CNCIs also implement tenant specific firewall and NAT rules. In the future
+they may be extended to perform traffic shaping.
+
+## CNCI Agent ##
+
+The CNCI Agent is the service running within a CNCI VM that communicates with
+the ciao-scheduler to create new bridges and tunnels in response to remote
+bridge and tunnel creation on a compute node.
+
+## CNCI Agent Lifecycle ##
+
+### CNCI Provisioning ###
+
+A CNCI VM is provisioned by the ciao-controller to handle and isolate traffic
+for subnets that belong to a specific tenant.
+
+### CNCI Registration ###
+
+When the CNCI VM boots up (at each bootup or restart) the CNCI Agent
+notifies the ciao-scheduler using SSNTP that it is active and handles
+tenant subnets for a specific tenant. The scheduler in turn notifies the
+controller of the CNCI IP address.
+
+The ciao-controller associates this IP address with the appropriate
+tenant (subnets).
+
+At this point the CNCI is registered with the ciao-controller to handle
+a specific set of tenant subnets.
+
+### Compute Node Subnet Creation and Registration ###
+
+When the ciao-controller requests scheduling of a tenant workload it also
+sends the associated CNCI IP address that handles the tenant traffic for this
+workload as part of the payload definition to the ciao-launcher.
+
+The launcher creates the VNIC (Virtual Network Interface) on a compute
+node in response to a workload being launched by the ciao-launcher.
+
+When the VNIC is instantiated the networking library checks if it is the
+first (only) instance of that tenant subnet on that CN at that point in time.
+
+If it is the first instance it creates a local bridge and a tunnel to the
+CNCI associated with that workload.
+
+It also requests the launcher to notify the CNCI (via the ciao-scheduler) about
+the creation of this remote subnet.
+
+The launcher sends this request to the ciao-scheduler which sends it to the
+CNCI all via SSNTP.
+
+### CNCI Subnet Creation ###
+
+When the CNCI sees a Remote Subnet Registration message it links the remote
+subnet to the appropriate subnet bridge on the CNCI.
+
+The CNCI agent manages the bridges, routing, NAT and traffic for all tenant
+IPs and subnets it handles.
+
diff --git a/networking/cnci_agent/client.go b/networking/cnci_agent/client.go
new file mode 100644
index 000000000..d1ec6b4b6
--- /dev/null
+++ b/networking/cnci_agent/client.go
@@ -0,0 +1,505 @@
+//
+// Copyright (c) 2016 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+// + +package main + +import ( + "encoding/json" + "flag" + "fmt" + "io/ioutil" + "log" + "os" + "os/exec" + "os/signal" + "path" + "sync" + "syscall" + "time" + + "gopkg.in/yaml.v2" + + "github.com/01org/ciao/payloads" + "github.com/01org/ciao/ssntp" + + "github.com/golang/glog" +) + +var serverURL string +var serverCertPath string +var clientCertPath string +var computeNet string +var mgmtNet string +var dhcpStrict bool +var enableNetwork bool +var enableNATssh bool +var agentUUID string + +func init() { + flag.StringVar(&serverURL, "server", "localhost", "URL of SSNTP server, Use auto for auto discovery") + flag.StringVar(&serverCertPath, "cacert", "/var/lib/ciao/CAcert-server-localhost.pem", "Client certificate") + flag.StringVar(&clientCertPath, "cert", "/var/lib/ciao/cert-client-localhost.pem", "CA certificate") + flag.StringVar(&computeNet, "compute-net", "", "Compute Subnet") + flag.StringVar(&mgmtNet, "mgmt-net", "", "Management Subnet") + flag.BoolVar(&enableNetwork, "network", true, "Enable networking") + flag.BoolVar(&enableNATssh, "ssh", true, "Enable NAT and SSH") + flag.StringVar(&agentUUID, "uuid", "", "UUID the CNCI Agent should use. 
Autogenerated otherwise") +} + +const ( + lockDir = "/tmp/lock/ciao" + logDir = "/var/lib/ciao/logs/cnci_agent" + lockFile = "cnci-agent.lock" + statsPeriod = 30 + + interfacesDir = "/var/lib/ciao/network/interfaces" +) + +type cmdWrapper struct { + cmd interface{} +} +type statusConnected struct{} + +type ssntpConn struct { + sync.RWMutex + ssntp.Client + connected bool +} + +func (s *ssntpConn) isConnected() bool { + s.RLock() + defer s.RUnlock() + return s.connected +} + +func (s *ssntpConn) setStatus(status bool) { + s.Lock() + s.connected = status + s.Unlock() +} + +type agentClient struct { + ssntpConn + cmdCh chan *cmdWrapper + netCh chan struct{} //Used to signal physical network changes +} + +func (client *agentClient) DisconnectNotify() { + client.setStatus(false) + glog.Warning("disconnected") +} + +func (client *agentClient) ConnectNotify() { + client.setStatus(true) + client.cmdCh <- &cmdWrapper{&statusConnected{}} + glog.Info("connected") +} + +func (client *agentClient) StatusNotify(status ssntp.Status, frame *ssntp.Frame) { + glog.Infof("STATUS %s", status) +} + +func (client *agentClient) ErrorNotify(err ssntp.Error, frame *ssntp.Frame) { + glog.Infof("ERROR %d", err) +} + +func getLock() error { + err := os.MkdirAll(lockDir, 0777) + if err != nil { + glog.Errorf("Unable to create lockdir %s", lockDir) + return err + } + + /* We're going to let the OS close and unlock this fd */ + lockPath := path.Join(lockDir, lockFile) + fd, err := syscall.Open(lockPath, syscall.O_CREAT, syscall.S_IWUSR|syscall.S_IRUSR) + if err != nil { + glog.Errorf("Unable to open lock file %v", err) + return err + } + + syscall.CloseOnExec(fd) + + if syscall.Flock(fd, syscall.LOCK_EX|syscall.LOCK_NB) != nil { + glog.Error("CNCI Agent is already running. 
Exitting.") + return fmt.Errorf("Unable to lock file %s", lockPath) + } + + return nil +} + +/* Must be called after flag.Parse() */ +func initLogger() error { + logDirFlag := flag.Lookup("log_dir") + if logDirFlag == nil { + return fmt.Errorf("log_dir does not exist") + } + + if logDirFlag.Value.String() == "" { + err := logDirFlag.Value.Set(logDir) + if err != nil { + return err + } + } + + if err := os.MkdirAll(logDirFlag.Value.String(), 0755); err != nil { + return fmt.Errorf("Unable to create log directory (%s) %v", logDir, err) + } + + return nil +} + +func createMandatoryDirs() error { + if err := os.MkdirAll(interfacesDir, 0755); err != nil { + return fmt.Errorf("Unable to create interfaces directory (%s) %v", + interfacesDir, err) + } + return nil +} + +func processCommand(client *ssntpConn, cmd *cmdWrapper) { + + switch netCmd := cmd.cmd.(type) { + + case *payloads.EventTenantAdded: + + go func(cmd *cmdWrapper) { + c := &netCmd.TenantAdded + glog.Infof("Processing: CiaoEventTenantAdded %v", c) + err := addRemoteSubnet(c) + if err != nil { + glog.Errorf("Error Processing: CiaoEventTenantAdded %v", err) + } + }(cmd) + + case *payloads.EventTenantRemoved: + + go func(cmd *cmdWrapper) { + c := &netCmd.TenantRemoved + glog.Infof("Processing: CiaoEventTenantRemoved %v", c) + err := delRemoteSubnet(c) + if err != nil { + glog.Errorf("Error Processing: CiaoEventTenantRemoved %v", err) + } + }(cmd) + + case *payloads.CommandAssignPublicIP: + + go func(cmd *cmdWrapper) { + c := &netCmd.AssignIP + glog.Infof("Processing: CiaoCommandAssignPublicIP %v", c) + err := assignPubIP(c) + if err != nil { + glog.Infof("Error Processing: CiaoCommandAssignPublicIP %v", err) + } + }(cmd) + + case *payloads.CommandReleasePublicIP: + + go func(cmd *cmdWrapper) { + c := &netCmd.ReleaseIP + glog.Infof("Processing: CiaoCommandReleasePublicIP %v", c) + err := releasePubIP(c) + if err != nil { + glog.Errorf("Error Processing: CiaoCommandReleasePublicIP %v", err) + } + }(cmd) + + case 
*statusConnected: + //Block and send this as it does not make sense to send other events + //or process commands when we have not yet registered + glog.Infof("Processing: status connected") + err := sendNetworkEvent(client, ssntp.ConcentratorInstanceAdded, nil) + if err != nil { + glog.Errorf("Unable to register : %v", err) + } + + default: + glog.Errorf("Processing unknown command") + + } +} + +func (client *agentClient) CommandNotify(cmd ssntp.Command, frame *ssntp.Frame) { + payload := frame.Payload + + switch cmd { + case ssntp.AssignPublicIP: + glog.Infof("CMD: ssntp.AssignPublicIP %v", len(payload)) + + go func(payload []byte) { + var assignIP payloads.CommandAssignPublicIP + err := yaml.Unmarshal(payload, &assignIP) + if err != nil { + glog.Warning("Error unmarshalling StartFailure") + return + } + glog.Infof("EVENT: ssntp.AssignPublicIP %v", assignIP) + client.cmdCh <- &cmdWrapper{&assignIP} + }(payload) + + case ssntp.ReleasePublicIP: + glog.Infof("CMD: ssntp.ReleasePublicIP %v", len(payload)) + + go func(payload []byte) { + var releaseIP payloads.CommandReleasePublicIP + err := yaml.Unmarshal(payload, &releaseIP) + if err != nil { + glog.Warning("Error unmarshalling StartFailure") + return + } + glog.Infof("EVENT: ssntp.ReleasePublicIP %s", releaseIP) + client.cmdCh <- &cmdWrapper{&releaseIP} + }(payload) + + default: + glog.Infof("CMD: %s", cmd) + } +} + +func (client *agentClient) EventNotify(event ssntp.Event, frame *ssntp.Frame) { + payload := frame.Payload + + switch event { + case ssntp.TenantAdded: + glog.Infof("EVENT: ssntp.TenantAdded %v", len(payload)) + + go func(payload []byte) { + var tenantAdded payloads.EventTenantAdded + err := yaml.Unmarshal(payload, &tenantAdded) + if err != nil { + glog.Warning("Error unmarshalling StartFailure") + return + } + glog.Infof("EVENT: ssntp.TenantAdded %s", tenantAdded) + + client.cmdCh <- &cmdWrapper{&tenantAdded} + }(payload) + + case ssntp.TenantRemoved: + glog.Infof("EVENT: ssntp.TenantRemoved %v", 
len(payload)) + + go func(payload []byte) { + var tenantRemoved payloads.EventTenantRemoved + err := yaml.Unmarshal(payload, &tenantRemoved) + if err != nil { + glog.Warning("Error unmarshalling StartFailure") + return + } + glog.Infof("EVENT: ssntp.TenantRemoved %s", tenantRemoved) + client.cmdCh <- &cmdWrapper{&tenantRemoved} + }(payload) + + default: + glog.Infof("EVENT %s", event) + } +} + +func connectToServer(doneCh chan struct{}, statusCh chan struct{}) { + + defer func() { + statusCh <- struct{}{} + }() + + cfg := &ssntp.Config{UUID: agentUUID, URI: serverURL, CAcert: serverCertPath, Cert: clientCertPath, + Role: uint32(ssntp.CNCIAGENT), Log: ssntp.Log} + client := &agentClient{cmdCh: make(chan *cmdWrapper)} + + dialCh := make(chan error) + + go func() { + err := client.Dial(cfg, client) + if err != nil { + glog.Errorf("Unable to connect to server %v", err) + dialCh <- err + return + } + + dialCh <- err + }() + + dialing := true + +DONE: + for { + select { + case err := <-dialCh: + dialing = false + if err != nil { + break DONE + } + case <-doneCh: + client.Close() + if !dialing { + break DONE + } + case cmd := <-client.cmdCh: + /* + Double check we're not quitting here. Otherwise a flood of commands + from the server could block our exit for an arbitrary amount of time, + i.e, doneCh and cmdCh could become available at the same time. 
+ */ + select { + case <-doneCh: + client.Close() + break DONE + default: + } + glog.Infof("cmd channel: %v", cmd) + processCommand(&client.ssntpConn, cmd) + } + } +} + +//Try to discover the scheduler automatically if needed +func discoverScheduler() error { + + if serverURL != "auto" { + return nil + } + + //TODO: Do this via systemd + out, err := exec.Command("mount", "/dev/vdc", "/mnt").Output() + if err != nil { + //Ignore this error, we may be already mounted + glog.Errorf("Unable to mount /dev/vdc %v %s", err, string(out)) + } + + payload, err := ioutil.ReadFile("/mnt/ciao.yaml") + if err != nil { + glog.Errorf("Unable to read /mnt/ciao.yaml %v", err) + return err + } + + var config payloads.CNCIInstanceConfig + err = yaml.Unmarshal([]byte(payload), &config) + if err != nil { + glog.Errorf("Unable to unmarshal scheduler addr %v", err) + return err + } + + serverURL = config.SchedulerAddr + return nil +} + +//CloudInitJSON represents the contents of the cloud init file +type CloudInitJSON struct { + UUID string `json:"uuid"` + Hostname string `json:"hostname"` +} + +//Try to discover the UUID automatically if needed +func discoverUUID() (string, error) { + + //TODO: Do this via systemd + out, err := exec.Command("mount", "/dev/vdb", "/media").Output() + if err != nil { + //Ignore this error, we may be already mounted + glog.Errorf("Unable to mount /dev/vdb %v %s", err, string(out)) + } + + payload, err := ioutil.ReadFile("/media/openstack/latest/meta_data.json") + if err != nil { + glog.Errorf("Unable to read /media/openstack/latest/meta_data.json %v", err) + return "", err + } + + metaData := &CloudInitJSON{} + err = json.Unmarshal(payload, metaData) + if err != nil { + glog.Errorf("Unable to read UUID from /media/openstack/latest/meta_data.json %v", err) + } + + return metaData.UUID, nil +} + +func main() { + + if getLock() != nil { + os.Exit(1) + } + + flag.Parse() + + if err := initLogger(); err != nil { + log.Fatalf("Unable to initialise logs: %v", err) 
+ } + + glog.Info("Starting CNCI Agent") + + if err := createMandatoryDirs(); err != nil { + glog.Fatalf("Unable to create mandatory dirs: %v", err) + } + + if err := discoverScheduler(); err != nil { + glog.Fatalf("Unable to auto discover scheduler: %v", err) + } + glog.Errorf("Scheduler address %v", serverURL) + + if agentUUID == "" { + agentUUID, _ = discoverUUID() + } + glog.Errorf("CNCI Agent: UUID : %v", agentUUID) + + doneCh := make(chan struct{}) + statusCh := make(chan struct{}) + signalCh := make(chan os.Signal, 1) + timeoutCh := make(chan struct{}) + wdogCh := make(chan struct{}) + signal.Notify(signalCh, syscall.SIGINT, syscall.SIGTERM, syscall.SIGHUP) + + //TODO: Wait till the node gets an IP address before we kick this off + //TODO: Add a IP address change notifier to handle potential IP address change + if err := initNetwork(signalCh); err != nil { + glog.Fatalf("Unable to setup network. %s", err.Error()) + } + + go connectToServer(doneCh, statusCh) + + //Prime the watchdog + go func() { + wdogCh <- struct{}{} + }() + +DONE: + for { + select { + case <-signalCh: + glog.Info("Received terminating signal. 
Waiting for server loop to quit") + close(doneCh) + go func() { + time.Sleep(time.Second) + timeoutCh <- struct{}{} + }() + case <-statusCh: + glog.Info("Server Loop quit cleanly") + break DONE + case <-timeoutCh: + glog.Warning("Server Loop did not exit within 1 second quitting") + break DONE + case <-wdogCh: + glog.Info("Watchdog kicker") + go func() { + //TODO: Add software watchdog to CNCI VM + time.Sleep(5 * time.Second) + wdogCh <- struct{}{} + }() + } + } + + glog.Flush() + glog.Info("Exit") +} diff --git a/networking/cnci_agent/database.go b/networking/cnci_agent/database.go new file mode 100644 index 000000000..9d4482505 --- /dev/null +++ b/networking/cnci_agent/database.go @@ -0,0 +1,55 @@ +// +// Copyright (c) 2016 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +package main + +import ( + "github.com/01org/ciao/payloads" + "github.com/golang/glog" +) + +//Will implement a simple database to persist state across +//restarts of the host/VM + +func dbProcessCommand(client *agentClient, cmd *cmdWrapper) { + + switch netCmd := cmd.cmd.(type) { + + case *payloads.EventTenantAdded: + + c := &netCmd.TenantAdded + glog.Infof("Tenant Added %v", c) + + case *payloads.EventTenantRemoved: + + c := &netCmd.TenantRemoved + glog.Infof("Tenant Removed %v", c) + + case *payloads.CommandAssignPublicIP: + + c := &netCmd.AssignIP + glog.Infof("Assign IP %v", c) + + case *payloads.CommandReleasePublicIP: + + c := &netCmd.ReleaseIP + glog.Infof("Release IP %v", c) + + default: + glog.Errorf("Processing unknown command %v", netCmd) + + } +} diff --git a/networking/cnci_agent/network.go b/networking/cnci_agent/network.go new file mode 100644 index 000000000..20878484a --- /dev/null +++ b/networking/cnci_agent/network.go @@ -0,0 +1,334 @@ +// +// Copyright (c) 2016 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +package main + +import ( + "encoding/binary" + "fmt" + "net" + "os" + "time" + + "gopkg.in/yaml.v2" + + "github.com/golang/glog" + + "github.com/01org/ciao/networking/libsnnet" + "github.com/01org/ciao/payloads" + "github.com/01org/ciao/ssntp" +) + +var gCnci *libsnnet.Cnci +var gFw *libsnnet.Firewall + +//TODO: Subscribe to netlink event to monitor physical interface changes +//TODO: Why does go not allow chan interface{} +func initNetwork(cancelCh <-chan os.Signal) error { + + cnci := &libsnnet.Cnci{} + + cnci.NetworkConfig = &libsnnet.NetworkConfig{ + Mode: libsnnet.GreTunnel, + } + + if computeNet != "" { + _, cnet, _ := net.ParseCIDR(computeNet) + if cnet == nil { + return fmt.Errorf("Unable to Parse CIDR :" + computeNet) + } + cnci.ComputeNet = []net.IPNet{*cnet} + } + + if mgmtNet != "" { + _, mnet, _ := net.ParseCIDR(mgmtNet) + if mnet == nil { + return fmt.Errorf("Unable to Parse CIDR :" + mgmtNet) + } + cnci.ManagementNet = []net.IPNet{*mnet} + } + + var err error + delays := []int64{1, 2, 5, 10, 20, 40, 60} + for _, d := range delays { + err = cnci.Init() + if err == nil { + break + } + glog.Infof("cnci network failed %v retrying in %v", err, d) + select { + case <-time.After(time.Duration(d) * time.Second): + case <-cancelCh: + glog.Infof("cnci.Init network cancelled %s", err.Error()) + return fmt.Errorf("cnci.Init cancelled") + } + } + if err != nil { + glog.Errorf("cnci.Init network failed %s", err.Error()) + return err + } + + gCnci = cnci + + if enableNetwork { + fw, err := libsnnet.InitFirewall(gCnci.ComputeLink[0].Attrs().Name) + if err != nil { + glog.Errorf("Firewall initialize failed %v", err) + } + gFw = fw + } + glog.Infof("Network Initialized %v", gCnci) + + return nil +} + +func unmarshallSubnetParams(cmd *payloads.TenantAddedEvent) (*net.IPNet, int, net.IP, error) { + const maxKey = ^uint32(0) + + _, snet, err := net.ParseCIDR(cmd.TenantSubnet) + if err != nil { + return nil, 0, nil, fmt.Errorf("Invalid Remote subnet %s", 
err.Error()) + } + + cIP := net.ParseIP(cmd.AgentIP) + if cIP == nil { + return nil, 0, nil, fmt.Errorf("Invalid CN IP %s", cmd.ConcentratorIP) + } + + //TODO + //When we go away from a 1:1 subnet to key map remove this check + //Today this ensures the sanity of the YAML and CN + key := int(binary.LittleEndian.Uint32(snet.IP)) + subnetKey := cmd.SubnetKey + if key != subnetKey { + return nil, 0, nil, fmt.Errorf("Invalid Subnet Key %s %x", cmd.TenantSubnet, cmd.SubnetKey) + } + + return snet, subnetKey, cIP, nil +} + +func genIPsInSubnet(subnet net.IPNet) []net.IP { + + var allIPs []net.IP + + ip := subnet.IP.To4().Mask(subnet.Mask) + + //Calculate subnet size + ones, bits := subnet.Mask.Size() + if bits != 32 || ones > 30 || ones == 0 { + return nil + } + subnetSize := ^(^0 << uint32(32-ones)) + subnetSize -= 3 //network, gateway and broadcast + + //Skip the network address and gateway + ip[3] += 2 + startU32 := binary.BigEndian.Uint32(ip) + + //Generate all valid IPs in this subnet + for i := 0; i < subnetSize; i++ { + vIP := make(net.IP, net.IPv4len) + binary.BigEndian.PutUint32(vIP, startU32+uint32(i)) + allIPs = append(allIPs, vIP) + } + return allIPs +} + +func natSSHSubnet(action libsnnet.FwAction, subnet net.IPNet, intIf string, extIf string) error { + + err := gFw.ExtFwding(action, extIf, intIf) + if err != nil { + return fmt.Errorf("Error: NAT %v failed %v", action, err) + } + + ips := genIPsInSubnet(subnet) + for _, ip := range ips { + extPort, err := libsnnet.DebugSSHPortForIP(ip) + if err != nil { + return fmt.Errorf("Error: ssh fwd %v failed %v", action, err) + } + glog.Infof("ssh fwd IP[%s] Port[%d] %d %d", ip, extPort, ip[2], ip[3]) + + err = gFw.ExtPortAccess(action, "tcp", extIf, extPort, ip, 22) + if err != nil { + return fmt.Errorf("Error: ssh fwd %v failed %v", action, err) + } + } + return nil +} + +func addRemoteSubnet(cmd *payloads.TenantAddedEvent) error { + rs, tk, rip, err := unmarshallSubnetParams(cmd) + + if err != nil { + 
glog.Errorf("cnci.AddRemoteSubnet invalid params %s %x %s %s", rs, tk, rip, err) + return err + } + + if !enableNetwork { + return nil + } + bridge, err := gCnci.AddRemoteSubnet(*rs, tk, rip) + if err != nil { + glog.Errorf("cnci.AddRemoteSubnet failed %s %x %s %s", rs, tk, rip, err) + return err + } + + glog.Infof("cnci.AddRemoteSubnet success %s %x %s", rs, tk, rip, err) + + if enableNATssh && bridge != "" { + err = natSSHSubnet(libsnnet.FwEnable, *rs, bridge, gCnci.ComputeLink[0].Attrs().Name) + if err != nil { + glog.Errorf("enable ssh nat failed %s %x %s %s", rs, tk, bridge, err) + return err + } + glog.Infof("cnci.AddRemoteSubnet ssh nat success %s %x %s", rs, tk, bridge, err) + } + return nil +} + +func delRemoteSubnet(cmd *payloads.TenantAddedEvent) error { + rs, tk, rip, err := unmarshallSubnetParams(cmd) + + if err != nil { + glog.Errorf("cnci.delRemoteSubnet invalid params %s %x %s %s", rs, tk, rip, err) + return err + } + + if !enableNetwork { + return nil + } + + err = gCnci.DelRemoteSubnet(*rs, tk, rip) + if err != nil { + glog.Errorf("cnci.DelRemoteSubnet failed %s %x %s %s", rs, tk, rip, err) + return err + } + glog.Infof("cnci.DelRemoteSubnet success %s %x %s", rs, tk, rip, err) + + /* We do not delete the bridge till reset. + if enableNATssh { + err = natSshSubnet(libsnnet.FwDisable, *rs, bridge, gCnci.ComputeLink[0].Attrs().Name) + if err != nil { + glog.Errorf("disable ssh nat failed %s %x %s %s", rs, tk, bridge, err) + return err + } + } + glog.Infof("cnci.DelRemoteSubnet ssh success %s %x %s", rs, tk, bridge, err) + */ + + return nil +} + +func cnciAddedMarshal(agentUUID string) ([]byte, error) { + var cnciAdded payloads.EventConcentratorInstanceAdded + evt := &cnciAdded.CNCIAdded + + //TODO: How do we set this up. 
evt.InstanceUUID = agentUUID
+	//NOTE: a dead store of gCnci.ID was removed here; the scheduler-assigned
+	//agentUUID is authoritative for the InstanceUUID field
+	evt.TenantUUID = gCnci.Tenant
+	evt.ConcentratorIP = gCnci.ComputeAddr[0].IP.String()
+	evt.ConcentratorMAC = gCnci.ComputeLink[0].Attrs().HardwareAddr.String()
+
+	if evt.ConcentratorIP == "" || evt.ConcentratorMAC == "" {
+		glog.Errorf("cnci.cnciAddedMarshal invalid physical configuration")
+		return nil, fmt.Errorf("cnci.cnciAddedMarshal invalid physical configuration")
+	}
+
+	glog.Infoln("cnciAdded Event ", cnciAdded)
+
+	return yaml.Marshal(&cnciAdded)
+}
+
+//sendNetworkEvent marshals and sends an SSNTP event to the scheduler,
+//failing fast when the client is not connected
+func sendNetworkEvent(client *ssntpConn, eventType ssntp.Event, eventInfo interface{}) error {
+
+	if !client.isConnected() {
+		return fmt.Errorf("Unable to send %s %v", eventType, eventInfo)
+	}
+
+	payload, err := generateNetEventPayload(eventType, eventInfo, client.UUID())
+	if err != nil {
+		return fmt.Errorf("Unable parse ssntpEvent %s %v", err, eventInfo)
+	}
+
+	n, err := client.SendEvent(eventType, payload)
+	if err != nil {
+		return fmt.Errorf("Unable to send %s %s %v %d", err.Error(), eventType, eventInfo, n)
+	}
+
+	return nil
+}
+
+//generateNetEventPayload builds the YAML payload for the subset of SSNTP
+//events this agent emits; unsupported event types are rejected
+func generateNetEventPayload(eventType ssntp.Event, eventInfo interface{}, agentUUID string) ([]byte, error) {
+
+	switch eventType {
+	case ssntp.ConcentratorInstanceAdded:
+		glog.Infof("generating cnciAdded Event Payload %s", agentUUID)
+		return cnciAddedMarshal(agentUUID)
+	case ssntp.PublicIPAssigned:
+		glog.Infof("generating publicIP Assigned Event Payload %s", agentUUID)
+		return nil, nil
+	default:
+		return nil, fmt.Errorf("Unsupported ssntpEventInfo type: %v", eventType)
+	}
+
+}
+
+//unmarshallPubIP validates and parses the private/public IP pair of a
+//PublicIPCommand payload
+func unmarshallPubIP(cmd *payloads.PublicIPCommand) (net.IP, net.IP, error) {
+
+	prIP := net.ParseIP(cmd.PrivateIP)
+	puIP := net.ParseIP(cmd.PublicIP)
+
+	switch {
+	case prIP == nil:
+		return nil, nil, fmt.Errorf("invalid private IP %v", cmd.PrivateIP)
+	case puIP == nil:
+		return nil, nil, fmt.Errorf("invalid public IP %v", cmd.PublicIP)
+	}
+
+	return prIP, puIP, nil
+
+}
+
+func assignPubIP(cmd
*payloads.PublicIPCommand) error {
+
+	prIP, puIP, err := unmarshallPubIP(cmd)
+
+	if err != nil {
+		glog.Errorf("cnci.assignPubIP invalid params %v %v", err, cmd)
+		//Propagate the failure instead of silently reporting success,
+		//consistent with addRemoteSubnet/delRemoteSubnet
+		return err
+	}
+	if enableNetwork {
+		glog.Infof("cnci.assignPubIP success %v %v %v", prIP, puIP, cmd)
+	}
+
+	return nil
+}
+
+func releasePubIP(cmd *payloads.PublicIPCommand) error {
+
+	prIP, puIP, err := unmarshallPubIP(cmd)
+
+	if err != nil {
+		glog.Errorf("cnci.releasePubIP invalid params %v %v", err, cmd)
+		//Propagate the failure instead of silently reporting success
+		return err
+	}
+	if enableNetwork {
+		glog.Infof("cnci.releasePubIP success %v %v %v", prIP, puIP, cmd)
+	}
+
+	return nil
+}
diff --git a/networking/cnci_agent/scripts/README.md b/networking/cnci_agent/scripts/README.md
new file mode 100644
index 000000000..aa55d0bce
--- /dev/null
+++ b/networking/cnci_agent/scripts/README.md
@@ -0,0 +1,83 @@
+# CNCI Image Creation Tools #
+
+## Overview ##
+
+Helper scripts to provision and test CNCI Images
+
+## CNCI Image Provisioning ##
+
+The CNCI Image creation scripts helps you create a CNCI Image from
+a clear linux cloud image. Clear cloud images can be obtained from
+
+https://download.clearlinux.org/image/
+
+The scripts are used to provision the image with the CNCI Agent and
+the certificates it needs to connect to the ciao-scheduler.
+
+0. The image has to be preprovisioned with the following tools
+   - dnsmasq
+   - iptables
+1. Place the appropriate certificates under the certs directory
+
+```
+    ├── certs
+    │   ├── CAcert-server-localhost.pem
+    │   ├── cert-client-localhost.pem
+```
+
+
+2. Ensure that you have built and installed the cnci agent
+```
+    cd $GOPATH/src/github.com/01org/ciao/networking/cnci_agent
+    go install
+```
+3. Update the image
+```
+./update_cnci_cloud_image.sh
+```
+
+This will yield a provisioned image. This can be used as a CNCI VM.
+
+## CNCI Verification ##
+
+A simple script to launch the CNCI VM using QEMU and a sample cloud-init
+configuration.
The cloud-init is set up to check if the CNCI Agent can
+be successfully launched within this VM
+
+0. Customize the cloud-init files
+
+```
+    ├── ciao
+    │   └── ciao.yaml
+    ├── seed
+    │   └── openstack
+    │   └── latest
+    ├── meta_data.json
+    │   └── user_data
+```
+
+1. Launch the VM (it will cloud-init the image)
+
+```
+    sudo ./run_cnci_vm.sh
+```
+2. Log into the VM using the cloud-init provisioned user/password (default ciao/ciao)
+3. Verify the successful launch of the CNCI using
+   systemctl status cnci-agent
+
+An output of the form shown below indicates a successful provisioning of
+the agent.
+
+```
+ciao@cncihostname ~ $ systemctl status cnci-agent -l
+● cnci-agent.service - Ciao CNCI Agent
+   Loaded: loaded (/usr/lib/systemd/system/cnci-agent.service; enabled; vendor preset: disabled)
+   Active: active (running) since Thu 2016-04-07 20:34:40 UTC; 27s ago
+ Main PID: 229 (cnci_agent)
+   CGroup: /system.slice/cnci-agent.service
+           └─229 /usr/sbin/cnci_agent -server auto -v 3
+```
+
+Note: This boot will result in the cloud-init of the image. Hence the original
+image generated prior to the verification should be used as the CNCI image.
+
diff --git a/networking/cnci_agent/scripts/certs/README b/networking/cnci_agent/scripts/certs/README
new file mode 100644
index 000000000..664705503
--- /dev/null
+++ b/networking/cnci_agent/scripts/certs/README
@@ -0,0 +1,7 @@
+Please place the Agent and CA certificate in this directory
+
+The certs should currently be named as follows
+- CAcert-server-localhost.pem
+- cert-client-localhost.pem
+
+This can be customized by customizing the systemd unit file
diff --git a/networking/cnci_agent/scripts/ciao/ciao.yaml b/networking/cnci_agent/scripts/ciao/ciao.yaml
new file mode 100644
index 000000000..26b0acadd
--- /dev/null
+++ b/networking/cnci_agent/scripts/ciao/ciao.yaml
@@ -0,0 +1 @@
+scheduler_addr: 192.168.0.101
diff --git a/networking/cnci_agent/scripts/cnci-agent.service b/networking/cnci_agent/scripts/cnci-agent.service
new file mode 100644
index 000000000..050f26b05
--- /dev/null
+++ b/networking/cnci_agent/scripts/cnci-agent.service
@@ -0,0 +1,13 @@
+[Unit]
+Description=Ciao CNCI Agent
+After=network.target
+
+[Service]
+ExecStart=/usr/sbin/cnci_agent -server auto -v 3
+ExecReload=/bin/kill -HUP $MAINPID
+KillMode=process
+Restart=on-failure
+RestartSec=31s
+
+[Install]
+WantedBy=default.target
diff --git a/networking/cnci_agent/scripts/run_cnci_vm.sh b/networking/cnci_agent/scripts/run_cnci_vm.sh
new file mode 100755
index 000000000..c99188106
--- /dev/null
+++ b/networking/cnci_agent/scripts/run_cnci_vm.sh
@@ -0,0 +1,57 @@
+#!/bin/bash
+
+
+#Create the cloud-init and Ciao specific ISO
+xorriso -as mkisofs -R -V config-2 -o seed.iso seed/
+xorriso -as mkisofs -R -V config-2 -o ciao.iso ciao/
+
+if [ -z "$1" ]; then
+	IMAGE="clear-6580-cloud-cnci.img.qcow2"
+else
+	IMAGE="$1"
+fi
+
+if [ -z "$2" ]; then
+	PDEV="eth1"
+else
+	PDEV="$2"
+fi
+
+if [ -z "$3" ]; then
+	MACVTAP="macvtap0"
+else
+	MACVTAP="$3"
+fi
+
+
+#Create your own macvtap device with a random mac
+sudo ip link del $MACVTAP
+sudo ip link add link $PDEV name $MACVTAP type macvtap mode bridge
+sudo ip link set $MACVTAP address 02:00:DE:AD:02:01 up +sudo ip link show $MACVTAP + +if [[ "$IMAGE" =~ .xz$ ]]; then + >&2 echo "File \"$IMAGE\" is still xz compressed. Uncompress it first with \"unxz\"" + exit 1 +fi + +if [ ! -f "$IMAGE" ]; then + >&2 echo "Can't find image file \"$IMAGE\"" + exit 1 +fi +rm -f debug.log + +tapindex=$(< /sys/class/net/$MACVTAP/ifindex) +tapdev=/dev/tap$tapindex + +ifconfig $MACVTAP up + +qemu-system-x86_64 \ + -enable-kvm \ + -bios OVMF.fd \ + -smp cpus=4,cores=2 -cpu host \ + -vga none -nographic \ + -drive file="$IMAGE",if=virtio,aio=threads \ + -net nic,model=virtio,macaddr=$(< /sys/class/net/$MACVTAP/address) -net tap,fd=3 3<>$tapdev \ + -drive file=seed.iso,if=virtio -drive file=ciao.iso,if=virtio \ + -debugcon file:debug.log -global isa-debugcon.iobase=0x402 diff --git a/networking/cnci_agent/scripts/seed/openstack/latest/meta_data.json b/networking/cnci_agent/scripts/seed/openstack/latest/meta_data.json new file mode 100644 index 000000000..b49a94fd9 --- /dev/null +++ b/networking/cnci_agent/scripts/seed/openstack/latest/meta_data.json @@ -0,0 +1,4 @@ +{ + "uuid": "cnciuuid", + "hostname": "cncihostname" +} diff --git a/networking/cnci_agent/scripts/seed/openstack/latest/user_data b/networking/cnci_agent/scripts/seed/openstack/latest/user_data new file mode 100644 index 000000000..76282132e --- /dev/null +++ b/networking/cnci_agent/scripts/seed/openstack/latest/user_data @@ -0,0 +1,11 @@ +#cloud-config +runcmd: + - [ touch, "/etc/bootdone" ] +users: + - name: ciao + gecos: ciaogecos + lock-passwd: false + #Temporary password for debugging the image, set to ciao + passwd: "$6$rounds=4096$5UnuSTz7u$3uzCRd62GSgVnmYkGPTpX9BWKLU3rf4jYdpOf22J/OWJqbdsVgudq.l7zRmcq9XAfKJr3pyzpb41lH/y6SrEo1" + sudo: ciao ALL=(ALL) NOPASSWD:ALL + #- ssh-rsa PLAC_YOURKEY_HERE diff --git a/networking/cnci_agent/scripts/update_cnci_cloud_image.sh b/networking/cnci_agent/scripts/update_cnci_cloud_image.sh new file mode 100755 index 000000000..7a0f864cd 
--- /dev/null
+++ b/networking/cnci_agent/scripts/update_cnci_cloud_image.sh
@@ -0,0 +1,93 @@
+#!/bin/bash
+
+#Note: the clear image should have dnsmasq and iptables installed
+#swupd bundle-add sysadmin-hostmgmt/kvm-host network-advanced
+#You can take any clear image that supports GRE tunneling (6000 and beyond)
+
+#TODO: Add code here to clear out any cloud init that was carried out
+
+if [ -z "$1" ]; then
+	IMAGE="clear-6580-cloud-cnci.img.qcow2"
+else
+	IMAGE="$1"
+fi
+
+if [ -z "$2" ]; then
+	CERTS_DIR=$GOPATH/src/github.com/01org/ciao/networking/cnci_agent/scripts/certs
+else
+	CERTS_DIR=$2
+fi
+
+if [ -z "$3" ]; then
+	CNCI_AGENT=$GOPATH/bin/cnci_agent
+else
+	CNCI_AGENT=$3
+fi
+
+if [ -z "$4" ]; then
+	CNCI_SYSD=$GOPATH/src/github.com/01org/ciao/networking/cnci_agent/scripts/cnci-agent.service
+else
+	CNCI_SYSD=$4
+fi
+
+if [ -z "$5" ]; then
+	PARTITION="2"
+else
+	PARTITION=$5
+fi
+
+
+
+echo "mounting image"
+echo $IMAGE
+sudo mkdir -p /mnt/tmp
+sudo modprobe nbd max_part=63
+sudo qemu-nbd -c /dev/nbd0 $IMAGE
+sudo mount /dev/nbd0p$PARTITION /mnt/tmp
+
+echo "Cleanup artifacts"
+sudo ls -alp /mnt/tmp/var/lib/ciao
+sudo rm -rf /mnt/tmp/var/lib/ciao
+#echo "Checking cleanup"
+#sudo ls -alp /mnt/tmp/var/lib/ciao
+
+#Copy the cnci_agent image
+echo "copying agent image" $
+sudo cp $CNCI_AGENT /mnt/tmp/usr/sbin/
+
+sudo ls -alp /mnt/tmp/usr/sbin/cnci_agent
+sudo ls -alp $CNCI_AGENT
+sudo diff $CNCI_AGENT /mnt/tmp/usr/sbin/cnci_agent
+
+#Copy the cnci_agent systemd service script
+echo "copying agent systemd service script" $
+sudo cp $CNCI_SYSD /mnt/tmp/usr/lib/systemd/system/
+
+sudo ls -alp /mnt/tmp/usr/lib/systemd/system/cnci-agent.service
+sudo ls -alp $CNCI_SYSD
+sudo diff $CNCI_SYSD /mnt/tmp/usr/lib/systemd/system/cnci-agent.service
+
+#Install the systemd service
+#Hacking it.
Ideally do it with chroot +echo "installing the service" +sudo mkdir -p /mnt/tmp/etc/systemd/system/default.target.wants +sudo rm /mnt/tmp/etc/systemd/system/default.target.wants/cnci-agent.service +sudo chroot /mnt/tmp /bin/bash -c "sudo ln -s /usr/lib/systemd/system/cnci-agent.service /etc/systemd/system/default.target.wants/" +sudo ls -alp /mnt/tmp/etc/systemd/system/default.target.wants + +#Copy the certs +echo "copying the certs" +sudo mkdir -p /mnt/tmp/var/lib/ciao/ +sudo cp $CERTS_DIR/*.pem /mnt/tmp/var/lib/ciao/ +sudo ls -alp /mnt/tmp/var/lib/ciao/ + +#Remove cloud-init traces (hack) +#echo "Checking cleanup" +#sudo ls -alp /mnt/tmp/var/lib/cloud +sudo rm -rf /mnt/tmp/var/lib/cloud +#sudo ls -alp /mnt/tmp/var/lib/cloud + +#Umount +echo "done unmounting" +sudo umount /mnt/tmp +sudo qemu-nbd -d /dev/nbd0 diff --git a/networking/cnci_agent/test_cnci_server/README.md b/networking/cnci_agent/test_cnci_server/README.md new file mode 100644 index 000000000..0928e2631 --- /dev/null +++ b/networking/cnci_agent/test_cnci_server/README.md @@ -0,0 +1,20 @@ +# CNCI Test Server # + +## Overview ## + +A simple SSNTP server that can be used to perform unit testing of the CNCI +agent. The CNCI Test Server sends a stream of event and command to any CNCI +agent that registers with it. + +The CNCI Agent is expected to handle all the requests appropriately. + +### Warning ### + +If the server is used to test a CNCI running with a CNCI VM then the server +has to be run a machine that is different from the one that is hosting the +CNCI VM. 
+
+This is required as the CNCI VM uses macvtap for networking and macvtap
+traffic cannot be received by the host without complex network plumbing
+
+
diff --git a/networking/cnci_agent/test_cnci_server/server.go b/networking/cnci_agent/test_cnci_server/server.go
new file mode 100644
index 000000000..90e04085c
--- /dev/null
+++ b/networking/cnci_agent/test_cnci_server/server.go
@@ -0,0 +1,242 @@
+//
+// Copyright (c) 2016 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+// + +package main + +import ( + "flag" + "fmt" + "os" + "runtime/pprof" + "time" + + "gopkg.in/yaml.v2" + + "github.com/01org/ciao/payloads" + "github.com/01org/ciao/ssntp" +) + +type ssntpTestServer struct { + ssntp ssntp.Server + name string + nConnections int + nCommands int + nStatuses int + nErrors int + nEvents int +} + +const cnciUUID = "3390740c-dce9-48d6-b83a-a717417072ce" +const tenantUUID = "2491851d-dce9-48d6-b83a-a717417072ce" +const instanceUUID = "2478251d-dce9-48d6-b83a-a717417072ce" +const agentUUID = "2478711d-dce9-48d6-b83a-a717417072ce" +const cnciIP = "192.168.0.110" +const agentIP = "192.168.0.101" +const instancePublicIP = "10.1.2.3" +const instancePrivateIP = "192.168.0.2" +const vnicMAC = "aa:bb:cc:01:02:03" +const tenantSubnet = "172.16.1.0/24" +const subnetKey = (172 + 16<<8 + 1<<16) + +func publicIPCmd() (cmd payloads.PublicIPCommand) { + cmd.ConcentratorUUID = cnciUUID + cmd.TenantUUID = tenantUUID + cmd.InstanceUUID = instanceUUID + cmd.PublicIP = instancePublicIP + cmd.PrivateIP = instancePrivateIP + cmd.VnicMAC = vnicMAC + return cmd +} + +func tenantEvent() (evt payloads.TenantAddedEvent) { + evt.AgentUUID = agentUUID + evt.AgentIP = agentIP + evt.TenantUUID = tenantUUID + evt.TenantSubnet = tenantSubnet + evt.ConcentratorUUID = cnciUUID + evt.ConcentratorIP = cnciIP + evt.SubnetKey = subnetKey + return evt +} + +func assignPublicIPMarshal() ([]byte, error) { + assignIP := payloads.CommandAssignPublicIP{ + AssignIP: publicIPCmd(), + } + y, err := yaml.Marshal(&assignIP) + if err != nil { + return nil, err + } + return y, nil +} + +func releasePublicIPMarshal() ([]byte, error) { + releaseIP := payloads.CommandReleasePublicIP{ + ReleaseIP: publicIPCmd(), + } + y, err := yaml.Marshal(&releaseIP) + if err != nil { + return nil, err + } + return y, nil +} + +func tenantAddedMarshal() ([]byte, error) { + tenantAdded := payloads.EventTenantAdded{ + TenantAdded: tenantEvent(), + } + y, err := yaml.Marshal(&tenantAdded) + if err != nil { + 
return nil, err + } + return y, nil +} + +func tenantRemovedMarshal() ([]byte, error) { + tenantRemoved := payloads.EventTenantRemoved{ + TenantRemoved: tenantEvent(), + } + y, err := yaml.Marshal(&tenantRemoved) + if err != nil { + return nil, err + } + return y, nil +} + +type logger struct{} + +func (l logger) Infof(format string, args ...interface{}) { + fmt.Printf("INFO: Test Server: "+format, args...) +} + +func (l logger) Errorf(format string, args ...interface{}) { + fmt.Printf("ERROR: Test Server: "+format, args...) +} + +func (l logger) Warningf(format string, args ...interface{}) { + fmt.Printf("WARNING: Test Server: "+format, args...) +} + +func (server *ssntpTestServer) ConnectNotify(uuid string, role uint32) { + server.nConnections++ + fmt.Printf("%s: %s connected (role 0x%x, current connections %d)\n", server.name, uuid, role, server.nConnections) + + //Send out the command and events right here + //Also create a table to drive this with type, type, payload + if role == ssntp.CNCIAGENT { + payload, _ := tenantAddedMarshal() + server.ssntp.SendEvent(uuid, ssntp.TenantAdded, payload) + time.Sleep(time.Second) + + payload, _ = assignPublicIPMarshal() + server.ssntp.SendCommand(uuid, ssntp.AssignPublicIP, payload) + time.Sleep(time.Second) + + payload, _ = releasePublicIPMarshal() + server.ssntp.SendCommand(uuid, ssntp.ReleasePublicIP, payload) + time.Sleep(time.Second) + + payload, _ = tenantRemovedMarshal() + server.ssntp.SendEvent(uuid, ssntp.TenantRemoved, payload) + time.Sleep(time.Second) + + payload, _ = tenantAddedMarshal() + server.ssntp.SendEvent(uuid, ssntp.TenantAdded, payload) + time.Sleep(time.Second) + } + +} + +func (server *ssntpTestServer) DisconnectNotify(uuid string) { + server.nConnections-- + fmt.Printf("%s: %s disconnected (current connections %d)\n", server.name, uuid, server.nConnections) +} + +func (server *ssntpTestServer) StatusNotify(uuid string, status ssntp.Status, frame *ssntp.Frame) { + server.nStatuses++ + 
fmt.Printf("%s: STATUS (#%d) from %s\n", server.name, server.nStatuses, uuid) + //server.ssntp.SendStatus(uuid, status, payload) +} + +func (server *ssntpTestServer) CommandNotify(uuid string, command ssntp.Command, frame *ssntp.Frame) { + server.nCommands++ + //server.ssntp.SendCommand(uuid, command, payload) + fmt.Printf("%s: CMD (#%d) from %s\n", server.name, server.nCommands, uuid) +} + +func (server *ssntpTestServer) EventNotify(uuid string, event ssntp.Event, frame *ssntp.Frame) { + payload := frame.Payload + + server.nEvents++ + fmt.Printf("%s: EVENT (#%d)from %s\n", server.name, server.nEvents, uuid) + if event == ssntp.ConcentratorInstanceAdded { + var cnciAdded payloads.EventConcentratorInstanceAdded + + err := yaml.Unmarshal(payload, &cnciAdded) + if err != nil { + fmt.Printf("Error unmarshaling cnciAdded [%s]\n", err) + } + fmt.Printf("CNCI Added Event [%d]\n", len(payload)) + fmt.Printf("instance UUID field [%s] \n", cnciAdded.CNCIAdded.InstanceUUID) + fmt.Printf("tenant UUID field [%s]", cnciAdded.CNCIAdded.TenantUUID) + fmt.Printf("CNCI IP field [%s]", cnciAdded.CNCIAdded.ConcentratorIP) + fmt.Printf("CNCI MAC field [%s]", cnciAdded.CNCIAdded.ConcentratorMAC) + } +} + +func (server *ssntpTestServer) ErrorNotify(uuid string, error ssntp.Error, frame *ssntp.Frame) { + server.nErrors++ + fmt.Printf("%s: ERROR (#%d)from %s\n", server.name, server.nErrors, uuid) +} + +func main() { + var cert = flag.String("cert", "/var/lib/ciao/cert-server-localhost.pem", "Client certificate") + var CAcert = flag.String("cacert", "/var/lib/ciao/CAcert-server-localhost.pem", "CA certificate") + var cpuprofile = flag.String("cpuprofile", "", "Write cpu profile to file") + var config ssntp.Config + + flag.Parse() + server := &ssntpTestServer{ + name: "Network Test Server", + nConnections: 0, + nCommands: 0, + nStatuses: 0, + nErrors: 0, + } + + if len(*cpuprofile) != 0 { + f, err := os.Create(*cpuprofile) + if err != nil { + fmt.Print(err) + } + pprof.StartCPUProfile(f) + 
defer pprof.StopCPUProfile() + } + + config.Log = logger{} + config.CAcert = *CAcert + config.Cert = *cert + // config.DebugInterface = true + // Forward STATS to all Controllers + config.ForwardRules = []ssntp.FrameForwardRule{ + { + Operand: ssntp.STATS, + Dest: ssntp.Controller, + }, + } + + server.ssntp.Serve(&config, server) +} diff --git a/networking/docker/plugin/README b/networking/docker/plugin/README new file mode 100644 index 000000000..7c25ae699 --- /dev/null +++ b/networking/docker/plugin/README @@ -0,0 +1,89 @@ +This is simple standalone Docker Plugin implementation designed to test against +changes made in the docker network and IPAM plugin framework. + +It is designed to be run standalone so that any changes made in the docker +plugin framework can be tested against the evolution of the docker networking + +In ciao the plugin acts as a slave to the ciao networking framework. + +The goal here is to do no work in the plugin except inform the docker +daemon about the veth interface it needs to place inside the container + +Hence the real flow will be as follows + +0. Laucher starts the docker http server plugin thread + +Note: The launcher should be launched prior to the docker daemon. + Also we need to configure docker daemon to not create its default + bridge and host networks as they cause problems. + +1. Launcher gets a request to launch a container + The request from the Controller to launcher already has the following + information (IP Address, MAC Address and subnet for the VNIC) + Note: Based on the current ciao design the gateway for the + subnet can be inferred. + +2. Launcher invokes ciao networking to create a Container Vnic + +3. ciao Networking + a. Creates a veth pair + b. Assigns the macaddress to the container side of veth pair + c. Attaches the veth to the tenant bridge (creating it, if needed) + d. Returns the fully configured docker side veth pair to Launcher + e. 
Also notified launcher if the subnet needs to be created + (Note: This is the docker logical subnet) + +4. (Launcher) if a subnet creation request was returned. Uses docker API + or command line to instantiate the network in the docker database + + docker network create -d=ciao + --ipam-driver=ciao + --subnet= + --gateway= + --opt "bridge"= + subnet.Name + + Note: Our custom IPAM driver is needed to support overlapping subnets + between tenants. Otherwise the default IPAM driver meets our needs. + + Note: Fully speccing the network creation and handing control to the + ciao driver (-d) makes docker a passthro for networking. + + In the future any additional information need by the plugin can also been + sent as more options. e.g. + --opt "cnci"= + + - This in turn will result in a callback to the plugin. + + - The plugin will record this information and return success + +5. (Launcher) will then request docker to create & launch the container, + again fully specifing the networking configuration. + + docker run -it --net= --ip= --mac-address= busybox + + WARNING: There is a bug in the latest docker 1.10.03 (which has been fixed + in the 1.11 dev version) which does not pass the --ip parameter to the + remote IPAM plugin. Without this we cannot use our IPAM driver + +6. The ciao docker plugin acts as both a network and IPAM remote plugin. + It handles all the requests. Some of the more imporant ones are + + a. EndPointCreate: If the container is being created for the first time + As we have already created the VNIC, we only need to cache the endpoint + id to instance map + b. Join: When the end point is being placed inside the container + On Join the plugin will return back to docker the following information + - name of the veth pair to place within the container + - the ethernet device name prefix to be assigned to the logic + interface within the container (e.g. 
eth or eno) + - the default gw for the container + - any other static routes to be added within the container (if needed) + + Note: We will delete only when the launcher tells us to tear down networking. + Not when docker logically tears down the network. + +7. The docker daemon will use the values sent back by the plugin to launch the container + Move the veth into the docker container and give it the logical name. + Setup the IP address and gateway + diff --git a/networking/docker/plugin/ciao.json b/networking/docker/plugin/ciao.json new file mode 100644 index 000000000..9f022dc1b --- /dev/null +++ b/networking/docker/plugin/ciao.json @@ -0,0 +1,4 @@ +{ + "Name": "ciao", + "Addr": "http://127.0.0.1:9999" +} diff --git a/networking/docker/plugin/plugin.go b/networking/docker/plugin/plugin.go new file mode 100644 index 000000000..f418c4e35 --- /dev/null +++ b/networking/docker/plugin/plugin.go @@ -0,0 +1,810 @@ +// +// Copyright (c) 2016 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +package main + +import ( + "bytes" + "encoding/gob" + "encoding/json" + "flag" + "fmt" + "io/ioutil" + "net" + "net/http" + "os/exec" + "sync" + "time" + + "github.com/boltdb/bolt" + "github.com/docker/distribution/uuid" + "github.com/docker/libnetwork/drivers/remote/api" + ipamapi "github.com/docker/libnetwork/ipams/remote/api" + "github.com/golang/glog" + "github.com/gorilla/mux" +) + +type epVal struct { + Cveth string + Hveth string +} + +type nwVal struct { + Bridge string + Gateway net.IPNet +} + +var counter int + +var epMap struct { + sync.Mutex + m map[string]*epVal +} + +var nwMap struct { + sync.Mutex + m map[string]*nwVal +} + +var dbFile string +var db *bolt.DB + +func init() { + epMap.m = make(map[string]*epVal) + nwMap.m = make(map[string]*nwVal) + dbFile = "/tmp/bolt.db" +} + +func sendResponse(resp interface{}, w http.ResponseWriter) error { + rb, err := json.Marshal(resp) + glog.Infof("Sending response := %v, %v", resp, err) + fmt.Fprintf(w, "%s", rb) + return err +} + +func getBody(r *http.Request) ([]byte, error) { + body, err := ioutil.ReadAll(r.Body) + glog.Infof("URL [%s] Body [%s] Error [%v]", r.URL.Path[1:], string(body), err) + return body, err +} + +func handler(w http.ResponseWriter, r *http.Request) { + body, _ := getBody(r) + resp := api.Response{} + resp.Err = "Unhandled API request " + string(r.URL.Path[1:]) + " " + string(body) + sendResponse(resp, w) +} + +func handlerPluginActivate(w http.ResponseWriter, r *http.Request) { + getBody(r) + //TODO: Where is this encoding? 
+ resp := `{ + "Implements": ["NetworkDriver", "IpamDriver"] +}` + fmt.Fprintf(w, "%s", resp) +} + +func handlerGetCapabilities(w http.ResponseWriter, r *http.Request) { + getBody(r) + resp := api.GetCapabilityResponse{Scope: "local"} + sendResponse(resp, w) +} + +func handlerCreateNetwork(w http.ResponseWriter, r *http.Request) { + resp := api.CreateNetworkResponse{} + + body, err := getBody(r) + if err != nil { + resp.Err = "Error: " + err.Error() + sendResponse(resp, w) + return + } + + req := api.CreateNetworkRequest{} + if err := json.Unmarshal(body, &req); err != nil { + resp.Err = "Error: " + err.Error() + sendResponse(resp, w) + return + } + + v, ok := req.Options["com.docker.network.generic"].(map[string]interface{}) + if !ok { + resp.Err = "Error: network options incorrect or unspecified. Please provide bridge info" + sendResponse(resp, w) + return + } + + bridge, ok := v["bridge"].(string) + if !ok { + resp.Err = "Error: network incorrect or unspecified. Please provide bridge info" + sendResponse(resp, w) + return + } + + nwMap.Lock() + defer nwMap.Unlock() + + //Record the docker network UUID to SDN bridge mapping + //This has to survive a plugin crash/restart and needs to be persisted + nwMap.m[req.NetworkID] = &nwVal{ + Bridge: bridge, + Gateway: *req.IPv4Data[0].Gateway, + } + + if err := dbAdd("nwMap", req.NetworkID, nwMap.m[req.NetworkID]); err != nil { + glog.Errorf("Unable to update db %v", err) + } + + //This will be done in the SDN controller before the API is invoked + cmd := "ip" + args := []string{"link", "add", bridge, "type", "bridge"} + if err := exec.Command(cmd, args...).Run(); err != nil { + glog.Infof("ERROR: [%v] [%v] [%v] ", cmd, args, err) + resp.Err = fmt.Sprintf("Error CreateNetwork: [%v] [%v] [%v]", + cmd, args, err) + sendResponse(resp, w) + return + } + + args = []string{"link", "set", bridge, "up"} + if err := exec.Command(cmd, args...).Run(); err != nil { + glog.Infof("ERROR: [%v] [%v] [%v] ", cmd, args, err) + resp.Err = 
fmt.Sprintf("Error CreateNetwork: [%v] [%v] [%v]", + cmd, args, err) + sendResponse(resp, w) + return + } + + sendResponse(resp, w) +} + +func handlerDeleteNetwork(w http.ResponseWriter, r *http.Request) { + resp := api.DeleteNetworkResponse{} + + body, err := getBody(r) + if err != nil { + resp.Err = "Error: " + err.Error() + sendResponse(resp, w) + return + } + + req := api.DeleteNetworkRequest{} + if err = json.Unmarshal(body, &req); err != nil { + resp.Err = "Error: " + err.Error() + sendResponse(resp, w) + return + } + + glog.Infof("Delete Network := %v", req.NetworkID) + + //This would have already been done in the SDN controller + //Remove the UUID to bridge mapping in cache and in the + //persistent data store + nwMap.Lock() + bridge := nwMap.m[req.NetworkID].Bridge + delete(nwMap.m, req.NetworkID) + if err := dbDelete("nwMap", req.NetworkID); err != nil { + glog.Errorf("Unable to update db %v", err) + } + nwMap.Unlock() + + cmd := "ip" + args := []string{"link", "del", bridge} + if err := exec.Command(cmd, args...).Run(); err != nil { + glog.Infof("ERROR: [%v] [%v] [%v] ", cmd, args, err) + resp.Err = fmt.Sprintf("Error DeleteNetwork: [%v] [%v] [%v]", + cmd, args, err) + sendResponse(resp, w) + return + } + + sendResponse(resp, w) + + return +} + +func handlerEndpointOperInfof(w http.ResponseWriter, r *http.Request) { + resp := api.EndpointInfoResponse{} + body, err := getBody(r) + + if err != nil { + resp.Err = "Error: " + err.Error() + sendResponse(resp, w) + return + } + + req := api.EndpointInfoRequest{} + err = json.Unmarshal(body, &req) + + if err != nil { + resp.Err = "Error: " + err.Error() + sendResponse(resp, w) + return + } + + sendResponse(resp, w) +} + +func handlerCreateEndpoint(w http.ResponseWriter, r *http.Request) { + resp := api.CreateEndpointResponse{} + + body, err := getBody(r) + if err != nil { + resp.Err = "Error: " + err.Error() + sendResponse(resp, w) + return + } + + req := api.CreateEndpointRequest{} + if err = 
json.Unmarshal(body, &req); err != nil { + resp.Err = "Error: " + err.Error() + sendResponse(resp, w) + return + } + + nwMap.Lock() + bridge := nwMap.m[req.NetworkID].Bridge + nwMap.Unlock() + + epMap.Lock() + //These are setup by the SDN controller + counter++ + hVeth := fmt.Sprintf("hveth%d", counter) + cVeth := fmt.Sprintf("cveth%d", counter) + epMap.m[req.EndpointID] = &epVal{ + Cveth: cVeth, + Hveth: hVeth, + } + + if err := dbAdd("epMap", req.EndpointID, epMap.m[req.EndpointID]); err != nil { + glog.Errorf("Unable to update db %v", err) + } + if err := dbAdd("global", "counter", counter); err != nil { + glog.Errorf("Unable to update db %v", err) + } + epMap.Unlock() + + // This would have been done in the SDN controller + // Just update the cache and persistent data base + cmd := "ip" + args := []string{"link", "add", "dev", hVeth, "type", "veth", + "peer", "name", cVeth} + + if err := exec.Command(cmd, args...).Run(); err != nil { + glog.Infof("ERROR: [%v] [%v] [%v] ", cmd, args, err) + resp.Err = fmt.Sprintf("Error EndPointCreate: [%v] [%v] [%v]", + cmd, args, err) + sendResponse(resp, w) + return + } + + args = []string{"link", "set", hVeth, "mtu", "1400"} + if err := exec.Command(cmd, args...).Run(); err != nil { + glog.Infof("ERROR: [%v] [%v] [%v] ", cmd, args, err) + resp.Err = fmt.Sprintf("Error EndPointCreate: [%v] [%v] [%v]", + cmd, args, err) + sendResponse(resp, w) + return + } + + args = []string{"link", "set", cVeth, "mtu", "1400", "addr", req.Interface.MacAddress} + if err := exec.Command(cmd, args...).Run(); err != nil { + glog.Infof("ERROR: [%v] [%v] [%v] ", cmd, args, err) + resp.Err = fmt.Sprintf("Error EndPointCreate: [%v] [%v] [%v]", + cmd, args, err) + sendResponse(resp, w) + return + } + + args = []string{"link", "set", hVeth, "alias", hVeth + "_" + cVeth} + if err := exec.Command(cmd, args...).Run(); err != nil { + glog.Infof("ERROR: [%v] [%v] [%v] ", cmd, args, err) + resp.Err = fmt.Sprintf("Error EndPointCreate: [%v] [%v] [%v]", + 
cmd, args, err) + sendResponse(resp, w) + return + } + + args = []string{"link", "set", hVeth, "master", bridge} + if err := exec.Command(cmd, args...).Run(); err != nil { + glog.Infof("ERROR: [%v] [%v] [%v] ", cmd, args, err) + resp.Err = fmt.Sprintf("Error EndPointCreate: [%v] [%v] [%v]", + cmd, args, err) + sendResponse(resp, w) + return + } + + args = []string{"link", "set", hVeth, "up"} + if err := exec.Command(cmd, args...).Run(); err != nil { + glog.Infof("ERROR: [%v] [%v] [%v] ", cmd, args, err) + resp.Err = fmt.Sprintf("Error EndPointCreate: [%v] [%v] [%v]", + cmd, args, err) + sendResponse(resp, w) + return + } + + sendResponse(resp, w) +} + +func handlerDeleteEndpoint(w http.ResponseWriter, r *http.Request) { + resp := api.DeleteEndpointResponse{} + + body, err := getBody(r) + if err != nil { + resp.Err = "Error: " + err.Error() + sendResponse(resp, w) + return + } + + req := api.DeleteEndpointRequest{} + if err = json.Unmarshal(body, &req); err != nil { + resp.Err = "Error: " + err.Error() + sendResponse(resp, w) + return + } + + epMap.Lock() + m := epMap.m[req.EndpointID] + delete(epMap.m, req.EndpointID) + if err := dbDelete("epMap", req.EndpointID); err != nil { + glog.Errorf("Unable to update db %v", err) + } + epMap.Unlock() + + //This will be done in the SDN controller once the + //container is deleted. 
However at this point there is + //a disconnect between the docker data base and SDN database + cmd := "ip" + args := []string{"link", "del", m.Hveth} + if err := exec.Command(cmd, args...).Run(); err != nil { + glog.Infof("ERROR: [%v] [%v] [%v] ", cmd, args, err) + resp.Err = fmt.Sprintf("Error EndPointDelete: [%v] [%v] [%v]", + cmd, args, err) + sendResponse(resp, w) + return + } + + sendResponse(resp, w) +} + +func handlerJoin(w http.ResponseWriter, r *http.Request) { + resp := api.JoinResponse{} + + body, err := getBody(r) + if err != nil { + resp.Err = "Error: " + err.Error() + sendResponse(resp, w) + return + } + + req := api.JoinRequest{} + if err := json.Unmarshal(body, &req); err != nil { + resp.Err = "Error: " + err.Error() + sendResponse(resp, w) + return + } + + nwMap.Lock() + epMap.Lock() + nm := nwMap.m[req.NetworkID] + em := epMap.m[req.EndpointID] + nwMap.Unlock() + epMap.Unlock() + + resp.Gateway = nm.Gateway.IP.String() + resp.InterfaceName = &api.InterfaceName{ + SrcName: em.Cveth, + DstPrefix: "eth", + } + sendResponse(resp, w) +} + +func handlerLeave(w http.ResponseWriter, r *http.Request) { + resp := api.LeaveResponse{} + + body, err := getBody(r) + if err != nil { + resp.Err = "Error: " + err.Error() + sendResponse(resp, w) + return + } + + req := api.LeaveRequest{} + if err := json.Unmarshal(body, &req); err != nil { + resp.Err = "Error: " + err.Error() + sendResponse(resp, w) + return + } + + sendResponse(resp, w) +} + +func handlerDiscoverNew(w http.ResponseWriter, r *http.Request) { + resp := api.DiscoveryResponse{} + + body, err := getBody(r) + if err != nil { + resp.Err = "Error: " + err.Error() + sendResponse(resp, w) + return + } + + req := api.DiscoveryNotification{} + if err := json.Unmarshal(body, &req); err != nil { + resp.Err = "Error: " + err.Error() + sendResponse(resp, w) + return + } + + sendResponse(resp, w) +} + +func handlerDiscoverDelete(w http.ResponseWriter, r *http.Request) { + resp := api.DiscoveryResponse{} + + body, 
err := getBody(r)
+ if err != nil {
+ resp.Err = "Error: " + err.Error()
+ sendResponse(resp, w)
+ return
+ }
+
+ req := api.DiscoveryNotification{}
+ if err := json.Unmarshal(body, &req); err != nil {
+ resp.Err = "Error: " + err.Error()
+ sendResponse(resp, w)
+ return
+ }
+
+ sendResponse(resp, w)
+}
+
+func handlerExternalConnectivity(w http.ResponseWriter, r *http.Request) {
+ resp := api.ProgramExternalConnectivityResponse{}
+
+ body, err := getBody(r)
+ if err != nil {
+ resp.Err = "Error: " + err.Error()
+ sendResponse(resp, w)
+ return
+ }
+
+ req := api.ProgramExternalConnectivityRequest{}
+ if err := json.Unmarshal(body, &req); err != nil {
+ resp.Err = "Error: " + err.Error()
+ sendResponse(resp, w)
+ return
+ }
+
+ sendResponse(resp, w)
+}
+
+func handlerRevokeExternalConnectivity(w http.ResponseWriter, r *http.Request) {
+ resp := api.RevokeExternalConnectivityResponse{}
+
+ body, err := getBody(r)
+ if err != nil {
+ resp.Err = "Error: " + err.Error()
+ sendResponse(resp, w)
+ return
+ }
+
+ req := api.RevokeExternalConnectivityRequest{} // fix: decode into the request type, not the response type
+ if err := json.Unmarshal(body, &req); err != nil {
+ resp.Err = "Error: " + err.Error()
+ sendResponse(resp, w)
+ return
+ }
+
+ sendResponse(resp, w)
+}
+
+func ipamGetCapabilities(w http.ResponseWriter, r *http.Request) {
+ getBody(r)
+ resp := ipamapi.GetCapabilityResponse{RequiresMACAddress: true}
+ sendResponse(resp, w)
+}
+
+func ipamGetDefaultAddressSpaces(w http.ResponseWriter, r *http.Request) {
+ resp := ipamapi.GetAddressSpacesResponse{}
+ getBody(r)
+
+ resp.GlobalDefaultAddressSpace = ""
+ resp.LocalDefaultAddressSpace = ""
+ sendResponse(resp, w)
+}
+
+func ipamRequestPool(w http.ResponseWriter, r *http.Request) {
+ resp := ipamapi.RequestPoolResponse{}
+
+ body, err := getBody(r)
+ if err != nil {
+ resp.Error = "Error: " + err.Error()
+ sendResponse(resp, w)
+ return
+ }
+
+ req := ipamapi.RequestPoolRequest{}
+ if err := json.Unmarshal(body, &req); err != nil {
+ resp.Error = "Error: " + 
err.Error() + sendResponse(resp, w) + return + } + + resp.PoolID = uuid.Generate().String() + resp.Pool = req.Pool + sendResponse(resp, w) +} + +func ipamReleasePool(w http.ResponseWriter, r *http.Request) { + resp := ipamapi.ReleasePoolResponse{} + + body, err := getBody(r) + if err != nil { + resp.Error = "Error: " + err.Error() + sendResponse(resp, w) + return + } + + req := ipamapi.ReleasePoolRequest{} + if err := json.Unmarshal(body, &req); err != nil { + resp.Error = "Error: " + err.Error() + sendResponse(resp, w) + return + } + + sendResponse(resp, w) +} + +func ipamRequestAddress(w http.ResponseWriter, r *http.Request) { + resp := ipamapi.RequestAddressResponse{} + + body, err := getBody(r) + if err != nil { + resp.Error = "Error: " + err.Error() + sendResponse(resp, w) + return + } + + req := ipamapi.RequestAddressRequest{} + if err := json.Unmarshal(body, &req); err != nil { + resp.Error = "Error: " + err.Error() + sendResponse(resp, w) + return + } + + //TODO: Should come from the subnet mask for the subnet + if req.Address != "" { + resp.Address = req.Address + "/24" + } else { + //DOCKER BUG: The preferred address supplied in --ip does not show up. + //Bug fixed in docker 1.11 + resp.Error = "Error: Request does not have IP address. 
Specify using --ip" + } + sendResponse(resp, w) +} + +func ipamReleaseAddress(w http.ResponseWriter, r *http.Request) { + resp := ipamapi.ReleaseAddressResponse{} + + body, err := getBody(r) + if err != nil { + resp.Error = "Error: " + err.Error() + sendResponse(resp, w) + return + } + + req := ipamapi.ReleaseAddressRequest{} + if err := json.Unmarshal(body, &req); err != nil { + resp.Error = "Error: " + err.Error() + sendResponse(resp, w) + return + } + + sendResponse(resp, w) +} + +func dbTableInit(tables []string) (err error) { + + glog.Infof("dbInit Tables := %v", tables) + for i, v := range tables { + glog.Infof("table[%v] := %v, %v", i, v, []byte(v)) + } + + err = db.Update(func(tx *bolt.Tx) error { + for _, table := range tables { + _, err := tx.CreateBucketIfNotExists([]byte(table)) + if err != nil { + return fmt.Errorf("Bucket creation error: %v %v", table, err) + } + } + return nil + }) + + if err != nil { + glog.Errorf("Table creation error %v", err) + } + + return err +} + +func dbAdd(table string, key string, value interface{}) (err error) { + + err = db.Update(func(tx *bolt.Tx) error { + var v bytes.Buffer + + if err := gob.NewEncoder(&v).Encode(value); err != nil { + glog.Errorf("Encode Error: %v %v", err, value) + return err + } + + bucket := tx.Bucket([]byte(table)) + if bucket == nil { + return fmt.Errorf("Bucket %v not found", table) + } + + err = bucket.Put([]byte(key), v.Bytes()) + if err != nil { + return fmt.Errorf("Key Store error: %v %v %v %v", table, key, value, err) + } + return nil + }) + + return err +} + +func dbDelete(table string, key string) (err error) { + + err = db.Update(func(tx *bolt.Tx) error { + + bucket := tx.Bucket([]byte(table)) + if bucket == nil { + return fmt.Errorf("Bucket %v not found", table) + } + + err = bucket.Delete([]byte(key)) + if err != nil { + return fmt.Errorf("Key Delete error: %v %v ", key, err) + } + return nil + }) + + return err +} + +func dbGet(table string, key string) (value interface{}, err error) 
{ + + err = db.View(func(tx *bolt.Tx) error { + + bucket := tx.Bucket([]byte(table)) + if bucket == nil { + return fmt.Errorf("Bucket %v not found", table) + } + + val := bucket.Get([]byte(key)) + if val == nil { + return nil + } + + v := bytes.NewReader(val) + if err := gob.NewDecoder(v).Decode(value); err != nil { + glog.Errorf("Decode Error: %v %v %v", table, key, err) + return err + } + + return nil + }) + + return value, err +} + +func initDb() error { + + options := bolt.Options{ + Timeout: 3 * time.Second, + } + + var err error + db, err = bolt.Open(dbFile, 0644, &options) + if err != nil { + return fmt.Errorf("dbInit failed %v", err) + } + + tables := []string{"global", "nwMap", "epMap"} + if err := dbTableInit(tables); err != nil { + return fmt.Errorf("dbInit failed %v", err) + } + + c, err := dbGet("global", "counter") + if err != nil { + glog.Errorf("dbGet failed %v", err) + counter = 100 + } else { + var ok bool + counter, ok = c.(int) + if !ok { + counter = 100 + } + } + + err = db.View(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte("nwMap")) + + b.ForEach(func(k, v []byte) error { + vr := bytes.NewReader(v) + nVal := &nwVal{} + if err := gob.NewDecoder(vr).Decode(nVal); err != nil { + return fmt.Errorf("Decode Error: %v %v %v", string(k), string(v), err) + } + nwMap.m[string(k)] = nVal + glog.Infof("nwMap key=%v, value=%v\n", string(k), nVal) + return nil + }) + return nil + }) + + err = db.View(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte("epMap")) + + b.ForEach(func(k, v []byte) error { + vr := bytes.NewReader(v) + eVal := &epVal{} + if err := gob.NewDecoder(vr).Decode(eVal); err != nil { + return fmt.Errorf("Decode Error: %v %v %v", string(k), string(v), err) + } + epMap.m[string(k)] = eVal + glog.Infof("epMap key=%v, value=%v\n", string(k), eVal) + return nil + }) + return nil + }) + + return err +} + +func main() { + flag.Parse() + + if err := initDb(); err != nil { + glog.Fatalf("db init failed, quitting %v", err) + } + defer db.Close() 
+
+ r := mux.NewRouter()
+ r.HandleFunc("/Plugin.Activate", handlerPluginActivate)
+ r.HandleFunc("/NetworkDriver.GetCapabilities", handlerGetCapabilities)
+ r.HandleFunc("/NetworkDriver.CreateNetwork", handlerCreateNetwork)
+ r.HandleFunc("/NetworkDriver.DeleteNetwork", handlerDeleteNetwork)
+ r.HandleFunc("/NetworkDriver.CreateEndpoint", handlerCreateEndpoint)
+ r.HandleFunc("/NetworkDriver.DeleteEndpoint", handlerDeleteEndpoint)
+ r.HandleFunc("/NetworkDriver.EndpointOperInfo", handlerEndpointOperInfof)
+ r.HandleFunc("/NetworkDriver.Join", handlerJoin)
+ r.HandleFunc("/NetworkDriver.Leave", handlerLeave)
+ r.HandleFunc("/NetworkDriver.DiscoverNew", handlerDiscoverNew)
+ r.HandleFunc("/NetworkDriver.DiscoverDelete", handlerDiscoverDelete)
+ r.HandleFunc("/NetworkDriver.ProgramExternalConnectivity", handlerExternalConnectivity)
+ r.HandleFunc("/NetworkDriver.RevokeExternalConnectivity", handlerRevokeExternalConnectivity) // fix: was handlerExternalConnectivity, leaving the revoke handler unused
+
+ r.HandleFunc("/IpamDriver.GetCapabilities", ipamGetCapabilities)
+ r.HandleFunc("/IpamDriver.GetDefaultAddressSpaces", ipamGetDefaultAddressSpaces)
+ r.HandleFunc("/IpamDriver.RequestPool", ipamRequestPool)
+ r.HandleFunc("/IpamDriver.ReleasePool", ipamReleasePool)
+ r.HandleFunc("/IpamDriver.RequestAddress", ipamRequestAddress)
+ r.HandleFunc("/IpamDriver.ReleaseAddress", ipamReleaseAddress)
+
+ r.HandleFunc("/", handler)
+ http.ListenAndServe("127.0.0.1:9999", r)
+} diff --git a/networking/libsnnet/README.md b/networking/libsnnet/README.md new file mode 100644 index 000000000..4c900b0bb --- /dev/null +++ b/networking/libsnnet/README.md @@ -0,0 +1,56 @@ +# Simple Node Network Library # + +## Overview ## + +The Simple Node Network Library (libsnnet) implements a simple SDN controller. +The library implements all the networking setup primitives required in Ciao. 
+ +libsnnet currently provides the following capabilities +- Creation of isolated tenant overlay L2 networks +- Auto assignment of IP Addresses +- Support for Virtual Machine (QEMU) and Container workloads +- Ability to perform inbound and outbound NAT to/from the workloads + +It tries to rely on interfaces directly exposed by the kernel vs using user +space tools to ensure maximum portability. The implementation maintains state +on leaf node vs relying on centralized state. It also uses local state to +perform any network re-configuration in the event of a launcher crash or restart. + +Currently the library supports creation of bridges, GRE tunnels, VM and Container +compatible interfaces (VNICs) on nodes. It also provides the ability to +attach tunnels and vnics to bridges. + +The implementation also provides the ability to interconnect these bridges +across nodes creating L2 Overlay networks. + + +## Roles ## + +The library supports node specific networking initialization capabilities. +It currently supports setup of Compute Nodes (CN), Network Nodes (NN) and +Compute Node Concentrator Instances (CNCI). + +### Compute Node ### + +A compute node typically runs VM and Container workloads. The library provides +APIs to perform network initialization as well as network interface creation +and overlay network linking. + +### Network Node ### + +The tenant overlay networks are linked together to Network Nodes. The Network +Nodes switch and route traffic between the tenant bridges and subnets distributed +across multiple Compute Nodes. + +### CNCI ### + +Compute Node Concentrators or CNCIs are Virtual Machines running on +Network Nodes which handle subsets of traffic belonging to a single tenant. +A single network node can run multiple CNCIs, limited by the Compute and +Network needs of the CNCIs. All tenant level switching and routing for +a given tenant is handled isolated from other tenants using the CNCIs. 
+The CNCIs also implement tenant specific firewall and NAT rules. In the future +they may be extended to perform traffic shaping. + + + diff --git a/networking/libsnnet/benchmark_test.go b/networking/libsnnet/benchmark_test.go new file mode 100644 index 000000000..df0bad197 --- /dev/null +++ b/networking/libsnnet/benchmark_test.go @@ -0,0 +1,210 @@ +// +// Copyright (c) 2016 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package libsnnet_test + +import ( + "net" + "testing" + + "github.com/01org/ciao/networking/libsnnet" +) + +//Benchmarks Worst case latency of VNIC creation +// +//BenchmarkComputeNodeWorstCase measures the time is takes +//to instantiate a VNIC on a node that does not have that +//tenant subnet present +//This means that there will be bridge created, a GRE tunnel +//created and a tap inteface created and they are all linked +//to one another. Additionally a SSNTP event is also generated +//Based on current observation most of the time is spent in the +//kernel processing the netlink message +//To ensure that we do not pollute the test system we delete +//the VNIC. +//Hence the benchmarked time includes the time it takes to +//create and delete the VNIC (not just create). +//However the deletes are more efficent than creates +//This does not truly measure the cost of synchrnoization +//when multiple launcher threads are creating VNIC simulatenously. 
+//However based on current measurements the cost of a channel based +//sync is about 10ms (for a uncontended channel). The mutex is almost +//free when un-contended +// +//Test should pass ok +func BenchmarkComputeNodeWorstCase(b *testing.B) { + cn := &libsnnet.ComputeNode{} + + cn.NetworkConfig = &libsnnet.NetworkConfig{ + ManagementNet: nil, + ComputeNet: nil, + Mode: libsnnet.GreTunnel, + } + + cn.ID = "cnuuid" + + _, net1, _ := net.ParseCIDR("127.0.0.0/24") //Add this so that init will pass + _, net2, _ := net.ParseCIDR("193.168.1.0/24") + _, net3, _ := net.ParseCIDR("10.3.66.0/24") + + //From YAML, on agent init + mgtNet := []net.IPNet{*net1, *net2, *net3} + cn.ManagementNet = mgtNet + cn.ComputeNet = mgtNet + + if err := cn.Init(); err != nil { + b.Fatal("cn.Init failed", err) + } + + /* Pollutes the benchmark */ + /* + if err := cn.DbRebuild(nil); err != nil { + b.Fatal("cn.dbRebuild failed") + } + */ + + //From YAML on instance init + mac, _ := net.ParseMAC("CA:FE:00:01:02:03") + vnicCfg := &libsnnet.VnicConfig{ + VnicIP: net.IPv4(192, 168, 1, 100), + ConcIP: net.IPv4(192, 168, 1, 1), + VnicMAC: mac, + Subnet: *net2, + SubnetKey: 0xF, + VnicID: "vuuid", + InstanceID: "iuuid", + TenantID: "tuuid", + SubnetID: "suuid", + ConcID: "cnciuuid", + } + + for i := 0; i < b.N; i++ { + + if vnic, ssntpEvent, err := cn.CreateVnic(vnicCfg); err != nil { + b.Error("cn.CreateVnic failed", err) + } else { + //We expect a bridge creation event + if ssntpEvent == nil { + b.Error("cn.CreateVnic expected event", vnic, ssntpEvent) + } + } + + if ssntpEvent, err := cn.DestroyVnic(vnicCfg); err != nil { + b.Error("cn.DestroyVnic failed", err) + } else { + //We expect a bridge deletion event + if ssntpEvent == nil { + b.Error("cn.DestroyVnic expected event") + } + } + } +} + +//Benchmarks best case VNIC creation latency +// +//BenchmarkComputeNodeWorstCase measures the time is takes +//to instantiate a VNIC on a node that already has that +//tenant subnet present +//Hence this is 
just the cost to create the tap and link +//it to the brigde. +//As mentioned before this also deletes the VNIC. +//Hence the cost includes the cost to create and delete the VNIC +// +//Test should pass OK +func BenchmarkComputeNodeBestCase(b *testing.B) { + cn := &libsnnet.ComputeNode{} + + cn.NetworkConfig = &libsnnet.NetworkConfig{ + ManagementNet: nil, + ComputeNet: nil, + Mode: libsnnet.GreTunnel, + } + + cn.ID = "cnuuid" + + _, net1, _ := net.ParseCIDR("127.0.0.0/24") //Add this so that init will pass + _, net2, _ := net.ParseCIDR("193.168.1.0/24") + _, net3, _ := net.ParseCIDR("10.3.66.0/24") + + //From YAML, on agent init + mgtNet := []net.IPNet{*net1, *net2, *net3} + cn.ManagementNet = mgtNet + cn.ComputeNet = mgtNet + + if err := cn.Init(); err != nil { + b.Fatal("cn.Init failed", err) + } + + if err := cn.DbRebuild(nil); err != nil { + b.Fatal("cn.dbRebuild failed") + } + + //From YAML on instance init + macSeed, _ := net.ParseMAC("CA:FE:00:01:02:ED") + vnicCfgSeed := &libsnnet.VnicConfig{ + VnicIP: net.IPv4(192, 168, 1, 11), + ConcIP: net.IPv4(192, 168, 1, 1), + VnicMAC: macSeed, + Subnet: *net2, + SubnetKey: 0xF, + VnicID: "vuuidseed", + InstanceID: "iuuidseed", + TenantID: "tuuid", + SubnetID: "suuid", + ConcID: "cnciuuid", + } + + mac, _ := net.ParseMAC("CA:FE:00:01:02:03") + vnicCfg := &libsnnet.VnicConfig{ + VnicIP: net.IPv4(192, 168, 1, 100), + ConcIP: net.IPv4(192, 168, 1, 1), + VnicMAC: mac, + Subnet: *net2, + SubnetKey: 0xF, + VnicID: "vuuid", + InstanceID: "iuuid", + TenantID: "tuuid", + SubnetID: "suuid", + ConcID: "cnciuuid", + } + + if vnic, ssntpEvent, err := cn.CreateVnic(vnicCfgSeed); err != nil { + b.Error("cn.CreateVnic seed failed", err, vnic, ssntpEvent) + } + + for i := 0; i < b.N; i++ { + + if vnic, ssntpEvent, err := cn.CreateVnic(vnicCfg); err != nil { + b.Error("cn.CreateVnic failed", err, vnic) + } else { + if ssntpEvent != nil { + b.Error("cn.CreateVnic unexpected event", vnic, ssntpEvent) + } + } + + if ssntpEvent, err := 
cn.DestroyVnic(vnicCfg); err != nil { + b.Error("cn.DestroyVnic failed", err) + } else { + if ssntpEvent != nil { + b.Error("cn.DestroyVnic unexpected event", ssntpEvent) + } + } + } + + if ssntpEvent, err := cn.DestroyVnic(vnicCfgSeed); err != nil { + b.Error("cn.DestroyVnic seed failed", err, ssntpEvent) + } +} diff --git a/networking/libsnnet/bridge.go b/networking/libsnnet/bridge.go new file mode 100644 index 000000000..635094921 --- /dev/null +++ b/networking/libsnnet/bridge.go @@ -0,0 +1,184 @@ +// +// Copyright (c) 2016 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package libsnnet + +import ( + "net" + + "github.com/vishvananda/netlink" +) + +// NewBridge is used to initialize the bridge properties +// This has to be called prior to Create() or GetDevice() +func NewBridge(id string) (*Bridge, error) { + bridge := &Bridge{} + bridge.Link = &netlink.Bridge{} + bridge.GlobalID = id //TODO: Add other parameters + return bridge, nil +} + +// GetDevice associates the bridge with an existing bridge with that GlobalId. 
+// If there are multiple bridges incorrectly created with the same id, it will +// associate the bridge with the first +func (b *Bridge) GetDevice() error { + + if b.GlobalID == "" { + return netError(b, "GetDevice: unnamed bridge") + } + + link, err := netlink.LinkByAlias(b.GlobalID) + + if err != nil { + return netError(b, "GetDevice: link by alias %v", err) + } + + brl, ok := link.(*netlink.Bridge) + if !ok { + return netError(b, "GetDevice: incorrect interface type %v %v", b.GlobalID, link.Type()) + } + + b.Link = brl + b.LinkName = brl.Name + return nil +} + +// Create instantiates a new bridge. +func (b *Bridge) Create() error { + + if b.GlobalID == "" { + return netError(b, "create an unnamed bridge") + } + + var err error + + if b.LinkName == "" { + if b.LinkName, err = GenIface(b, true); err != nil { + return netError(b, "create %v", err) + } + + if _, err := netlink.LinkByAlias(b.GlobalID); err == nil { + return netError(b, "create interface exists: %v %v", b.GlobalID, b.LinkName) + } + } + + bridge := &netlink.Bridge{LinkAttrs: netlink.LinkAttrs{Name: b.LinkName}} + + if err := netlink.LinkAdd(bridge); err != nil { + return netError(b, "create link add %v %v", b.GlobalID, err) + } + + link, err := netlink.LinkByName(b.LinkName) + if err != nil { + return netError(b, "create LinkByName %v %v", b.GlobalID, err) + } + + brl, ok := link.(*netlink.Bridge) + if !ok { + return netError(b, "create incorrect interface type %v %v", b.GlobalID, link) + } + + b.Link = brl + if err := b.setAlias(b.GlobalID); err != nil { + b.Destroy() + return netError(b, "create set alias %v", err) + } + + return nil +} + +// Destroy an existing bridge +func (b *Bridge) Destroy() error { + if b.Link == nil || b.Link.Index == 0 { + return netError(b, "destroy bridge unnitialized") + } + + if err := netlink.LinkDel(b.Link); err != nil { + return netError(b, "destroy bridge %v", err) + } + return nil +} + +// Enable the bridge +func (b *Bridge) Enable() error { + if b.Link == nil || 
b.Link.Index == 0 { + return netError(b, "enable bridge unnitialized") + } + + if err := netlink.LinkSetUp(b.Link); err != nil { + return netError(b, "enable link set up", err) + } + + return nil +} + +// Disable the bridge +func (b *Bridge) Disable() error { + if b.Link == nil || b.Link.Index == 0 { + return netError(b, "disable bridge unnitialized") + } + + if err := netlink.LinkSetDown(b.Link); err != nil { + return netError(b, "disable link set down %v", err) + } + + return nil +} + +// AddIP Adds an IP Address to the bridge +func (b *Bridge) AddIP(ip *net.IPNet) error { + if b.Link == nil || b.Link.Index == 0 { + return netError(b, "add ip bridge unnitialized") + } + + addr := &netlink.Addr{IPNet: ip} + + if err := netlink.AddrAdd(b.Link, addr); err != nil { + return netError(b, "assigning IP address to bridge %v %v", addr.String(), err) + } + + return nil +} + +// DelIP Deletes an IP Address assigned to the bridge +func (b *Bridge) DelIP(ip *net.IPNet) error { + + if b.Link == nil || b.Link.Index == 0 { + return netError(b, "del ip bridge unnitialized") + } + + addr := &netlink.Addr{IPNet: ip} + + if err := netlink.AddrDel(b.Link, addr); err != nil { + return netError(b, "deleting IP address from bridge %v %v", addr.String(), err) + } + + return nil +} + +// setAlias sets up the alias on the device +func (b *Bridge) setAlias(alias string) error { + + if b.Link == nil || b.Link.Index == 0 { + return netError(b, "set alias bridge unnitialized") + } + + if err := netlink.LinkSetAlias(b.Link, alias); err != nil { + return netError(b, "setting alias on bridge %v %v", alias, err) + } + + return nil +} diff --git a/networking/libsnnet/bridge_test.go b/networking/libsnnet/bridge_test.go new file mode 100644 index 000000000..4946fb141 --- /dev/null +++ b/networking/libsnnet/bridge_test.go @@ -0,0 +1,155 @@ +// +// Copyright (c) 2016 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in 
compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package libsnnet_test + +import ( + "strings" + "testing" + + "github.com/01org/ciao/networking/libsnnet" +) + +//Test all Bridge primitives +// +//Tests creation, attach, enable, disable and destroy +//of a bridge interface. Any failure indicates a problem +//with the netlink library or kernel API +// +//Test is expected to pass +func TestBridge_Basic(t *testing.T) { + + bridge, _ := libsnnet.NewBridge("go_testbr") + + if err := bridge.Create(); err != nil { + t.Errorf("Bridge creation failed: %v", err) + } + + bridge1, _ := libsnnet.NewBridge("go_testbr") + + if err := bridge1.GetDevice(); err != nil { + t.Errorf("Bridge Get Device failed: %v", err) + } + + if err := bridge.Enable(); err != nil { + t.Errorf("Bridge enable failed: %v", err) + } + + if err := bridge.Disable(); err != nil { + t.Errorf("Bridge enable failed: %v", err) + } + + if err := bridge.Destroy(); err != nil { + t.Errorf("Bridge deletion failed: %v", err) + } + +} + +//Duplicate bridge detection +// +//Checks that duplicate bridge creation is handled +//gracefully and correctly +// +//Test is expected to pass +func TestBridge_Dup(t *testing.T) { + bridge, _ := libsnnet.NewBridge("go_testbr") + + if err := bridge.Create(); err != nil { + t.Errorf("Bridge creation failed: %v", err) + } + + defer bridge.Destroy() + + bridge1, _ := libsnnet.NewBridge("go_testbr") + if err := bridge1.Create(); err == nil { + t.Errorf("Duplicate Bridge creation: %v", err) + } + +} + +//Negative test cases for bridge primitives +// +//Checks various negative test 
scenarios are gracefully +//handled +// +//Test is expected to pass +func TestBridge_Invalid(t *testing.T) { + bridge, err := libsnnet.NewBridge("go_testbr") + + if err = bridge.GetDevice(); err == nil { + t.Errorf("Non existing bridge: %v", bridge) + } + + if !strings.HasPrefix(err.Error(), "bridge error") { + t.Errorf("Invalid error format %v", err) + } + + if err = bridge.Destroy(); err == nil { + t.Errorf("Uninitialized call: %v", err) + } + + if !strings.HasPrefix(err.Error(), "bridge error") { + t.Errorf("Invalid error format %v", err) + } + + if err = bridge.Enable(); err == nil { + t.Errorf("Uninitialized call: %v", err) + } + + if !strings.HasPrefix(err.Error(), "bridge error") { + t.Errorf("Invalid error format %v", err) + } + + if err = bridge.Disable(); err == nil { + t.Errorf("Uninitialized call: %v", err) + } + + if !strings.HasPrefix(err.Error(), "bridge error") { + t.Errorf("Invalid error format %v", err) + } +} + +//Tests attaching to an existing bridge +// +//Tests that you can attach to an existing bridge +//and perform all bridge operation on such a bridge +// +//Test is expected to pass +func TestBridge_GetDevice(t *testing.T) { + bridge, _ := libsnnet.NewBridge("go_testbr") + + if err := bridge.Create(); err != nil { + t.Errorf("Bridge creation failed: %v", err) + } + + bridge1, _ := libsnnet.NewBridge("go_testbr") + + if err := bridge1.GetDevice(); err != nil { + t.Errorf("Bridge Get Device failed: %v", err) + } + + if err := bridge1.Enable(); err != nil { + t.Errorf("Uninitialized call: %v", err) + } + + if err := bridge1.Disable(); err != nil { + t.Errorf("Uninitialized call: %v", err) + } + + if err := bridge1.Destroy(); err != nil { + t.Errorf("Bridge destroy failed: %v", err) + } +} diff --git a/networking/libsnnet/cn.go b/networking/libsnnet/cn.go new file mode 100644 index 000000000..7a1dca74e --- /dev/null +++ b/networking/libsnnet/cn.go @@ -0,0 +1,1104 @@ +// +// Copyright (c) 2016 Intel Corporation +// +// Licensed under the Apache 
License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package libsnnet + +import ( + "fmt" + "net" + "strings" + "sync" + "time" + + "github.com/vishvananda/netlink" +) + +//CnTimeout specifies the amount of time the API will wait for netlink +//operations to complete. When multiple threads and invoking the API +//simulatenously the APIs may take time to return due to the need to +//serialize certian netlink calls +var CnTimeout int64 = 60 + +// NetworkConfig from YAML. +// This is a subset of the top level data center configuration +type NetworkConfig struct { + ManagementNet []net.IPNet // Enumerates all possible management subnets + ComputeNet []net.IPNet // Enumerates all possible compute subnets + Mode NetworkMode //The data center networking mode +} + +// VnicConfig fram YAML +// All these fields originate from the Controller +type VnicConfig struct { + VnicRole + VnicIP net.IP + ConcIP net.IP + VnicMAC net.HardwareAddr + MTU int + SubnetKey int //optional: Currently set to SubnetIP + Subnet net.IPNet + VnicID string // UUID + InstanceID string // UUID + TenantID string // UUID + SubnetID string // UUID + ConcID string // UUID +} + +// CNSsntpEvent to be generated +type CNSsntpEvent int + +const ( + //SsntpNone : Non event + SsntpNone CNSsntpEvent = 0 + //SsntpTunAdd : Local tunnel added, remote tunnel add required + SsntpTunAdd CNSsntpEvent = 1 << iota + //SsntpTunDel : Local tunnel deleted, remote tunnel delete required + SsntpTunDel +) + +// SsntpEventInfo contains the event info 
that needs to be +// converted to YAML payload and sent to the CNCI via the scheduler +type SsntpEventInfo struct { + Event CNSsntpEvent // TYPE: SSNTP Type + CnciIP string // TO: IP Address of the concentrator + CnIP string // FROM: Compute Network IP for this node + Subnet string // Tenant Subnet + TenantID string // Tenant UUID + SubnetID string // Tenant Subnet UUID + ConcID string // CNCI UUID + CnID string // CN UUID + SubnetKey int + containerSubnetID string // Logical name of the container network. + // Hack: Will be removed once we drop deprecated APIs +} + +// CNContainerEvent to be generated +type CNContainerEvent int + +const ( + //ContainerNetworkInfo Informative, no action needs to be taken by caller + ContainerNetworkInfo CNContainerEvent = 0 + //ContainerNetworkAdd Caller has to create the logical docker network before starting + //a container with this VNIC + ContainerNetworkAdd CNContainerEvent = 1 << iota + //ContainerNetworkDel Caller is responsible for logical docker network deletion + //The corresponding physical network no longer exists + ContainerNetworkDel +) + +//ContainerInfo provides details that needed by docker to create the container +//associated with this VNIC +type ContainerInfo struct { + CNContainerEvent + SubnetID string + Subnet net.IPNet + Gateway net.IP + Bridge string +} + +type linkInfo struct { + index int + name string + ready chan struct{} +} + +//Network topology of the node +//The linkMap is always authoritative +type cnTopology struct { + sync.Mutex + bridgeMap map[string]map[string]bool //Bridge to VNIC mapping + linkMap map[string]*linkInfo //Alias to Link mapping + nameMap map[string]bool //Link name + containerMap map[string]bool //Bridge to container mapping +} + +func newCnTopology() (topology *cnTopology) { + topology = &cnTopology{} + initCnTopology(topology) + return topology +} + +func initCnTopology(topology *cnTopology) { + topology.bridgeMap = make(map[string]map[string]bool) + topology.linkMap = 
make(map[string]*linkInfo) + topology.nameMap = make(map[string]bool) + topology.containerMap = make(map[string]bool) +} + +// ComputeNode describes the high level networking setup of a compute node. +// The design allows for multiple links, however in phase 0 only the first +// link is chosen. The remaining are ignored. In the future this allows for +// backup links or link aggregation or failover +type ComputeNode struct { + ID string //UUID of CN + *NetworkConfig + MgtAddr []netlink.Addr + MgtLink []netlink.Link + ComputeAddr []netlink.Addr + ComputeLink []netlink.Link + *cnTopology +} + +// Init sets the CN node configuration +// Discovers the physical interfaces and classifies them as management or compute +// Performs any node specific networking setup. +func (cn *ComputeNode) Init() error { + + links, err := netlink.LinkList() + + if err != nil { + return NewAPIError("CN node init failed " + err.Error()) + } + + phyInterfaces := 0 + cn.MgtAddr = nil + cn.MgtLink = nil + cn.ComputeAddr = nil + cn.ComputeLink = nil + + for _, link := range links { + + if link.Type() != "device" && + link.Type() != "bond" && + link.Type() != "vlan" { + continue + } + + if link.Attrs().Name == "lo" { + continue + } + + addrs, err := netlink.AddrList(link, netlink.FAMILY_V4) + if err != nil || len(addrs) == 0 { + continue //Should be safe to ignore this + } + + phyInterfaces++ + + for _, addr := range addrs { + + if cn.ManagementNet == nil { + cn.MgtAddr = append(cn.MgtAddr, addr) + cn.MgtLink = append(cn.MgtLink, link) + } else { + for _, mgt := range cn.ManagementNet { + if mgt.Contains(addr.IPNet.IP) { + cn.MgtAddr = append(cn.MgtAddr, addr) + cn.MgtLink = append(cn.MgtLink, link) + } + } + } + + if cn.ComputeNet == nil { + cn.ComputeAddr = append(cn.ComputeAddr, addr) + cn.ComputeLink = append(cn.ComputeLink, link) + } else { + for _, comp := range cn.ComputeNet { + if comp.Contains(addr.IPNet.IP) { + cn.ComputeAddr = append(cn.ComputeAddr, addr) + cn.ComputeLink = 
append(cn.ComputeLink, link) + } + } + } + + } + } + + if len(cn.MgtAddr) < 1 { + return NewAPIError(fmt.Sprintf("unable to associate with management network %v", cn.ManagementNet)) + } + if len(cn.ComputeAddr) < 1 { + return NewAPIError(fmt.Sprintf("unable to associate with compute network %v", cn.ComputeNet)) + } + + if (cn.ManagementNet == nil || cn.ComputeNet == nil) && phyInterfaces > 1 { + return fmt.Errorf("unable to autoconfigure network") + } + + //TODO: Support all modes + if cn.Mode != GreTunnel { + return NewAPIError(fmt.Sprintf("Unsupported network mode %v", cn.Mode)) + } + + cn.cnTopology = newCnTopology() + + return nil +} + +type vnicAliases struct { + bridge string + vnic string + vnicPeer string + gre string +} + +const ( + bridgePrefix = "br_" + vnicPrefix = "vnic_" + grePrefix = "gre_" + cnciVnicPrefix = "cncivnic_" +) + +func (cn *ComputeNode) genCnciVnicAlias(cfg *VnicConfig) string { + return fmt.Sprintf("%s%s_%s", cnciVnicPrefix, + cfg.TenantID, + cfg.VnicID) + +} + +func (cn *ComputeNode) checkCnciVnicCfg(cfg *VnicConfig) error { + + switch { + case cfg.TenantID == "": + return fmt.Errorf("Invalid CNCI VNIC configuration = TenantID") + case cfg.VnicID == "": + return fmt.Errorf("Invalid CNCI VNIC configuration = VnicID") + } + + return nil +} + +func genCnVnicAliases(cfg *VnicConfig) *vnicAliases { + + vnic := &vnicAliases{} + + vnic.bridge = fmt.Sprintf("%s%s_%s_%s_%s", bridgePrefix, + cfg.TenantID, + cfg.SubnetID, + cfg.ConcID, + cfg.ConcIP) + + vnic.gre = fmt.Sprintf("%s%s_%s_%s_%s", grePrefix, + cfg.TenantID, + cfg.SubnetID, + cfg.ConcID, + cfg.ConcIP) + + vnic.vnic = fmt.Sprintf("%s%s_%s_%s_%s##%s", vnicPrefix, + cfg.TenantID, + cfg.SubnetID, + cfg.ConcID, + cfg.ConcIP, + cfg.VnicIP) + + return vnic +} + +func checkCnVnicCfg(cfg *VnicConfig) error { + + switch { + case cfg.TenantID == "": + return fmt.Errorf("Invalid VNIC configuration - TenantID") + case cfg.SubnetID == "": + return fmt.Errorf("Invalid VNIC configuration - SubnetID") 
+ case cfg.ConcID == "": + return fmt.Errorf("Invalid VNIC configuration - ConcID") + case cfg.ConcIP == nil: + return fmt.Errorf("Invalid VNIC configuration - ConcIP") + case cfg.VnicID == "": + return fmt.Errorf("Invalid VNIC configuration - VnicID") + case cfg.VnicMAC == nil: + return fmt.Errorf("Invalid VNIC configuration - VnicID") + case cfg.VnicRole != TenantVM && cfg.VnicRole != TenantContainer: + return fmt.Errorf("Invalid vnic role %v", cfg) + } + + return nil +} + +type dbOp int + +const ( + dbInsVnic dbOp = 1 << iota + dbDelVnic + dbInsBr + dbDelBr + dbInsIf + dbDelIf +) + +//DbRebuild the CN network database using the information contained +//in the aliases. It can be called if the agent using the library +//crashes and loses network topology information. +//It can also be called, to rebuild the network topology on demand. +func (cn *ComputeNode) DbRebuild(links []netlink.Link) error { + + if cn.NetworkConfig == nil || cn.cnTopology == nil { + return NewAPIError(fmt.Sprintf("CN has not been initialized %v", cn)) + } + + links, err := netlink.LinkList() + if err != nil { + return NewFatalError("Cannot retrieve links" + err.Error()) + } + + cn.cnTopology.Lock() + defer cn.cnTopology.Unlock() + + initCnTopology(cn.cnTopology) + + //Add the bridges first, vnics added later as we + //do not control the order of link discovery + for _, link := range links { + alias := link.Attrs().Alias + name := link.Attrs().Name + cn.nameMap[name] = true + + if alias == "" { + continue + } + + cn.linkMap[alias] = &linkInfo{ + index: link.Attrs().Index, + name: name, + ready: make(chan struct{}), + } + defer close(cn.linkMap[alias].ready) + + if link.Type() == "bridge" { + if _, err := cn.dbUpdate(alias, "", dbInsBr); err != nil { + return NewFatalError("db rebuild " + err.Error()) + } + } + } + + //Now build the vnic maps, inefficient but simple + //This allows us to check if the bridges and tunnels are all present + for _, link := range links { + if alias := 
link.Attrs().Alias; alias != "" { + if strings.HasPrefix(alias, vnicPrefix) { + vnic := alias + id := strings.TrimPrefix(vnic, vnicPrefix) + id = strings.Split(id, "##")[0] + bridge := bridgePrefix + id + gre := grePrefix + id + if _, err := cn.dbUpdate(bridge, vnic, dbInsVnic); err != nil { + return NewFatalError("db rebuild: add vnic" + err.Error()) + } + if _, ok := cn.linkMap[gre]; !ok { + return NewFatalError("db rebuild: missing gre tunnel " + gre) + } + if link.Type() == "veth" { + cn.containerMap[bridge] = true + } + } + } + } + + return nil +} + +func (cn *ComputeNode) dbUpdate(bridge string, vnic string, op dbOp) (int, error) { + + switch { + + case (op & dbInsBr) == dbInsBr: + vnicMap, present := cn.bridgeMap[bridge] + if present { + return -1, fmt.Errorf("db duplicate bridge %v %v", op, bridge) + } + vnicMap = make(map[string]bool) + cn.bridgeMap[bridge] = vnicMap + return len(cn.bridgeMap), nil + + case (op & dbDelBr) == dbDelBr: + _, present := cn.bridgeMap[bridge] + if !present { + return -1, fmt.Errorf("db missing bridge %v", bridge) + } + delete(cn.bridgeMap, bridge) + return len(cn.bridgeMap), nil + + case (op & dbInsVnic) == dbInsVnic: + vnicMap, present := cn.bridgeMap[bridge] + if !present { + return -1, fmt.Errorf("db missing bridge %v %v", op, bridge) + } + + _, present = vnicMap[vnic] + if present { + return -1, fmt.Errorf("db duplicate vnic %v %v", op, vnic) + } + vnicMap[vnic] = true + return len(vnicMap), nil + + case (op & dbDelVnic) == dbDelVnic: + vnicMap, present := cn.bridgeMap[bridge] + if !present { + return -1, fmt.Errorf("db missing bridge %v", bridge) + } + + _, present = vnicMap[vnic] + if !present { + return -1, fmt.Errorf("db missing vnic %v", vnic) + } + delete(vnicMap, vnic) + return len(vnicMap), nil + + default: + return -1, fmt.Errorf("db invalid op %v %v", op, vnic) + } +} + +func (cn *ComputeNode) genLinkName(device interface{}) (string, error) { + for i := 0; i < ifaceRetryLimit; { + name, _ := GenIface(device, false) 
+ if !cn.nameMap[name] { + cn.nameMap[name] = true + return name, nil + } + } + return "", fmt.Errorf("Unable to generate unique device name") +} + +// CreateCnciVnic creates a Cnci VNIC and sets all the underlying framework +// to ensure that the Vnic is active. The Cnci VNIC will bind to the first +// compute network interface. +func (cn *ComputeNode) CreateCnciVnic(cfg *VnicConfig) (*CnciVnic, error) { + + if cn.cnTopology == nil || cfg == nil || cfg.VnicRole != DataCenter { + return nil, NewAPIError("invalid vnic or configuration") + } + + if err := cn.checkCnciVnicCfg(cfg); err != nil { + return nil, NewAPIError(err.Error()) + } + + cvnic, err := NewCnciVnic(cn.genCnciVnicAlias(cfg)) + if err != nil { + return nil, NewAPIError(err.Error()) + } + cvnic.MACAddr = &cfg.VnicMAC + cvnic.Link.ParentIndex = cn.ComputeLink[0].Attrs().Index + + // CS Start + cn.cnTopology.Lock() + + if vLink, present := cn.linkMap[cvnic.GlobalID]; present { + cn.cnTopology.Unlock() + + cvnic.LinkName, cvnic.Link.Index, err = waitForDeviceReady(vLink) + if err != nil { + return nil, NewFatalError(cvnic.GlobalID + err.Error()) + } + return cvnic, nil + } + + if cvnic.LinkName, err = cn.genLinkName(cvnic); err != nil { + cn.cnTopology.Unlock() + return nil, NewFatalError("Unable to generate unique cvnic name") + } + + cn.linkMap[cvnic.GlobalID] = &linkInfo{ + name: cvnic.LinkName, + ready: make(chan struct{}), + } + defer close(cn.linkMap[cvnic.GlobalID].ready) + + cn.cnTopology.Unlock() + // CS End + + if err := cvnic.Create(); err != nil { + return nil, NewFatalError(err.Error()) + } + if err := cvnic.Enable(); err != nil { + return nil, NewFatalError(err.Error()) + } + + cn.linkMap[cvnic.GlobalID].index = cvnic.Link.Index + + // Now the network is ready and you can create a VM and launch it with this vnic + // vnic.Name is the interface name, the instanceMAC is the MAC Address + // qemu-system-x86_64 ... 
+ //-net nic,model=virtio,macaddr=$(< /sys/class/net//address) \ + //-net tap,fd=3 3<>/dev/tap$(< /sys/class/net//ifindex) + return cvnic, nil + +} + +// DestroyCnciVnic destroys a Cnci VNIC. +func (cn *ComputeNode) DestroyCnciVnic(cfg *VnicConfig) error { + + if cn.cnTopology == nil || cfg == nil || cfg.VnicRole != DataCenter { + return NewAPIError("invalid vnic or configuration") + } + + if err := cn.checkCnciVnicCfg(cfg); err != nil { + return NewAPIError(err.Error()) + } + + cvnic, err := NewCnciVnic(cn.genCnciVnicAlias(cfg)) + if err != nil { + return NewAPIError(err.Error()) + } + + //Start CS + cn.cnTopology.Lock() + defer cn.cnTopology.Unlock() + + vLink, present := cn.linkMap[cvnic.GlobalID] + if !present { + return nil + } + + cvnic.LinkName, cvnic.Link.Index, err = waitForDeviceReady(vLink) + if err != nil { + return NewFatalError(cvnic.GlobalID + err.Error()) + } + delete(cn.linkMap, cvnic.GlobalID) + delete(cn.nameMap, cvnic.LinkName) + + if err := cvnic.Destroy(); err != nil { + return NewFatalError(err.Error()) + } + + return nil +} + +// CreateVnicV2 creates a tenant VNIC that can be used by containers +// or VMs +// This will replace CreateVnic +// +// If this is the first instance of a container Vnic belonging to the +// tenant on this subnet will provide the ContainerInfo which +// should be used by the caller of this API to logically create the Network +// in the Docker network database +// This is typically done using the command line or API equivalen of +// docker docker network create -d=ciao --ipam-driver=ciao +// --subnet= --gateway= +// --opt "bridge"= ContainerInfo.SubnetID +// +// If this is the first instance of a Vnic belonging to the tenant, +// will provide a SSNTP message to be sent to the Scheduler to notify the +// CNCI of this instantitation. 
This message is processed by the CNCI which +// will setup the far side of the tunnel which is required to connect this CN +// tenant bridge to the tenant Subnet +// +// Note: The caller of this function is responsible to send the message to the scheduler +func (cn *ComputeNode) CreateVnicV2(cfg *VnicConfig) (*Vnic, *SsntpEventInfo, *ContainerInfo, error) { + /* TODO: Need to figure out a better way to set MTU for containers */ + if cfg.VnicRole == TenantContainer { + if cfg.MTU == 0 { + cfg.MTU = 1400 + } + } + return cn.createVnicInternal(cfg) +} + +// CreateVnic creates a tenant VM VNIC and sets all the underlying framework +// +// This version of the API has been deprecated +// +// to ensure that the Vnic is active. In addition if this is the first instance +// of the Vnic belonging to the tenant, will provide a SSNTP message to be +// sent to the Scheduler to notify the CNCI of this instantitation. This +// message is processed by the CNCI which will setup the far side of the +// tunnel which is required to connect this CN tenant bridge to the tenant Subnet +// Note: The caller of this function is responsible to send the message to the scheduler +func (cn *ComputeNode) CreateVnic(cfg *VnicConfig) (*Vnic, *SsntpEventInfo, error) { + if cfg.VnicRole != TenantVM { + return nil, nil, NewAPIError("invalid vnic role") + } + v, s, _, err := cn.createVnicInternal(cfg) + return v, s, err +} + +func (cn *ComputeNode) createVnicInternal(cfg *VnicConfig) (*Vnic, *SsntpEventInfo, *ContainerInfo, error) { + var gLink *linkInfo + var cInfo *ContainerInfo + + if cn.cnTopology == nil || cfg == nil { + return nil, nil, nil, NewAPIError("invalid vnic or configuration") + } + + if err := checkCnVnicCfg(cfg); err != nil { + return nil, nil, nil, NewAPIError("invalid vnic or configuration") + } + + if err := checkCnVnicCfg(cfg); err != nil { + return nil, nil, nil, NewAPIError(err.Error()) + } + alias := genCnVnicAliases(cfg) + + bridge, err := NewBridge(alias.bridge) + if err != 
nil { + return nil, nil, nil, NewAPIError(err.Error()) + } + + var vnic *Vnic + switch cfg.VnicRole { + case TenantVM: + vnic, err = NewVnic(alias.vnic) + case TenantContainer: + vnic, err = NewContainerVnic(alias.vnic) + } + if err != nil { + return nil, nil, nil, NewAPIError(err.Error()) + } + vnic.MACAddr = &cfg.VnicMAC + vnic.MTU = cfg.MTU + + local := cn.ComputeAddr[0].IPNet.IP + gre, err := NewGreTunEP(alias.gre, local, cfg.ConcIP, uint32(cfg.SubnetKey)) + if err != nil { + return nil, nil, nil, NewAPIError(err.Error()) + } + + // CS Start + cn.cnTopology.Lock() + + vLink, present := cn.linkMap[vnic.GlobalID] + if present { + bLink, present := cn.linkMap[bridge.GlobalID] + cn.cnTopology.Unlock() + + vnic.LinkName, vnic.Link.Attrs().Index, err = waitForDeviceReady(vLink) + if err != nil { + return nil, nil, nil, NewFatalError(vnic.GlobalID + err.Error()) + } + if cfg.VnicRole == TenantVM { + return vnic, nil, nil, nil + } + + //Retrieve the bridge for the VNIC, which should already exist + //This is not strictly needed, but helps the caller identify the container + //network ID without resorting to any sort of caching + if !present { + return nil, nil, nil, NewFatalError(vnic.GlobalID + " Bridge not present") + } + bridge.LinkName, bridge.Link.Attrs().Index, err = waitForDeviceReady(bLink) + if err != nil { + return nil, nil, nil, NewFatalError(vnic.GlobalID + err.Error()) + } + + cInfo := getContainerInfo(cfg, vnic, bridge) + return vnic, nil, cInfo, nil + } + + if err := cn.logicallyCreateVnic(vnic); err != nil { + cn.cnTopology.Unlock() + return nil, nil, nil, NewFatalError(err.Error()) + } + vLink = cn.linkMap[vnic.GlobalID] + defer close(vLink.ready) + + bLink, present := cn.linkMap[bridge.GlobalID] + if present { + if _, err := cn.dbUpdate(bridge.GlobalID, vnic.GlobalID, dbInsVnic); err != nil { + cn.cnTopology.Unlock() + return nil, nil, nil, NewFatalError(err.Error()) + } + + var needsContainerNetwork bool + if vnic.Role == TenantContainer && 
!cn.containerMap[bridge.GlobalID] { + cn.containerMap[bridge.GlobalID] = true + needsContainerNetwork = true + } + + cn.cnTopology.Unlock() + + bridge.LinkName, bridge.Link.Index, err = waitForDeviceReady(bLink) + if err != nil { + return nil, nil, nil, NewFatalError(bridge.GlobalID + err.Error()) + } + + if err := createAndEnableVnic(vnic, bridge); err != nil { + return nil, nil, nil, NewFatalError(err.Error()) + } + vLink.index = vnic.Link.Attrs().Index + + cInfo = getContainerInfo(cfg, vnic, bridge) + if needsContainerNetwork { + cInfo.CNContainerEvent = ContainerNetworkAdd + } + return vnic, nil, cInfo, nil + } + + if err := cn.logicallyCreateBridge(bridge, gre, vnic); err != nil { + cn.cnTopology.Unlock() + return nil, nil, nil, NewFatalError(err.Error()) + } + + gLink = cn.linkMap[gre.GlobalID] + defer close(gLink.ready) + + bLink = cn.linkMap[bridge.GlobalID] + defer close(bLink.ready) + + if vnic.Role == TenantContainer { + cn.containerMap[bridge.GlobalID] = true + } + + cn.cnTopology.Unlock() + // CS End + + //The actual device creation is time consuming + //but is outside the critical section + //The defer close(ready) ensures that + //the channel will close even on failure + brCreateMsg := &SsntpEventInfo{ + Event: SsntpTunAdd, + CnciIP: cfg.ConcIP.String(), + ConcID: cfg.ConcID, + TenantID: cfg.TenantID, + SubnetID: cfg.SubnetID, + SubnetKey: cfg.SubnetKey, + Subnet: cfg.Subnet.String(), + CnIP: local.String(), + CnID: cn.ID, + } + + if err := createAndEnableBridge(bridge, gre); err != nil { + return nil, brCreateMsg, cInfo, NewFatalError(err.Error()) + } + bLink.index = bridge.Link.Index + gLink.index = gre.Link.Index + + if err := createAndEnableVnic(vnic, bridge); err != nil { + return nil, brCreateMsg, cInfo, NewFatalError(err.Error()) + } + vLink.index = vnic.Link.Attrs().Index + + cInfo = getContainerInfo(cfg, vnic, bridge) + cInfo.CNContainerEvent = ContainerNetworkAdd + + // Now the network is ready and you can create a VM and launch it with 
this vnic + // vnic.Name is the interface name, the instanceMAC is the MAC Address + // qemu-system-x86_64 ... -net nic,model=virtio,macaddr=xxxx -net tap,ifname=vnic.Name ... + return vnic, brCreateMsg, cInfo, nil +} + +func getContainerInfo(cfg *VnicConfig, vnic *Vnic, bridge *Bridge) *ContainerInfo { + //TODO. Create a ciao gateway function so that in the future + //if we ever change our gateway algorithm it will propage everywhere + gateway := cfg.Subnet.IP.To4().Mask(cfg.Subnet.Mask) + gateway[3]++ + return &ContainerInfo{ + CNContainerEvent: ContainerNetworkInfo, //Default. Caller to override + SubnetID: bridge.LinkName, + Bridge: bridge.GlobalID, + Subnet: cfg.Subnet, + Gateway: gateway, + } +} + +//TODO: Use interfaces here to perform the name and index assignment +func waitForDeviceReady(devInfo *linkInfo) (devName string, devIndex int, err error) { + select { + case <-devInfo.ready: + return devInfo.name, devInfo.index, nil + case <-time.After(time.Duration(CnTimeout) * time.Second): + return "", 0, fmt.Errorf("Timeout waiting for device ready") + } +} + +func (cn *ComputeNode) logicallyCreateVnic(vnic *Vnic) (err error) { + + if vnic.LinkName, err = cn.genLinkName(vnic); err != nil { + return err + } + + vLink := &linkInfo{ + name: vnic.LinkName, + ready: make(chan struct{}), + } + + cn.linkMap[vnic.GlobalID] = vLink + return nil +} + +//Logically instantiates the bridge and tunnel in the topology +//The physical devices are not yet created but thier names aliases +//are added to the topology reserving them +//TODO: Check for global topology issues. E.g. 
Two tenants with same CNCI +func (cn *ComputeNode) logicallyCreateBridge(bridge *Bridge, gre *GreTunEP, vnic *Vnic) (err error) { + if bridge.LinkName, err = cn.genLinkName(bridge); err != nil { + return err + } + if gre.LinkName, err = cn.genLinkName(gre); err != nil { + return err + } + if _, err = cn.dbUpdate(bridge.GlobalID, "", dbInsBr); err != nil { + return err + } + if _, err = cn.dbUpdate(bridge.GlobalID, vnic.GlobalID, dbInsVnic); err != nil { + return err + } + + cn.linkMap[gre.GlobalID] = &linkInfo{ + name: gre.LinkName, + ready: make(chan struct{}), + } + + cn.linkMap[bridge.GlobalID] = &linkInfo{ + name: bridge.LinkName, + ready: make(chan struct{}), + } + + return nil +} + +//Phsyically create the devices by calling into the kernel +//TODO: Try to be more fault tolerant here. We may miss errors but try to +// honor the request e.g. If bridge exists use it and try and create tunnel +func createAndEnableBridge(bridge *Bridge, gre *GreTunEP) error { + if err := bridge.Create(); err != nil { + return fmt.Errorf("Bridge creation failed %s %s", bridge.GlobalID, err.Error()) + } + if err := gre.Create(); err != nil { + return fmt.Errorf("GRE creation failed %s %s", gre.GlobalID, err.Error()) + } + if err := gre.Attach(bridge); err != nil { + return fmt.Errorf("GRE attach failed %s %s %s", gre.GlobalID, bridge.GlobalID, err.Error()) + } + + if err := gre.Enable(); err != nil { + return fmt.Errorf("GRE enable failed %s %s %s", gre.GlobalID, bridge.GlobalID, err.Error()) + } + if err := bridge.Enable(); err != nil { + return fmt.Errorf("Bridge enable failed %s %s %s", gre.GlobalID, bridge.GlobalID, err.Error()) + } + return nil +} + +//Physically create the VNIC and attach it to the bridge +func createAndEnableVnic(vnic *Vnic, bridge *Bridge) error { + if err := vnic.Create(); err != nil { + return fmt.Errorf("VNIC creation failed %s %s", vnic.GlobalID, err.Error()) + } + if err := vnic.SetHardwareAddr(*vnic.MACAddr); err != nil { + return fmt.Errorf("VNIC Set 
MAC Address %s %s", vnic.GlobalID, err.Error()) + } + if err := vnic.SetMTU(vnic.MTU); err != nil { + return fmt.Errorf("VNIC Set MTU Address %s %s", vnic.GlobalID, err.Error()) + } + if err := vnic.Attach(bridge); err != nil { + return fmt.Errorf("VNIC attach failed %s %s %s", vnic.GlobalID, bridge.GlobalID, err.Error()) + } + vnic.BridgeID = bridge.LinkName + if err := vnic.Enable(); err != nil { + return fmt.Errorf("VNIC enable failed %s %s %s", vnic.GlobalID, bridge.GlobalID, err.Error()) + } + return nil +} + +// DestroyVnicV2 destroys a tenant VNIC. If this happens to be the last vnic for +// this tenant subnet on this CN, the bridge and gre tunnel will also be +// destroyed and SSNTP message generated. +// +// This will replace the DestroyVnic method +// +// This message needs to be sent to the CNCI which will teardown the tunnel. +// Note: The caller of this function is responsible to send the message to the +// scheduler or CNCI +// If the ContainerInfo is set, the container logical network has to +// be deleted using the command line or API equivalent of +// docker network rm ContainerInfo.SubnetID> +func (cn *ComputeNode) DestroyVnicV2(cfg *VnicConfig) (*SsntpEventInfo, *ContainerInfo, error) { + var cInfo *ContainerInfo + + s, err := cn.DestroyVnic(cfg) + if s != nil && s.containerSubnetID != "" { + cInfo = &ContainerInfo{ + CNContainerEvent: ContainerNetworkDel, + SubnetID: s.containerSubnetID, + } + } + + return s, cInfo, err +} + +// DestroyVnic destroys a tenant VNIC. If this happens to be the last vnic for +// this tenant subnet on this CN, the bridge and gre tunnel will also be +// destroyed and SSNTP message generated. +// +// This API has been deperecated +// +// This message needs to be sent to the CNCI which will teardown the tunnel. 
+// Note: The caller of this function is responsible to send the message to the +// scheduler or CNCI +func (cn *ComputeNode) DestroyVnic(cfg *VnicConfig) (*SsntpEventInfo, error) { + var brDeleteMsg *SsntpEventInfo + + if cfg == nil || cn.cnTopology == nil { + return nil, NewAPIError("invalid vnic or configuration") + } + + if err := checkCnVnicCfg(cfg); err != nil { + return nil, NewAPIError(err.Error()) + } + + alias := genCnVnicAliases(cfg) + vnic, err := NewVnic(alias.vnic) + if err != nil { + return nil, NewAPIError(err.Error()) + } + + // The entire delete has to be performed in a CS + // as there is a non zero possibility that link names + // may be reused or the same GRE tunnels created + cn.cnTopology.Lock() + defer cn.cnTopology.Unlock() + + vLink, present := cn.linkMap[alias.vnic] + if !present { + return nil, nil + } + + vnic.LinkName, vnic.Link.Attrs().Index, err = waitForDeviceReady(vLink) + if err != nil { + return nil, NewFatalError(vnic.GlobalID + err.Error()) + } + err = vnic.Destroy() + if err != nil { + return nil, NewFatalError(err.Error()) + } + delete(cn.linkMap, vnic.GlobalID) + delete(cn.nameMap, vnic.LinkName) + + vnicCount, err := cn.dbUpdate(alias.bridge, alias.vnic, dbDelVnic) + if err != nil { + return nil, NewFatalError(err.Error()) + } + + if vnicCount != 0 { + return nil, nil + } + + bridge, err := NewBridge(alias.bridge) + if err != nil { + return nil, NewFatalError(err.Error()) + } + + gre, err := NewGreTunEP(alias.gre, nil, nil, 0) + if err != nil { + return nil, NewFatalError(err.Error()) + } + + brDeleteMsg = &SsntpEventInfo{ + Event: SsntpTunDel, + CnciIP: cfg.ConcIP.String(), + ConcID: cfg.ConcID, + TenantID: cfg.TenantID, + SubnetID: cfg.SubnetID, + SubnetKey: cfg.SubnetKey, + Subnet: cfg.Subnet.String(), + CnIP: cn.ComputeAddr[0].IPNet.IP.String(), + CnID: cn.ID, + } + + //TODO: Try and make forward progress even on error + gLink, present := cn.linkMap[alias.gre] + if present { + gre.LinkName, gre.Link.Index, err = 
waitForDeviceReady(gLink) + if err != nil { + return nil, NewFatalError(gre.GlobalID + err.Error()) + } + + err := gre.Destroy() + if err != nil { + return nil, NewFatalError("gre destroy " + gre.GlobalID + err.Error()) + } + delete(cn.nameMap, gre.LinkName) + delete(cn.linkMap, gre.GlobalID) + + } else { + //TODO: Consider logging this and continue to delete bridge + return nil, NewFatalError(fmt.Sprintf("gre tunnel not present %s", gre.GlobalID)) + } + + bLink, present := cn.linkMap[alias.bridge] + if present { + bridge.LinkName, bridge.Link.Index, err = waitForDeviceReady(bLink) + if err != nil { + return nil, NewFatalError(bridge.GlobalID + err.Error()) + } + + if err := bridge.Destroy(); err != nil { + return nil, NewFatalError("bridge destroy failed " + err.Error()) + } + // We delete the container network when the bridge is deleted + if cn.containerMap[alias.bridge] { + brDeleteMsg.containerSubnetID = bridge.LinkName + cn.containerMap[alias.bridge] = false + } + delete(cn.nameMap, bridge.LinkName) + delete(cn.linkMap, bridge.GlobalID) + + if _, err := cn.dbUpdate(alias.bridge, "", dbDelBr); err != nil { + return nil, NewFatalError("db del br " + err.Error()) + } + + } else { + return nil, NewFatalError(fmt.Sprintf("bridge not present %s", bridge.GlobalID)) + } + + return brDeleteMsg, nil +} + +//ResetNetwork will attempt to clean up all network interfaces +//created. 
It will not clean up any interfaces created manually +func (cn *ComputeNode) ResetNetwork() error { + + links, err := netlink.LinkList() + if err != nil { + return NewFatalError("Cannot retrieve links" + err.Error()) + } + + cn.cnTopology.Lock() + defer cn.cnTopology.Unlock() + initCnTopology(cn.cnTopology) + + //Delete everything with an alias + for _, link := range links { + alias := link.Attrs().Alias + name := link.Attrs().Name + + if alias == "" { + continue + } + + err := netlink.LinkDel(link) + //TODO: Log this and continue + if err != nil { + fmt.Printf("Unable to delete link %s %v %v", name, link, err) + } + } + + //Check if we see any remanants + //Attempt one last time to delete them + links, err = netlink.LinkList() + var badLinks []string + for _, link := range links { + alias := link.Attrs().Alias + name := link.Attrs().Name + + if alias == "" { + continue + } + + err := netlink.LinkDel(link) + if err != nil { + badLinks = append(badLinks, name+"::"+alias) + } + } + + if badLinks != nil { + return fmt.Errorf("Failed to cleanup links %v", badLinks) + } + + return nil +} diff --git a/networking/libsnnet/cn_container_test.go b/networking/libsnnet/cn_container_test.go new file mode 100644 index 000000000..d06a03d40 --- /dev/null +++ b/networking/libsnnet/cn_container_test.go @@ -0,0 +1,992 @@ +// +// Copyright (c) 2016 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +package libsnnet_test + +import ( + "net" + "os" + "os/exec" + "strings" + "testing" + + "github.com/01org/ciao/networking/libsnnet" +) + +var cnConNetEnv string + +func cnConInit() { + cnConNetEnv = os.Getenv("SNNET_ENV") + + if cnConNetEnv == "" { + cnConNetEnv = "10.3.66.0/24" + } +} + +func linkDump(t *testing.T) error { + out, err := exec.Command("ip", "-d", "link").CombinedOutput() + + if err != nil { + t.Errorf("unable to dump link %v", err) + } else { + t.Log("dumping link info \n", string(out)) + } + + return err +} + +func dockerRestart(t *testing.T) error { + out, err := exec.Command("service", "docker", "restart").CombinedOutput() + if err != nil { + out, err = exec.Command("systemctl", "restart", "docker").CombinedOutput() + if err != nil { + t.Error("docker restart", err) + } + } + t.Log("docker restart\n", string(out)) + return err +} + +//Will be replaced by Docker API's in launcher +//docker run -it --net= --ip= --mac-address= +//ubuntu ip addr show eth0 scope global +func dockerRunVerify(t *testing.T, name string, ip net.IP, mac net.HardwareAddr, subnetID string) error { + cmd := exec.Command("docker", "run", "--name", ip.String(), "--net="+subnetID, + "--ip="+ip.String(), "--mac-address="+mac.String(), + "ubuntu", "ip", "addr", "show", "eth0", "scope", "global") + out, err := cmd.CombinedOutput() + + if err != nil { + t.Error("docker run failed", cmd, err) + } else { + t.Log("docker run dump \n", string(out)) + } + + if !strings.Contains(string(out), ip.String()) { + t.Error("docker container IP not setup ", ip.String()) + } + if !strings.Contains(string(out), mac.String()) { + t.Error("docker container MAC not setup ", mac.String()) + } + if !strings.Contains(string(out), "mtu 1400") { + t.Error("docker container MTU not setup ") + } + + if err := dockerContainerInfo(t, name); err != nil { + t.Error("docker container inspect failed", name, err.Error()) + } + return err +} + +func dockerContainerDelete(t *testing.T, name string) error { + 
out, err := exec.Command("docker", "stop", name).CombinedOutput() + if err != nil { + t.Error("docker container stop failed", name, err) + } else { + t.Log("docker container stop= \n", string(out)) + } + + out, err = exec.Command("docker", "rm", name).CombinedOutput() + if err != nil { + t.Error("docker container delete failed", name, err) + } else { + t.Log("docker container delete= \n", string(out)) + } + return err +} + +func dockerContainerInfo(t *testing.T, name string) error { + out, err := exec.Command("docker", "ps", "-a").CombinedOutput() + if err != nil { + t.Error("docker ps -a", err) + } else { + t.Log("docker =\n", string(out)) + } + + out, err = exec.Command("docker", "inspect", name).CombinedOutput() + if err != nil { + t.Error("docker network inspect", name, err) + } else { + t.Log("docker network inspect \n", string(out)) + } + return err +} + +//Will be replaced by Docker API's in launcher +// docker network create -d=ciao [--ipam-driver=ciao] +// --subnet= --gateway= ContainerInfo.SubnetID +//The IPAM driver needs top of the tree Docker (which needs special build) +//is not tested yet +func dockerNetCreate(t *testing.T, subnet net.IPNet, gw net.IP, bridge string, subnetID string) error { + cmd := exec.Command("docker", "network", "create", "-d=ciao", + "--subnet="+subnet.String(), "--gateway="+gw.String(), + "--opt", "bridge="+bridge, subnetID) + + out, err := cmd.CombinedOutput() + + if err != nil { + t.Error("docker network create failed", err) + } else { + t.Log("docker network create \n", string(out)) + } + return err +} + +//Will be replaced by Docker API's in launcher +// docker network rm ContainerInfo.SubnetID +func dockerNetDelete(t *testing.T, subnetID string) error { + out, err := exec.Command("docker", "network", "rm", subnetID).CombinedOutput() + if err != nil { + t.Error("docker network delete failed", err) + } else { + t.Log("docker network delete=", string(out)) + } + return err +} +func dockerNetList(t *testing.T) error { + out, 
err := exec.Command("docker", "network", "ls").CombinedOutput() + if err != nil { + t.Error("docker network ls", err) + } else { + t.Log("docker network ls= \n", string(out)) + } + return err +} + +func dockerNetInfo(t *testing.T, subnetID string) error { + out, err := exec.Command("docker", "network", "inspect", subnetID).CombinedOutput() + if err != nil { + t.Error("docker network inspect", err) + } else { + t.Log("docker network inspect=", string(out)) + } + return err +} + +//Tests typical sequence of CN Container APIs +// +//This tests exercises the standard set of APIs that +//the launcher invokes when setting up a CN and creating +//Container VNICs. +// +//Test is expected to pass +func TestCNContainer_Base(t *testing.T) { + cn := &libsnnet.ComputeNode{} + + cn.NetworkConfig = &libsnnet.NetworkConfig{ + ManagementNet: nil, + ComputeNet: nil, + Mode: libsnnet.GreTunnel, + } + + cn.ID = "cnuuid" + cnConInit() + _, mnet, _ := net.ParseCIDR(cnConNetEnv) + + mgtNet := []net.IPNet{*mnet} + cn.ManagementNet = mgtNet + cn.ComputeNet = mgtNet + + if err := cn.Init(); err != nil { + t.Fatal("ERROR: cn.Init failed", err) + } + + if err := cn.DbRebuild(nil); err != nil { + t.Fatal("ERROR: cn.dbRebuild failed") + } + + dockerPlugin := libsnnet.NewDockerPlugin() + if err := dockerPlugin.Init(); err != nil { + t.Fatal("ERROR: Docker Init failed ", err) + } + + if err := dockerPlugin.Start(); err != nil { + t.Fatal("ERROR: Docker start failed ", err) + } + + //Restarting docker here so the the plugin will + //be picked up without modifing the boot scripts + if err := dockerRestart(t); err != nil { + t.Fatal("ERROR: Docker restart failed ", err) + } + + //From YAML on instance init + //Two VNICs on the same tenant subnet + mac, _ := net.ParseMAC("CA:FE:00:01:02:03") + mac2, _ := net.ParseMAC("CA:FE:00:02:02:03") + _, tnet, _ := net.ParseCIDR("192.168.111.0/24") + tip := net.ParseIP("192.168.111.100") + tip2 := net.ParseIP("192.168.111.102") + cip := 
net.ParseIP("192.168.200.200") + + vnicCfg := &libsnnet.VnicConfig{ + VnicRole: libsnnet.TenantContainer, + VnicIP: tip, + ConcIP: cip, + VnicMAC: mac, + Subnet: *tnet, + SubnetKey: 0xF, + VnicID: "vuuid", + InstanceID: "iuuid", + TenantID: "tuuid", + SubnetID: "suuid", + ConcID: "cnciuuid", + } + + vnicCfg2 := &libsnnet.VnicConfig{ + VnicRole: libsnnet.TenantContainer, + VnicIP: tip2, + ConcIP: cip, + VnicMAC: mac2, + Subnet: *tnet, + SubnetKey: 0xF, + VnicID: "vuuid2", + InstanceID: "iuuid2", + TenantID: "tuuid", + SubnetID: "suuid", + ConcID: "cnciuuid", + } + + var subnetID, iface string //Used to check that they match + + // Create a VNIC: Should create bridge and tunnels + if vnic, ssntpEvent, cInfo, err := cn.CreateVnicV2(vnicCfg); err != nil { + t.Error(err) + } else { + switch { + case ssntpEvent == nil: + t.Error("ERROR: expected SSNTP Event") + case ssntpEvent.Event != libsnnet.SsntpTunAdd: + t.Errorf("ERROR: cn.CreateVnic event population errror %v ", err) + case cInfo == nil: + t.Error("ERROR: expected Container Event") + case cInfo.CNContainerEvent != libsnnet.ContainerNetworkAdd: + t.Error("ERROR: Expected network add", ssntpEvent, cInfo) + case cInfo.SubnetID == "": + t.Error("ERROR: expected Container SubnetID") + case cInfo.Subnet.String() == "": + t.Error("ERROR: expected Container Subnet") + case cInfo.Gateway.String() == "": + t.Error("ERROR: expected Container Gateway") + case cInfo.Bridge == "": + t.Error("ERROR: expected Container Bridge") + } + if err := validSsntpEvent(ssntpEvent, vnicCfg); err != nil { + t.Error("ERROR: cn.CreateVnic event population errror", vnic, ssntpEvent) + } + + //Cache the first subnet ID we see. 
All subsequent should have the same + subnetID = cInfo.SubnetID + iface = vnic.InterfaceName() + if iface == "" { + t.Error("ERROR: cn.CreateVnic interface population errror", vnic) + } + + //Launcher will attach to this name and send out the event + //Launcher will also create the logical docker network + t.Log("VNIC created =", vnic.LinkName, ssntpEvent, cInfo) + + if err := linkDump(t); err != nil { + t.Errorf("unable to dump link %v", err) + } + + //Now kick off the docker commands + if err := dockerNetCreate(t, cInfo.Subnet, cInfo.Gateway, + cInfo.Bridge, cInfo.SubnetID); err != nil { + t.Error("ERROR: docker network", cInfo, err) + } + if err := dockerNetInfo(t, cInfo.SubnetID); err != nil { + t.Error("ERROR: docker network info", cInfo, err) + } + if err := dockerRunVerify(t, vnicCfg.VnicIP.String(), vnicCfg.VnicIP, vnicCfg.VnicMAC, cInfo.SubnetID); err != nil { + t.Error("ERROR: docker run", cInfo, err) + } + if err := dockerContainerDelete(t, vnicCfg.VnicIP.String()); err != nil { + t.Error("ERROR: docker network delete", cInfo, err) + } + } + + //Duplicate VNIC creation + if vnic, ssntpEvent, cInfo, err := cn.CreateVnicV2(vnicCfg); err != nil { + t.Error(err) + } else { + switch { + case ssntpEvent != nil: + t.Error("ERROR: DUP unexpected SSNTP event", vnic, ssntpEvent) + case cInfo == nil: + t.Error("ERROR: DUP expected Container Info", vnic) + case cInfo.SubnetID != subnetID: + t.Error("ERROR: DUP SubnetID mismatch", ssntpEvent, cInfo) + case cInfo.CNContainerEvent != libsnnet.ContainerNetworkInfo: + t.Error("ERROR: DUP Expected network info", ssntpEvent, cInfo) + case iface != vnic.InterfaceName(): + t.Errorf("ERROR: DUP interface mismatch [%v] [%v]", iface, vnic.InterfaceName()) + } + } + + //Second VNIC creation - Should suceed + if vnic, ssntpEvent, cInfo, err := cn.CreateVnicV2(vnicCfg2); err != nil { + t.Error(err) + } else { + switch { + case ssntpEvent != nil: + t.Error("ERROR: VNIC2 unexpected SSNTP event", vnic, ssntpEvent) + case cInfo == 
nil: + t.Error("ERROR: VNIC2 expected Container Info", vnic) + case cInfo.SubnetID != subnetID: + t.Error("ERROR: VNIC2 SubnetID mismatch", ssntpEvent, cInfo) + case cInfo.CNContainerEvent != libsnnet.ContainerNetworkInfo: + t.Error("ERROR: VNIC2 Expected network info", ssntpEvent, cInfo) + } + + t.Log("VNIC2 created =", vnic.LinkName, ssntpEvent, cInfo) + + iface = vnic.InterfaceName() + if iface == "" { + t.Error("ERROR: cn.CreateVnic interface population errror", vnic) + } + + if err := linkDump(t); err != nil { + t.Errorf("unable to dump link %v", err) + } + + if err := dockerRunVerify(t, vnicCfg2.VnicIP.String(), vnicCfg2.VnicIP, + vnicCfg2.VnicMAC, cInfo.SubnetID); err != nil { + t.Error("ERROR: docker run", cInfo, err) + } + if err := dockerContainerDelete(t, vnicCfg2.VnicIP.String()); err != nil { + t.Error("ERROR: docker network delete", cInfo, err) + } + } + + //Duplicate VNIC creation + if vnic, ssntpEvent, cInfo, err := cn.CreateVnicV2(vnicCfg2); err != nil { + t.Error(err) + } else { + switch { + case ssntpEvent != nil: + t.Error("ERROR: DUP unexpected SSNTP event", vnic, ssntpEvent) + case cInfo == nil: + t.Error("ERROR: DUP expected Container Info", vnic) + case cInfo.SubnetID != subnetID: + t.Error("ERROR: DUP SubnetID mismatch", ssntpEvent, cInfo) + case cInfo.CNContainerEvent != libsnnet.ContainerNetworkInfo: + t.Error("ERROR: DUP Expected network info", ssntpEvent, cInfo) + case iface != vnic.InterfaceName(): + t.Errorf("ERROR: DUP interface mismatch [%v] [%v]", iface, vnic.InterfaceName()) + } + } + + //Destroy the first one + if ssntpEvent, cInfo, err := cn.DestroyVnicV2(vnicCfg); err != nil { + t.Error(err) + } else { + switch { + case ssntpEvent != nil: + t.Error("ERROR: DELETE unexpected SSNTP Event", ssntpEvent) + case cInfo != nil: + t.Error("ERROR: DELETE unexpected Container Event") + } + t.Log("VNIC deleted event", ssntpEvent, cInfo) + } + + //Destroy it again + if ssntpEvent, cInfo, err := cn.DestroyVnicV2(vnicCfg); err != nil { + 
t.Error(err) + } else { + switch { + case ssntpEvent != nil: + t.Error("ERROR: DELETE unexpected SSNTP Event", ssntpEvent) + case cInfo != nil: + t.Error("ERROR: DELETE unexpected Container event", cInfo) + } + t.Log("VNIC deleted event", ssntpEvent, cInfo) + } + + // Try and destroy - should work - cInfo should be reported + if ssntpEvent, cInfo, err := cn.DestroyVnicV2(vnicCfg2); err != nil { + t.Error(err) + } else { + switch { + case ssntpEvent == nil: + t.Error("ERROR: DELETE expected SSNTP Event") + case cInfo == nil: + t.Error("ERROR: DELETE expected Container Event") + case cInfo.SubnetID != subnetID: + t.Error("ERROR: DELETE SubnetID mismatch", ssntpEvent, cInfo) + case cInfo.CNContainerEvent != libsnnet.ContainerNetworkDel: + t.Error("ERROR: DELETE Expected network delete", ssntpEvent, cInfo) + } + t.Log("VNIC deleted event", ssntpEvent, cInfo) + + if err := linkDump(t); err != nil { + t.Errorf("unable to dump link %v", err) + } + } + + //Has to be called after the VNIC has been deleted + if err := dockerNetDelete(t, subnetID); err != nil { + t.Error("ERROR:", subnetID, err) + } + if err := dockerNetList(t); err != nil { + t.Error("ERROR:", err) + } + + //Destroy it again + if ssntpEvent, cInfo, err := cn.DestroyVnicV2(vnicCfg2); err != nil { + t.Error(err) + } else { + switch { + case ssntpEvent != nil: + t.Error("ERROR: unexpected SSNTP Event", ssntpEvent) + case cInfo != nil: + t.Error("ERROR: unexpected Container event", cInfo) + } + t.Log("VNIC deleted event", ssntpEvent, cInfo) + } + + if err := dockerPlugin.Stop(); err != nil { + t.Fatal("ERROR: Docker stop failed ", err) + } + + if err := dockerPlugin.Close(); err != nil { + t.Fatal("ERROR: Docker close failed ", err) + } + +} + +func dockerRunTop(t *testing.T, name string, ip net.IP, mac net.HardwareAddr, subnetID string) error { + cmd := exec.Command("docker", "run", "--name", ip.String(), "--net="+subnetID, + "--ip="+ip.String(), "--mac-address="+mac.String(), + "ubuntu", "top", "-b", "-d1") + 
go cmd.Run() // Ensures that the containers stays alive. Kludgy + return nil +} + +func dockerRunPingVerify(t *testing.T, name string, ip net.IP, mac net.HardwareAddr, subnetID string, addr string) error { + cmd := exec.Command("docker", "run", "--name", ip.String(), "--net="+subnetID, + "--ip="+ip.String(), "--mac-address="+mac.String(), + "ubuntu", "ping", "-c", "1", "192.168.111.100") + out, err := cmd.CombinedOutput() + + if err != nil { + t.Error("docker run failed", cmd, err) + } else { + t.Log("docker run dump \n", string(out)) + } + + if !strings.Contains(string(out), "1 received") { + t.Error("docker connectivity test failed", ip.String()) + } + return nil +} + +//Tests connectivity between two node local Containers +// +//Tests connectivity between two node local Containers +//using ping between a long running container and +//container that does ping +// +//Test is expected to pass +func TestCNContainer_Connectivity(t *testing.T) { + cn := &libsnnet.ComputeNode{} + + cn.NetworkConfig = &libsnnet.NetworkConfig{ + ManagementNet: nil, + ComputeNet: nil, + Mode: libsnnet.GreTunnel, + } + + cn.ID = "cnuuid" + cnConInit() + _, mnet, _ := net.ParseCIDR(cnConNetEnv) + + mgtNet := []net.IPNet{*mnet} + cn.ManagementNet = mgtNet + cn.ComputeNet = mgtNet + + if err := cn.Init(); err != nil { + t.Fatal("ERROR: cn.Init failed", err) + } + + if err := cn.DbRebuild(nil); err != nil { + t.Fatal("ERROR: cn.dbRebuild failed") + } + + dockerPlugin := libsnnet.NewDockerPlugin() + if err := dockerPlugin.Init(); err != nil { + t.Fatal("ERROR: Docker Init failed ", err) + } + + if err := dockerPlugin.Start(); err != nil { + t.Fatal("ERROR: Docker start failed ", err) + } + + //Restarting docker here so the the plugin will + //be picked up without modifing the boot scripts + if err := dockerRestart(t); err != nil { + t.Fatal("ERROR: Docker restart failed ", err) + } + + //From YAML on instance init + //Two VNICs on the same tenant subnet + mac, _ := 
net.ParseMAC("CA:FE:00:01:02:03") + mac2, _ := net.ParseMAC("CA:FE:00:02:02:03") + _, tnet, _ := net.ParseCIDR("192.168.111.0/24") + tip := net.ParseIP("192.168.111.100") + tip2 := net.ParseIP("192.168.111.102") + cip := net.ParseIP("192.168.200.200") + + vnicCfg := &libsnnet.VnicConfig{ + VnicRole: libsnnet.TenantContainer, + VnicIP: tip, + ConcIP: cip, + VnicMAC: mac, + Subnet: *tnet, + SubnetKey: 0xF, + VnicID: "vuuid", + InstanceID: "iuuid", + TenantID: "tuuid", + SubnetID: "suuid", + ConcID: "cnciuuid", + } + + vnicCfg2 := &libsnnet.VnicConfig{ + VnicRole: libsnnet.TenantContainer, + VnicIP: tip2, + ConcIP: cip, + VnicMAC: mac2, + Subnet: *tnet, + SubnetKey: 0xF, + VnicID: "vuuid2", + InstanceID: "iuuid2", + TenantID: "tuuid", + SubnetID: "suuid", + ConcID: "cnciuuid", + } + + _, _, cInfo, err := cn.CreateVnicV2(vnicCfg) + if err != nil { + t.Error(err) + } + + err = dockerNetCreate(t, cInfo.Subnet, cInfo.Gateway, cInfo.Bridge, cInfo.SubnetID) + if err != nil { + t.Error(err) + } + + //Kick off a long running container + dockerRunTop(t, vnicCfg.VnicIP.String(), vnicCfg.VnicIP, vnicCfg.VnicMAC, cInfo.SubnetID) + + _, _, cInfo2, err := cn.CreateVnicV2(vnicCfg2) + if err != nil { + t.Error(err) + } + + if err := dockerRunPingVerify(t, vnicCfg2.VnicIP.String(), vnicCfg2.VnicIP, + vnicCfg2.VnicMAC, cInfo2.SubnetID, vnicCfg.VnicIP.String()); err != nil { + t.Error(err) + } + + //Destroy the containers + if err := dockerContainerDelete(t, vnicCfg.VnicIP.String()); err != nil { + t.Error(err) + } + if err := dockerContainerDelete(t, vnicCfg2.VnicIP.String()); err != nil { + t.Error(err) + } + + //Destroy the VNICs + if _, _, err := cn.DestroyVnicV2(vnicCfg); err != nil { + t.Error(err) + } + if _, _, err := cn.DestroyVnicV2(vnicCfg2); err != nil { + t.Error(err) + } + + //Destroy the network, has to be called after the VNIC has been deleted + if err := dockerNetDelete(t, cInfo.SubnetID); err != nil { + t.Error(err) + } + if err := dockerPlugin.Stop(); err != nil { + 
t.Fatal(err) + } + if err := dockerPlugin.Close(); err != nil { + t.Fatal(err) + } +} + +//Tests VM and Container VNIC Interop +// +//Tests that VM and Container VNICs can co-exist +//by created VM and Container VNICs in different orders and in each case +//tests that the Network Connectivity is functional +// +//Test is expected to pass +func TestCNContainer_Interop1(t *testing.T) { + cn := &libsnnet.ComputeNode{} + + cn.NetworkConfig = &libsnnet.NetworkConfig{ + ManagementNet: nil, + ComputeNet: nil, + Mode: libsnnet.GreTunnel, + } + + cn.ID = "cnuuid" + cnConInit() + _, mnet, _ := net.ParseCIDR(cnConNetEnv) + + mgtNet := []net.IPNet{*mnet} + cn.ManagementNet = mgtNet + cn.ComputeNet = mgtNet + + if err := cn.Init(); err != nil { + t.Fatal("ERROR: cn.Init failed", err) + } + + if err := cn.DbRebuild(nil); err != nil { + t.Fatal("ERROR: cn.dbRebuild failed") + } + + dockerPlugin := libsnnet.NewDockerPlugin() + if err := dockerPlugin.Init(); err != nil { + t.Fatal("ERROR: Docker Init failed ", err) + } + + if err := dockerPlugin.Start(); err != nil { + t.Fatal("ERROR: Docker start failed ", err) + } + + //Restarting docker here so the the plugin will + //be picked up without modifing the boot scripts + if err := dockerRestart(t); err != nil { + t.Fatal("ERROR: Docker restart failed ", err) + } + + //From YAML on instance init + //Two VNICs on the same tenant subnet + _, tnet, _ := net.ParseCIDR("192.168.111.0/24") + tip := net.ParseIP("192.168.111.100") + mac, _ := net.ParseMAC("CA:FE:00:01:02:03") + cip := net.ParseIP("192.168.200.200") + mac2, _ := net.ParseMAC("CA:FE:00:02:02:03") + tip2 := net.ParseIP("192.168.111.102") + mac3, _ := net.ParseMAC("CA:FE:00:03:02:03") + tip3 := net.ParseIP("192.168.111.103") + mac4, _ := net.ParseMAC("CA:FE:00:04:02:03") + tip4 := net.ParseIP("192.168.111.104") + + vnicCfg := &libsnnet.VnicConfig{ + VnicRole: libsnnet.TenantContainer, + VnicIP: tip, + ConcIP: cip, + VnicMAC: mac, + Subnet: *tnet, + SubnetKey: 0xF, + VnicID: 
"vuuid", + InstanceID: "iuuid", + TenantID: "tuuid", + SubnetID: "suuid", + ConcID: "cnciuuid", + } + + vnicCfg2 := &libsnnet.VnicConfig{ + VnicRole: libsnnet.TenantContainer, + VnicIP: tip2, + ConcIP: cip, + VnicMAC: mac2, + Subnet: *tnet, + SubnetKey: 0xF, + VnicID: "vuuid2", + InstanceID: "iuuid2", + TenantID: "tuuid", + SubnetID: "suuid", + ConcID: "cnciuuid", + } + + vnicCfg3 := &libsnnet.VnicConfig{ + VnicRole: libsnnet.TenantVM, + VnicIP: tip3, + ConcIP: cip, + VnicMAC: mac3, + Subnet: *tnet, + SubnetKey: 0xF, + VnicID: "vuuid3", + InstanceID: "iuuid3", + TenantID: "tuuid", + SubnetID: "suuid", + ConcID: "cnciuuid", + } + + vnicCfg4 := &libsnnet.VnicConfig{ + VnicRole: libsnnet.TenantVM, + VnicIP: tip4, + ConcIP: cip, + VnicMAC: mac4, + Subnet: *tnet, + SubnetKey: 0xF, + VnicID: "vuuid4", + InstanceID: "iuuid4", + TenantID: "tuuid", + SubnetID: "suuid", + ConcID: "cnciuuid", + } + + _, _, cInfo, err := cn.CreateVnicV2(vnicCfg) + if err != nil { + t.Error(err) + } + + err = dockerNetCreate(t, cInfo.Subnet, cInfo.Gateway, cInfo.Bridge, cInfo.SubnetID) + if err != nil { + t.Error(err) + } + + _, _, _, err = cn.CreateVnicV2(vnicCfg3) + if err != nil { + t.Error(err) + } + + //Kick off a long running container + dockerRunTop(t, vnicCfg.VnicIP.String(), vnicCfg.VnicIP, vnicCfg.VnicMAC, cInfo.SubnetID) + + _, _, cInfo2, err := cn.CreateVnicV2(vnicCfg2) + if err != nil { + t.Error(err) + } + + _, _, _, err = cn.CreateVnicV2(vnicCfg4) + if err != nil { + t.Error(err) + } + + if err := dockerRunPingVerify(t, vnicCfg2.VnicIP.String(), vnicCfg2.VnicIP, + vnicCfg2.VnicMAC, cInfo2.SubnetID, vnicCfg.VnicIP.String()); err != nil { + t.Error(err) + } + + //Destroy the containers + if err := dockerContainerDelete(t, vnicCfg.VnicIP.String()); err != nil { + t.Error(err) + } + if err := dockerContainerDelete(t, vnicCfg2.VnicIP.String()); err != nil { + t.Error(err) + } + + if _, _, err := cn.DestroyVnicV2(vnicCfg); err != nil { + t.Error(err) + } + if _, _, err := 
cn.DestroyVnicV2(vnicCfg2); err != nil { + t.Error(err) + } + if _, _, err := cn.DestroyVnicV2(vnicCfg3); err != nil { + t.Error(err) + } + if err := dockerNetDelete(t, cInfo.SubnetID); err != nil { + t.Error(err) + } + if _, _, err := cn.DestroyVnicV2(vnicCfg4); err != nil { + t.Error(err) + } + if err := dockerPlugin.Stop(); err != nil { + t.Fatal(err) + } + if err := dockerPlugin.Close(); err != nil { + t.Fatal(err) + } +} + +//Tests VM and Container VNIC Interop +// +//Tests that VM and Container VNICs can co-exist +//by created VM and Container VNICs in different orders and in each case +//tests that the Network Connectivity is functional +// +//Test is expected to pass +func TestCNContainer_Interop2(t *testing.T) { + cn := &libsnnet.ComputeNode{} + + cn.NetworkConfig = &libsnnet.NetworkConfig{ + ManagementNet: nil, + ComputeNet: nil, + Mode: libsnnet.GreTunnel, + } + + cn.ID = "cnuuid" + cnConInit() + _, mnet, _ := net.ParseCIDR(cnConNetEnv) + + mgtNet := []net.IPNet{*mnet} + cn.ManagementNet = mgtNet + cn.ComputeNet = mgtNet + + if err := cn.Init(); err != nil { + t.Fatal("ERROR: cn.Init failed", err) + } + + if err := cn.DbRebuild(nil); err != nil { + t.Fatal("ERROR: cn.dbRebuild failed") + } + + dockerPlugin := libsnnet.NewDockerPlugin() + if err := dockerPlugin.Init(); err != nil { + t.Fatal("ERROR: Docker Init failed ", err) + } + + if err := dockerPlugin.Start(); err != nil { + t.Fatal("ERROR: Docker start failed ", err) + } + + //Restarting docker here so the the plugin will + //be picked up without modifing the boot scripts + if err := dockerRestart(t); err != nil { + t.Fatal("ERROR: Docker restart failed ", err) + } + + //From YAML on instance init + //Two VNICs on the same tenant subnet + _, tnet, _ := net.ParseCIDR("192.168.111.0/24") + tip := net.ParseIP("192.168.111.100") + mac, _ := net.ParseMAC("CA:FE:00:01:02:03") + cip := net.ParseIP("192.168.200.200") + mac2, _ := net.ParseMAC("CA:FE:00:02:02:03") + tip2 := net.ParseIP("192.168.111.102") + 
mac3, _ := net.ParseMAC("CA:FE:00:03:02:03") + tip3 := net.ParseIP("192.168.111.103") + mac4, _ := net.ParseMAC("CA:FE:00:04:02:03") + tip4 := net.ParseIP("192.168.111.104") + + vnicCfg := &libsnnet.VnicConfig{ + VnicRole: libsnnet.TenantContainer, + VnicIP: tip, + ConcIP: cip, + VnicMAC: mac, + Subnet: *tnet, + SubnetKey: 0xF, + VnicID: "vuuid", + InstanceID: "iuuid", + TenantID: "tuuid", + SubnetID: "suuid", + ConcID: "cnciuuid", + } + + vnicCfg2 := &libsnnet.VnicConfig{ + VnicRole: libsnnet.TenantContainer, + VnicIP: tip2, + ConcIP: cip, + VnicMAC: mac2, + Subnet: *tnet, + SubnetKey: 0xF, + VnicID: "vuuid2", + InstanceID: "iuuid2", + TenantID: "tuuid", + SubnetID: "suuid", + ConcID: "cnciuuid", + } + + vnicCfg3 := &libsnnet.VnicConfig{ + VnicRole: libsnnet.TenantVM, + VnicIP: tip3, + ConcIP: cip, + VnicMAC: mac3, + Subnet: *tnet, + SubnetKey: 0xF, + VnicID: "vuuid3", + InstanceID: "iuuid3", + TenantID: "tuuid", + SubnetID: "suuid", + ConcID: "cnciuuid", + } + + vnicCfg4 := &libsnnet.VnicConfig{ + VnicRole: libsnnet.TenantVM, + VnicIP: tip4, + ConcIP: cip, + VnicMAC: mac4, + Subnet: *tnet, + SubnetKey: 0xF, + VnicID: "vuuid4", + InstanceID: "iuuid4", + TenantID: "tuuid", + SubnetID: "suuid", + ConcID: "cnciuuid", + } + + _, _, _, err := cn.CreateVnicV2(vnicCfg3) + if err != nil { + t.Error(err) + } + _, _, cInfo, err := cn.CreateVnicV2(vnicCfg) + if err != nil { + t.Error(err) + } + + err = dockerNetCreate(t, cInfo.Subnet, cInfo.Gateway, cInfo.Bridge, cInfo.SubnetID) + if err != nil { + t.Error(err) + } + + //Kick off a long running container + dockerRunTop(t, vnicCfg.VnicIP.String(), vnicCfg.VnicIP, vnicCfg.VnicMAC, cInfo.SubnetID) + + _, _, cInfo2, err := cn.CreateVnicV2(vnicCfg2) + if err != nil { + t.Error(err) + } + if err := dockerRunPingVerify(t, vnicCfg2.VnicIP.String(), vnicCfg2.VnicIP, + vnicCfg2.VnicMAC, cInfo2.SubnetID, vnicCfg.VnicIP.String()); err != nil { + t.Error(err) + } + + //Destroy the containers + if err := dockerContainerDelete(t, 
vnicCfg.VnicIP.String()); err != nil { + t.Error(err) + } + if err := dockerContainerDelete(t, vnicCfg2.VnicIP.String()); err != nil { + t.Error(err) + } + + _, _, _, err = cn.CreateVnicV2(vnicCfg4) + if err != nil { + t.Error(err) + } + + //Destroy the VNICs + if _, _, err := cn.DestroyVnicV2(vnicCfg); err != nil { + t.Error(err) + } + if _, _, err := cn.DestroyVnicV2(vnicCfg2); err != nil { + t.Error(err) + } + + //Destroy the network, has to be called after the VNIC has been deleted + if err := dockerNetDelete(t, cInfo.SubnetID); err != nil { + t.Error(err) + } + if _, _, err := cn.DestroyVnicV2(vnicCfg4); err != nil { + t.Error(err) + } + if _, _, err := cn.DestroyVnicV2(vnicCfg3); err != nil { + t.Error(err) + } + if err := dockerPlugin.Stop(); err != nil { + t.Fatal(err) + } + if err := dockerPlugin.Close(); err != nil { + t.Fatal(err) + } +} diff --git a/networking/libsnnet/cn_test.go b/networking/libsnnet/cn_test.go new file mode 100644 index 000000000..5c5c35162 --- /dev/null +++ b/networking/libsnnet/cn_test.go @@ -0,0 +1,1052 @@ +// +// Copyright (c) 2016 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +package libsnnet_test + +import ( + "fmt" + "net" + "os" + "strconv" + "testing" + + "github.com/01org/ciao/networking/libsnnet" +) + +var cnNetEnv string + +func cninit() { + cnNetEnv = os.Getenv("SNNET_ENV") + + if cnNetEnv == "" { + cnNetEnv = "10.3.66.0/24" + } +} + +//Tests the scaling of the CN VNIC Creation +// +//This tests creates a large number of VNICs across a number +//of subnets +// +//Test should pass OK +func TestCN_Scaling(t *testing.T) { + cn := &libsnnet.ComputeNode{} + + cn.NetworkConfig = &libsnnet.NetworkConfig{ + ManagementNet: nil, + ComputeNet: nil, + Mode: libsnnet.GreTunnel, + } + + cn.ID = "cnuuid" + + cninit() + _, mnet, _ := net.ParseCIDR(cnNetEnv) + + //From YAML, on agent init + mgtNet := []net.IPNet{*mnet} + cn.ManagementNet = mgtNet + cn.ComputeNet = mgtNet + + if err := cn.Init(); err != nil { + t.Fatal("ERROR: cn.Init failed", err) + } + if err := cn.ResetNetwork(); err != nil { + t.Error("ERROR: cn.ResetNetwork failed", err) + } + if err := cn.DbRebuild(nil); err != nil { + t.Fatal("ERROR: cn.dbRebuild failed") + } + + //From YAML on instance init + tenantID := "tenantuuid" + concIP := net.IPv4(192, 168, 111, 1) + + var maxBridges, maxVnics int + if testing.Short() { + maxBridges = scaleCfg.maxBridgesShort + maxVnics = scaleCfg.maxVnicsShort + } else { + maxBridges = scaleCfg.maxBridgesLong + maxVnics = scaleCfg.maxVnicsLong + } + + for s3 := 1; s3 <= maxBridges; s3++ { + s4 := 0 + _, tenantNet, _ := net.ParseCIDR("193.168." + strconv.Itoa(s3) + "." 
+ strconv.Itoa(s4) + "/24") + subnetID := "suuid_" + strconv.Itoa(s3) + "_" + strconv.Itoa(s4) + + for s4 := 2; s4 <= maxVnics; s4++ { + + vnicIP := net.IPv4(192, 168, byte(s3), byte(s4)) + mac, _ := net.ParseMAC("CA:FE:00:01:02:03") + + vnicID := "vuuid_" + strconv.Itoa(s3) + "_" + strconv.Itoa(s4) + instanceID := "iuuid_" + strconv.Itoa(s3) + "_" + strconv.Itoa(s4) + vnicCfg := &libsnnet.VnicConfig{ + VnicIP: vnicIP, + ConcIP: concIP, + VnicMAC: mac, + Subnet: *tenantNet, + SubnetKey: s3, + VnicID: vnicID, + InstanceID: instanceID, + SubnetID: subnetID, + TenantID: tenantID, + ConcID: "cnciuuid", + } + + if vnic, ssntpEvent, _, err := cn.CreateVnicV2(vnicCfg); err != nil { + t.Error("ERROR: cn.CreateVnicV2 failed", err) + } else { + t.Logf("VNIC Created vnic[%v] cfg[%v] event[%v]", vnic, vnicCfg, ssntpEvent) + } + } + } + + for s3 := 1; s3 <= maxBridges; s3++ { + s4 := 0 + _, tenantNet, _ := net.ParseCIDR("193.168." + strconv.Itoa(s3) + "." + strconv.Itoa(s4) + "/24") + subnetID := "suuid_" + strconv.Itoa(s3) + "_" + strconv.Itoa(s4) + + for s4 := 2; s4 <= maxVnics; s4++ { + + vnicIP := net.IPv4(192, 168, byte(s3), byte(s4)) + mac, _ := net.ParseMAC("CA:FE:00:01:02:03") + + vnicID := "vuuid_" + strconv.Itoa(s3) + "_" + strconv.Itoa(s4) + instanceID := "iuuid_" + strconv.Itoa(s3) + "_" + strconv.Itoa(s4) + vnicCfg := &libsnnet.VnicConfig{ + VnicIP: vnicIP, + ConcIP: concIP, + VnicMAC: mac, + Subnet: *tenantNet, + SubnetKey: 0xF, + VnicID: vnicID, + InstanceID: instanceID, + SubnetID: subnetID, + TenantID: tenantID, + ConcID: "cnciuuid", + } + + if ssntpEvent, _, err := cn.DestroyVnicV2(vnicCfg); err != nil { + t.Error("ERROR: cn.DestroyVnicV2 failed event", vnicCfg, err) + } else { + t.Logf("VNIC Destroyed cfg[%v] event[%v]", vnicCfg, ssntpEvent) + } + } + } +} + +//Tests the ResetNetwork API +// +//This test creates multiple VNICs belonging to multiple tenants +//It then uses the ResetNetwork API to reset the node's networking +//state to a clean state (as in 
reset). This test also check that +//the API can be called midway through a node's lifecyle and +//the DbRebuild API can be used to re-construct the node's +//networking state +// +//Test should pass OK +func TestCN_ResetNetwork(t *testing.T) { + cn := &libsnnet.ComputeNode{} + + cn.NetworkConfig = &libsnnet.NetworkConfig{ + ManagementNet: nil, + ComputeNet: nil, + Mode: libsnnet.GreTunnel, + } + + cn.ID = "cnuuid" + + _, net2, _ := net.ParseCIDR("193.168.1.0/24") + cninit() + _, net3, _ := net.ParseCIDR(cnNetEnv) + + //From YAML, on agent init + mgtNet := []net.IPNet{*net2, *net3} + cn.ManagementNet = mgtNet + cn.ComputeNet = mgtNet + + if err := cn.Init(); err != nil { + t.Fatal("ERROR: cn.Init failed", err) + } + if err := cn.ResetNetwork(); err != nil { + t.Error("ERROR: cn.ResetNetwork failed", err) + } + + //From YAML on instance init + mac, _ := net.ParseMAC("CA:FE:00:01:02:03") + vnicCfg := &libsnnet.VnicConfig{ + VnicIP: net.IPv4(192, 168, 1, 100), + ConcIP: net.IPv4(192, 168, 1, 1), + VnicMAC: mac, + Subnet: *net2, + SubnetKey: 0xF, + VnicID: "vuuid", + InstanceID: "iuuid", + TenantID: "tuuid", + SubnetID: "suuid", + ConcID: "cnciuuid", + } + + if vnic, ssntpEvent, _, err := cn.CreateVnicV2(vnicCfg); err != nil { + t.Error("ERROR: cn.CreateVnicV2 failed", err) + } else { + if ssntpEvent == nil { + t.Error("ERROR: cn.CreateVnicV2 expected event", vnic) + } + t.Log("VNIC1 duplicate created =", vnic.LinkName, ssntpEvent) + } + + vnicCfg.TenantID = "tuuid2" + vnicCfg.ConcIP = net.IPv4(192, 168, 1, 2) + + if vnic, ssntpEvent, _, err := cn.CreateVnicV2(vnicCfg); err != nil { + t.Error("ERROR: cn.CreateVnicV2 failed", err) + } else { + if ssntpEvent == nil { + t.Error("ERROR: cn.CreateVnicV2 expected event", vnic) + } + t.Log("VNIC1 duplicate created =", vnic.LinkName, ssntpEvent) + } + + if err := cn.ResetNetwork(); err != nil { + t.Error("ERROR: cn.ResetNetwork failed", err) + } + if err := cn.DbRebuild(nil); err != nil { + t.Fatal("ERROR: cn.dbRebuild 
failed") + } + + vnicCfg.TenantID = "tuuid" + vnicCfg.ConcIP = net.IPv4(192, 168, 1, 1) + + if vnic, ssntpEvent, _, err := cn.CreateVnicV2(vnicCfg); err != nil { + t.Error("ERROR: cn.CreateVnicV2 failed", err) + } else { + if ssntpEvent == nil { + t.Error("ERROR: cn.CreateVnicV2 expected event", vnic) + } + t.Log("VNIC1 duplicate created =", vnic.LinkName, ssntpEvent) + } + + vnicCfg.TenantID = "tuuid2" + vnicCfg.ConcIP = net.IPv4(192, 168, 1, 2) + + if vnic, ssntpEvent, _, err := cn.CreateVnicV2(vnicCfg); err != nil { + t.Error("ERROR: cn.CreateVnicV2 failed", err) + } else { + if ssntpEvent == nil { + t.Error("ERROR: cn.CreateVnicV2 expected event", vnic) + } + t.Log("VNIC1 duplicate created =", vnic.LinkName, ssntpEvent) + } + + if err := cn.ResetNetwork(); err != nil { + t.Error("ERROR: cn.ResetNetwork failed", err) + } + +} + +//Tests multiple VNIC's creation +// +//This tests tests if multiple VNICs belonging to multiple +//tenants can be sucessfully created and deleted on a given CN +//This tests also checks for the generation of the requisite +//SSNTP message that the launcher is expected to send to the +//CNCI via the scheduler +// +//Test should pass OK +func TestCN_MultiTenant(t *testing.T) { + cn := &libsnnet.ComputeNode{} + + cn.NetworkConfig = &libsnnet.NetworkConfig{ + ManagementNet: nil, + ComputeNet: nil, + Mode: libsnnet.GreTunnel, + } + + cn.ID = "cnuuid" + + //_, net1, _ := net.ParseCIDR("127.0.0.0/24") + _, net2, _ := net.ParseCIDR("193.168.1.0/24") + cninit() + _, net3, _ := net.ParseCIDR(cnNetEnv) + + //From YAML, on agent init + mgtNet := []net.IPNet{*net2, *net3} + cn.ManagementNet = mgtNet + cn.ComputeNet = mgtNet + + if err := cn.Init(); err != nil { + t.Fatal("ERROR: cn.Init failed", err) + } + if err := cn.ResetNetwork(); err != nil { + t.Error("ERROR: cn.ResetNetwork failed", err) + } + if err := cn.DbRebuild(nil); err != nil { + t.Fatal("ERROR: cn.dbRebuild failed") + } + if err := cn.ResetNetwork(); err != nil { + t.Error("ERROR: 
cn.ResetNetwork failed", err) + } + if err := cn.DbRebuild(nil); err != nil { + t.Fatal("ERROR: cn.dbRebuild failed") + } + + //From YAML on instance init + mac, _ := net.ParseMAC("CA:FE:00:01:02:03") + vnicCfg := &libsnnet.VnicConfig{ + VnicIP: net.IPv4(192, 168, 1, 100), + ConcIP: net.IPv4(192, 168, 1, 1), + VnicMAC: mac, + Subnet: *net2, + SubnetKey: 0xF, + VnicID: "vuuid", + InstanceID: "iuuid", + TenantID: "tuuid", + SubnetID: "suuid", + ConcID: "cnciuuid", + } + + if vnic, ssntpEvent, _, err := cn.CreateVnicV2(vnicCfg); err != nil { + t.Error("ERROR: cn.CreateVnicV2 failed", err) + } else { + if ssntpEvent == nil { + t.Error("ERROR: cn.CreateVnicV2 expected event", vnic) + } + t.Log("VNIC1 duplicate created =", vnic.LinkName, ssntpEvent) + } + + vnicCfg.TenantID = "tuuid2" + vnicCfg.ConcIP = net.IPv4(192, 168, 1, 2) + + if vnic, ssntpEvent, _, err := cn.CreateVnicV2(vnicCfg); err != nil { + t.Error("ERROR: cn.CreateVnicV2 failed", err) + } else { + if ssntpEvent == nil { + t.Error("ERROR: cn.CreateVnicV2 expected event", vnic) + } + t.Log("VNIC1 duplicate created =", vnic.LinkName, ssntpEvent) + } + + if ssntpEvent, _, err := cn.DestroyVnicV2(vnicCfg); err != nil { + t.Error("ERROR: cn.DestroyVnicV2 failed event", vnicCfg, err) + } else { + if ssntpEvent == nil { + t.Error("ERROR: cn.DestroyVnicV2 expected event", vnicCfg, err) + } + } + + vnicCfg.TenantID = "tuuid" + vnicCfg.ConcIP = net.IPv4(192, 168, 1, 1) + + if ssntpEvent, _, err := cn.DestroyVnicV2(vnicCfg); err != nil { + t.Error("ERROR: cn.DestroyVnicV2 failed event", vnicCfg, err) + } else { + if ssntpEvent == nil { + t.Error("ERROR: cn.DestroyVnicV2 expected event", vnicCfg, err) + } + } +} + +//Negative tests for CN API +// +//This tests for various invalid API invocations +//This test has to be greatly enhanced. 
+// +//Test is expected to pass +func TestCN_Negative(t *testing.T) { + cn := &libsnnet.ComputeNode{} + + cn.NetworkConfig = &libsnnet.NetworkConfig{ + ManagementNet: nil, + ComputeNet: nil, + Mode: libsnnet.GreTunnel, + } + + cn.ID = "cnuuid" + + //_, net1, _ := net.ParseCIDR("127.0.0.0/24") + _, net2, _ := net.ParseCIDR("193.168.1.0/24") + cninit() + _, net3, _ := net.ParseCIDR(cnNetEnv) + + //From YAML, on agent init + mgtNet := []net.IPNet{*net2, *net3} + cn.ManagementNet = mgtNet + cn.ComputeNet = mgtNet + + if err := cn.Init(); err != nil { + t.Fatal("ERROR: cn.Init failed", err) + } + + if err := cn.DbRebuild(nil); err != nil { + t.Fatal("ERROR: cn.dbRebuild failed") + } + + //From YAML on instance init + mac, _ := net.ParseMAC("CA:FE:00:01:02:03") + vnicCfg := &libsnnet.VnicConfig{ + VnicIP: net.IPv4(192, 168, 1, 100), + ConcIP: net.IPv4(192, 168, 1, 1), + VnicMAC: mac, + Subnet: *net2, + SubnetKey: 0xF, + VnicID: "vuuid", + InstanceID: "iuuid", + //TenantID: "tuuid", Leaving it blank should fail + SubnetID: "suuid", + ConcID: "cnciuuid", + } + + if vnic, ssntpEvent, _, err := cn.CreateVnicV2(vnicCfg); err != nil { + t.Log("ERROR: cn.CreateVnicV2 should have failed", err) + } else { + //Launcher will attach to this name and send out the event + t.Error("Failure expected VNIC created =", vnic.LinkName, ssntpEvent) + } + + //Fix the errors + vnicCfg.TenantID = "tuuid" + + // Try and create it again. + var vnicName string + if vnic, ssntpEvent, _, err := cn.CreateVnicV2(vnicCfg); err != nil { + t.Error("ERROR: cn.CreateVnicV2 failed", err) + } else { + if ssntpEvent == nil { + t.Error("ERROR: cn.CreateVnicV2 expected event", vnic) + } + t.Log("VNIC1 duplicate created =", vnic.LinkName, ssntpEvent) + vnicName = vnic.LinkName + } + + //Try and create a duplicate. 
should work + if vnic, ssntpEvent, _, err := cn.CreateVnicV2(vnicCfg); err != nil { + t.Error("ERROR: cn.CreateVnicV2 failed", err) + } else { + if ssntpEvent != nil { + t.Error("ERROR: cn.CreateVnicV2 unexpected event", vnic, vnicCfg, ssntpEvent) + } + t.Log("VNIC1 duplicate created =", vnic.LinkName, ssntpEvent) + if vnicName != vnic.LinkName { + t.Error("ERROR: VNIC names do not match", vnicName, vnic.LinkName) + } + } + + // Try and destroy + if ssntpEvent, _, err := cn.DestroyVnicV2(vnicCfg); err != nil { + t.Error("ERROR: cn.DestroyVnicV2 failed event", vnicCfg, err) + } else { + if ssntpEvent == nil { + t.Error("ERROR: cn.DestroyVnicV2 expected event", vnicCfg, err) + } + } +} + +//Tests a node can serve as both CN and NN simultaneously +// +//This test checks that at least from the networking point +//of view we can create both Instance VNICs and CNCI VNICs +//on the same node. Even though the launcher does not +//support this mode today, the networking layer allows +//creation and co-existence of both type of VNICs on the +//same node and they will both work +// +//Test should pass OK +func TestCN_AndNN(t *testing.T) { + cn := &libsnnet.ComputeNode{} + + cn.NetworkConfig = &libsnnet.NetworkConfig{ + ManagementNet: nil, + ComputeNet: nil, + Mode: libsnnet.GreTunnel, + } + + cn.ID = "cnuuid" + + //_, net1, _ := net.ParseCIDR("127.0.0.0/24") //Add this so that init will pass + _, net1, _ := net.ParseCIDR("192.168.0.0/24") + _, net2, _ := net.ParseCIDR("192.168.1.0/24") + cninit() + _, net3, _ := net.ParseCIDR(cnNetEnv) + + mgtNet := []net.IPNet{*net1, *net2, *net3} + cn.ManagementNet = mgtNet + cn.ComputeNet = mgtNet + + if err := cn.Init(); err != nil { + t.Fatal("ERROR: cn.Init failed", err) + } + + if err := cn.DbRebuild(nil); err != nil { + t.Fatal("ERROR: cn.dbRebuild failed") + } + + //From YAML on instance init + cnciMac, _ := net.ParseMAC("CA:FE:CC:01:02:03") + cnciVnicCfg := &libsnnet.VnicConfig{ + VnicRole: libsnnet.DataCenter, + VnicMAC: cnciMac, + 
VnicID: "vuuid", + InstanceID: "iuuid", + TenantID: "tuuid", + } + + // Create a VNIC + var cnciVnic1Name string + if cnciVnic, err := cn.CreateCnciVnic(cnciVnicCfg); err != nil { + t.Error("ERROR: cn.CreateCnciVnic failed", err) + } else { + //Launcher will attach to this name and send out the event + t.Log("VNIC1 created =", cnciVnic.LinkName) + cnciVnic1Name = cnciVnic.LinkName + } + + var cnciVnic1DupName string + // Try and create it again. Should return cached value + if cnciVnic, err := cn.CreateCnciVnic(cnciVnicCfg); err != nil { + t.Error("ERROR: cn.CreateVnicV2 duplicate failed", err) + } else { + t.Log("VNIC1 duplicate created =", cnciVnic.LinkName) + cnciVnic1DupName = cnciVnic.LinkName + } + + if cnciVnic1Name != cnciVnic1DupName { + t.Error("ERROR: cn.CreateCnciVnic VNIC1 and VNIC1 Dup interface names do not match", cnciVnic1Name, cnciVnic1DupName) + } + t.Log("cn.CreateVnicV2 VNIC1 and VNIC1 Dup interface names", cnciVnic1Name, cnciVnic1DupName) + + //From YAML on instance init + mac, _ := net.ParseMAC("CA:FE:00:01:02:03") + vnicCfg := &libsnnet.VnicConfig{ + VnicIP: net.IPv4(192, 168, 1, 100), + ConcIP: net.IPv4(192, 168, 1, 1), + VnicMAC: mac, + Subnet: *net2, + SubnetKey: 0xF, + VnicID: "vuuid", + InstanceID: "iuuid", + TenantID: "tuuid", + SubnetID: "suuid", + ConcID: "cnciuuid", + } + + // Create a VNIC: Should create bridge and tunnels + var vnic1Name, vnic1DupName string + if vnic, ssntpEvent, _, err := cn.CreateVnicV2(vnicCfg); err != nil { + t.Error("ERROR: cn.CreateVnicV2 failed", err) + } else { + //We expect a bridge creation event + if ssntpEvent == nil { + t.Error("ERROR: cn.CreateVnicV2 expected event", vnic, ssntpEvent) + } + //Launcher will attach to this name and send out the event + t.Log("VNIC1 created =", vnic.LinkName, ssntpEvent) + vnic1Name = vnic.LinkName + } + + // Try and create it again. 
Should return cached value + if vnic, ssntpEvent, _, err := cn.CreateVnicV2(vnicCfg); err != nil { + t.Error("ERROR: cn.CreateVnicV2 duplicate failed", err, ssntpEvent) + } else { + //We do not expect a bridge creation event + if ssntpEvent != nil { + t.Error("ERROR: cn.CreateVnicV2 duplicate unexpected event", vnic, ssntpEvent) + } + //Launcher will attach to this name and send out the event + t.Log("VNIC1 duplicate created =", vnic.LinkName, ssntpEvent) + vnic1DupName = vnic.LinkName + } + + if vnic1Name != vnic1DupName { + t.Error("ERROR: cn.CreateVnicV2 VNIC1 and VNIC2 interface names do not match", vnic1Name, vnic1DupName) + } + t.Log("cn.CreateVnicV2 VNIC1 and VNIC2 interface names", vnic1Name, vnic1DupName) + + mac2, _ := net.ParseMAC("CA:FE:00:01:02:22") + vnicCfg2 := &libsnnet.VnicConfig{ + VnicIP: net.IPv4(192, 168, 1, 2), + ConcIP: net.IPv4(192, 168, 1, 1), + VnicMAC: mac2, + Subnet: *net2, + SubnetKey: 0xF, + VnicID: "vuuid2", + InstanceID: "iuuid2", + TenantID: "tuuid", + SubnetID: "suuid", + ConcID: "cnciuuid", + } + + // Create a second VNIC on the same tenant subnet + if vnic, ssntpEvent, _, err := cn.CreateVnicV2(vnicCfg2); err != nil { + t.Error("ERROR: cn.CreateVnicV2 VNIC2 failed", err, ssntpEvent) + } else { + //We do not expect a bridge creation event + if ssntpEvent != nil { + t.Error("ERROR: cn.CreateVnicV2 VNIC2 unexpected event", vnic, ssntpEvent) + } + //Launcher will attach to this name and send out the event + t.Log("VNIC2 created =", vnic.LinkName, ssntpEvent) + } + + if ssntpEvent, _, err := cn.DestroyVnicV2(vnicCfg2); err != nil { + if ssntpEvent != nil { + t.Error("ERROR: cn.DestroyVnicV2 VNIC2 unexpected event", err, ssntpEvent) + } + t.Error("ERROR: cn.DestroyVnicV2 VNIC2 destroy attempt failed", err) + } + + cnciMac2, _ := net.ParseMAC("CA:FE:CC:01:02:22") + cnciVnicCfg2 := &libsnnet.VnicConfig{ + VnicRole: libsnnet.DataCenter, + VnicMAC: cnciMac2, + VnicID: "vuuid2", + InstanceID: "iuuid2", + TenantID: "tuuid2", + } + + // 
Create and destroy a second VNIC + if cnciVnic, err := cn.CreateCnciVnic(cnciVnicCfg2); err != nil { + t.Error("ERROR: cn.CreateVnicV2 VNIC2 failed", err) + } else { + //Launcher will attach to this name + t.Log("VNIC2 created =", cnciVnic.LinkName) + } + if err := cn.DestroyCnciVnic(cnciVnicCfg2); err != nil { + t.Error("ERROR: cn.DestroyCnciVnic VNIC2 destroy attempt failed", err) + } + + // Destroy the first VNIC + if err := cn.DestroyCnciVnic(cnciVnicCfg); err != nil { + t.Error("ERROR: cn.DestroyCnciVnic VNIC1 failed", err) + } + + // Try and destroy it again - should work + if err := cn.DestroyCnciVnic(cnciVnicCfg); err != nil { + t.Error("ERROR: cn.DestroyCnciVnic VNIC1 duplicate destroy attempt failed", err) + } + + // Destroy the first VNIC - Deletes the bridge and tunnel + if ssntpEvent, _, err := cn.DestroyVnicV2(vnicCfg); err != nil { + t.Error("ERROR: cn.DestroyVnicV2 VNIC1 failed", err) + } else { + //We expect a bridge deletion event + if ssntpEvent == nil { + t.Error("ERROR: cn.DestroyVnicV2 VNIC1 expected event") + } + //Launcher will send this event out + t.Log("cn.Destroy VNIC1 ssntp event", ssntpEvent) + } + + // Try and destroy it again - should work + if ssntpEvent, _, err := cn.DestroyVnicV2(vnicCfg); err != nil { + if ssntpEvent != nil { + t.Error("ERROR: cn.DestroyVnicV2 VNIC1 duplicate unexpected event", err, ssntpEvent) + } + t.Error("ERROR: cn.DestroyVnicV2 VNIC1 duplicate destroy attempt failed", err) + } +} + +//Tests typical sequence of NN APIs +// +//This tests exercises the standard set of APIs that +//the launcher invokes when setting up a NN and creating +//VNICs. 
It checks for duplicate
Should return cached value + if cnciVnic, err := cn.CreateCnciVnic(cnciVnicCfg); err != nil { + t.Error("ERROR: cn.CreateVnicV2 duplicate failed", err) + } else { + t.Log("VNIC1 duplicate created =", cnciVnic.LinkName) + cnciVnic1DupName = cnciVnic.LinkName + } + + if cnciVnic1Name != cnciVnic1DupName { + t.Error("ERROR: cn.CreateCnciVnic VNIC1 and VNIC1 Dup interface names do not match", cnciVnic1Name, cnciVnic1DupName) + } + t.Log("cn.CreateVnicV2 VNIC1 and VNIC1 Dup interface names", cnciVnic1Name, cnciVnic1DupName) + + cnciMac2, _ := net.ParseMAC("CA:FE:00:01:02:22") + cnciVnicCfg2 := &libsnnet.VnicConfig{ + VnicRole: libsnnet.DataCenter, + VnicMAC: cnciMac2, + VnicID: "vuuid2", + InstanceID: "iuuid2", + TenantID: "tuuid2", + } + + // Create and destroy a second VNIC + if cnciVnic, err := cn.CreateCnciVnic(cnciVnicCfg2); err != nil { + t.Error("ERROR: cn.CreateVnicV2 VNIC2 failed", err) + } else { + //Launcher will attach to this name + t.Log("VNIC2 created =", cnciVnic.LinkName) + } + if err := cn.DestroyCnciVnic(cnciVnicCfg2); err != nil { + t.Error("ERROR: cn.DestroyCnciVnic VNIC2 destroy attempt failed", err) + } + + // Destroy the first VNIC + if err := cn.DestroyCnciVnic(cnciVnicCfg); err != nil { + t.Error("ERROR: cn.DestroyCnciVnic VNIC1 failed", err) + } + + // Try and destroy it again - should work + if err := cn.DestroyCnciVnic(cnciVnicCfg); err != nil { + t.Error("ERROR: cn.DestroyCnciVnic VNIC1 duplicate destroy attempt failed", err) + } +} + +func validSsntpEvent(ssntpEvent *libsnnet.SsntpEventInfo, cfg *libsnnet.VnicConfig) error { + + //Note: Checking for non nil values just to ensure the caller called it with all + //parameters setup properly. 
+ switch { + case ssntpEvent.ConcID != cfg.ConcID: + case ssntpEvent.ConcID == "": + + case ssntpEvent.CnciIP != cfg.ConcIP.String(): + case ssntpEvent.CnciIP == "": + + //case ssntpEvent.CnIP != has to be set by the caller + + case ssntpEvent.Subnet != cfg.Subnet.String(): + case ssntpEvent.Subnet == "": + + case ssntpEvent.SubnetKey != cfg.SubnetKey: + case ssntpEvent.SubnetKey == 0: + case ssntpEvent.SubnetKey == -1: + + case ssntpEvent.SubnetID != cfg.SubnetID: + case ssntpEvent.SubnetID == "": + + case ssntpEvent.TenantID != cfg.TenantID: + case ssntpEvent.TenantID == "": + default: + return nil + } + + return fmt.Errorf("SsntpEvent: fields do not match %v != %v", ssntpEvent, cfg) +} + +//Tests typical sequence of CN APIs +// +//This tests exercises the standard set of APIs that +//the launcher invokes when setting up a CN and creating +//VNICs. It check for duplicate VNIC creation, duplicate +//VNIC deletion +// +//Test is expected to pass +func TestCN_Base(t *testing.T) { + cn := &libsnnet.ComputeNode{} + + cn.NetworkConfig = &libsnnet.NetworkConfig{ + ManagementNet: nil, + ComputeNet: nil, + Mode: libsnnet.GreTunnel, + } + + cn.ID = "cnuuid" + + _, net1, _ := net.ParseCIDR("127.0.0.0/24") //Add this so that init will pass + _, net2, _ := net.ParseCIDR("193.168.1.0/24") + cninit() + _, net3, _ := net.ParseCIDR(cnNetEnv) + + mgtNet := []net.IPNet{*net2} + cn.ManagementNet = mgtNet + cn.ComputeNet = mgtNet + + //Negative + if err := cn.Init(); err == nil { + t.Error("ERROR: cn.Init failed", err) + } + + //From YAML, on agent init + mgtNet = []net.IPNet{*net1, *net2, *net3} + cn.ManagementNet = mgtNet + cn.ComputeNet = mgtNet + + if err := cn.Init(); err != nil { + t.Fatal("ERROR: cn.Init failed", err) + } + + if err := cn.DbRebuild(nil); err != nil { + t.Fatal("ERROR: cn.dbRebuild failed") + } + + //From YAML on instance init + mac, _ := net.ParseMAC("CA:FE:00:01:02:03") + vnicCfg := &libsnnet.VnicConfig{ + VnicIP: net.IPv4(192, 168, 1, 100), + ConcIP: 
net.IPv4(192, 168, 1, 1), + VnicMAC: mac, + Subnet: *net2, + SubnetKey: 0xF, + VnicID: "vuuid", + InstanceID: "iuuid", + TenantID: "tuuid", + SubnetID: "suuid", + ConcID: "cnciuuid", + } + + // Create a VNIC: Should create bridge and tunnels + var vnic1Name, vnic1DupName string + if vnic, ssntpEvent, _, err := cn.CreateVnicV2(vnicCfg); err != nil { + t.Error("ERROR: cn.CreateVnicV2 failed", err) + } else { + //We expect a bridge creation event + if ssntpEvent == nil { + t.Error("ERROR: cn.CreateVnicV2 expected event", vnic, ssntpEvent) + } + if ssntpEvent != nil { + //Check the fields of the ssntpEvent + if err := validSsntpEvent(ssntpEvent, vnicCfg); err != nil { + t.Errorf("ERROR: cn.CreateVnicV2 event population errror %v ", err) + } + if ssntpEvent.Event != libsnnet.SsntpTunAdd { + t.Error("ERROR: cn.CreateVnicV2 event population errror", vnic, ssntpEvent) + } + } + //Launcher will attach to this name and send out the event + t.Log("VNIC1 created =", vnic.LinkName, ssntpEvent) + vnic1Name = vnic.LinkName + } + + // Try and create it again. 
Should return cached value + if vnic, ssntpEvent, _, err := cn.CreateVnicV2(vnicCfg); err != nil { + t.Error("ERROR: cn.CreateVnicV2 duplicate failed", err, ssntpEvent) + } else { + //We do not expect a bridge creation event + if ssntpEvent != nil { + t.Error("ERROR: cn.CreateVnicV2 duplicate unexpected event", vnic, ssntpEvent) + } + //Launcher will attach to this name and send out the event + t.Log("VNIC1 duplicate created =", vnic.LinkName, ssntpEvent) + vnic1DupName = vnic.LinkName + } + + if vnic1Name != vnic1DupName { + t.Error("ERROR: cn.CreateVnicV2 VNIC1 and VNIC2 interface names do not match", vnic1Name, vnic1DupName) + } + t.Log("cn.CreateVnicV2 VNIC1 and VNIC2 interface names", vnic1Name, vnic1DupName) + + mac2, _ := net.ParseMAC("CA:FE:00:01:02:22") + vnicCfg2 := &libsnnet.VnicConfig{ + VnicIP: net.IPv4(192, 168, 1, 2), + ConcIP: net.IPv4(192, 168, 1, 1), + VnicMAC: mac2, + Subnet: *net2, + SubnetKey: 0xF, + VnicID: "vuuid2", + InstanceID: "iuuid2", + TenantID: "tuuid", + SubnetID: "suuid", + ConcID: "cnciuuid", + } + + // Create a second VNIC on the same tenant subnet + if vnic, ssntpEvent, _, err := cn.CreateVnicV2(vnicCfg2); err != nil { + t.Error("ERROR: cn.CreateVnicV2 VNIC2 failed", err, ssntpEvent) + } else { + //We do not expect a bridge creation event + if ssntpEvent != nil { + t.Error("ERROR: cn.CreateVnicV2 VNIC2 unexpected event", vnic, ssntpEvent) + } + //Launcher will attach to this name and send out the event + t.Log("VNIC2 created =", vnic.LinkName, ssntpEvent) + } + + if ssntpEvent, _, err := cn.DestroyVnicV2(vnicCfg2); err != nil { + if ssntpEvent != nil { + t.Error("ERROR: cn.DestroyVnicV2 VNIC2 unexpected event", err, ssntpEvent) + } + t.Error("ERROR: cn.DestroyVnicV2 VNIC2 destroy attempt failed", err) + } + + // Destroy the first VNIC - Deletes the bridge and tunnel + if ssntpEvent, _, err := cn.DestroyVnicV2(vnicCfg); err != nil { + t.Error("ERROR: cn.DestroyVnicV2 VNIC1 failed", err) + } else { + //We expect a bridge deletion 
event + if ssntpEvent == nil { + t.Error("ERROR: cn.DestroyVnicV2 VNIC1 expected event") + } + if ssntpEvent != nil { + //Check the fields of the ssntpEvent + if err := validSsntpEvent(ssntpEvent, vnicCfg); err != nil { + t.Errorf("ERROR: cn.DestroyVnicV2 event population errror %v", err) + } + if ssntpEvent.Event != libsnnet.SsntpTunDel { + t.Error("ERROR: cn.DestroyVnicV2 event population errror", vnicCfg, ssntpEvent) + } + } + //Launcher will send this event out + t.Log("cn.Destroy VNIC1 ssntp event", ssntpEvent) + } + + // Try and destroy it again - should work + if ssntpEvent, _, err := cn.DestroyVnicV2(vnicCfg); err != nil { + if ssntpEvent != nil { + t.Error("ERROR: cn.DestroyVnicV2 VNIC1 duplicate unexpected event", err, ssntpEvent) + } + t.Error("ERROR: cn.DestroyVnicV2 VNIC1 duplicate destroy attempt failed", err) + } +} + +//Whitebox test the CN API +// +//This tests exercises tests the primitive operations +//that the CN API rely on. This is used to check any +//issues with the underlying netlink library or kernel +//This tests fails typically if the kernel or netlink +//implementation changes +// +//Test is expected to pass +func TestCN_Whitebox(t *testing.T) { + var instanceMAC net.HardwareAddr + var err error + + // Typical inputs in YAML from Controller + tenantUUID := "tenantUuid" + instanceUUID := "tenantUuid" + subnetUUID := "subnetUuid" + subnetKey := uint32(0xF) + concUUID := "concUuid" + //The IP corresponding to CNCI, maybe we can use DNS resolution here? 
+ concIP := net.IPv4(192, 168, 1, 1) + //The IP corresponding to the VNIC that carries tenant traffic + cnIP := net.IPv4(127, 0, 0, 1) + if instanceMAC, err = net.ParseMAC("CA:FE:00:01:02:03"); err != nil { + t.Errorf("Invalid MAC address") + } + + // Create the CN tenant bridge only if it does not exist + bridgeAlias := fmt.Sprintf("br_%s_%s_%s", tenantUUID, subnetUUID, concUUID) + bridge, _ := libsnnet.NewBridge(bridgeAlias) + + if err := bridge.GetDevice(); err != nil { + // First instance to land, create the bridge and tunnel + if err := bridge.Create(); err != nil { + t.Errorf("Bridge creation failed: %v", err) + } + defer bridge.Destroy() + + // Create the tunnel to connect to the CNCI + local := cnIP + remote := concIP + + greAlias := fmt.Sprintf("gre_%s_%s_%s", tenantUUID, subnetUUID, concUUID) + gre, _ := libsnnet.NewGreTunEP(greAlias, local, remote, subnetKey) + + if err := gre.Create(); err != nil { + t.Errorf("GRE Tunnel Creation failed: %v", err) + } + defer gre.Destroy() + + if err := gre.Attach(bridge); err != nil { + t.Errorf("GRE Tunnel attach failed: %v", err) + } + + if err := gre.Enable(); err != nil { + t.Errorf("GRE Tunnel enable failed: %v", err) + } + + if err := bridge.Enable(); err != nil { + t.Errorf("Bridge enable failed: %v", err) + } + + // At this point connect to the CNCI and request creation + // of the other end of the tunnel + // This may take a while and may need backoff and retry + } + + // Create the VNIC for the instance + vnicAlias := fmt.Sprintf("vnic_%s_%s_%s_%s", tenantUUID, instanceUUID, instanceMAC, concUUID) + vnic, _ := libsnnet.NewVnic(vnicAlias) + vnic.MACAddr = &instanceMAC + + if err := vnic.Create(); err != nil { + t.Errorf("Vnic Create failed: %v", err) + } + defer vnic.Destroy() + + if err := vnic.Attach(bridge); err != nil { + t.Errorf("Vnic attach failed: %v", err) + } + + if err := vnic.Enable(); err != nil { + t.Errorf("Vnic enable failed: %v", err) + } + + if err := bridge.Enable(); err != nil { + 
t.Errorf("Bridge enable: %v", err) + } +} diff --git a/networking/libsnnet/cnci.go b/networking/libsnnet/cnci.go new file mode 100644 index 000000000..9e442df35 --- /dev/null +++ b/networking/libsnnet/cnci.go @@ -0,0 +1,570 @@ +// +// Copyright (c) 2016 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package libsnnet + +import ( + "fmt" + "net" + "strings" + "sync" + + "github.com/vishvananda/netlink" +) + +const ( + cnciTimeout = 5 +) + +// Cnci represents a Concentrator for a single tenant +// All subnets belonging to this tenant that are handled +// by this concentrator. 
A separate bridge will be set up
+ + addrs, err := netlink.AddrList(link, netlink.FAMILY_V4) + if err != nil || len(addrs) == 0 { + continue //Should be safe to ignore this + } + + phyInterfaces++ + + for _, addr := range addrs { + + if cnci.ManagementNet == nil { + cnci.MgtAddr = append(cnci.MgtAddr, addr) + cnci.MgtLink = append(cnci.MgtLink, link) + } else { + for _, mgt := range cnci.ManagementNet { + if mgt.Contains(addr.IPNet.IP) { + cnci.MgtAddr = append(cnci.MgtAddr, addr) + cnci.MgtLink = append(cnci.MgtLink, link) + } + } + } + + if cnci.ComputeNet == nil { + cnci.ComputeAddr = append(cnci.ComputeAddr, addr) + cnci.ComputeLink = append(cnci.ComputeLink, link) + } else { + for _, comp := range cnci.ComputeNet { + if comp.Contains(addr.IPNet.IP) { + cnci.ComputeAddr = append(cnci.ComputeAddr, addr) + cnci.ComputeLink = append(cnci.ComputeLink, link) + } + } + } + } + } + + if len(cnci.MgtAddr) == 0 { + return fmt.Errorf("unable to associate with management network %v", cnci.ManagementNet) + } + if len(cnci.ComputeAddr) == 0 { + return fmt.Errorf("unable to associate with compute network %v", cnci.ComputeNet) + } + + //Give a different error here so we do not retry + if (cnci.ManagementNet == nil || cnci.ComputeNet == nil) && phyInterfaces > 1 { + return fmt.Errorf("unable to autoconfigure network") + } + + return nil +} + +// Init sets the CNCI configuration +// Discovers the physical interfaces and classifies them as management or compute +// Performs any node specific networking setup. +func (cnci *Cnci) Init() error { + + if cnci.NetworkConfig == nil { + return fmt.Errorf("CNCI uninitalized") + } + + err := cnci.findPhyNwInterface() + if err != nil { + return err + } + + cnci.topology = newCnciTopology() + if err = cnci.RebuildTopology(); err != nil { + return err + } + + if err = enableForwarding(); err != nil { + return err + } + return nil +} + +//RebuildTopology CNCI network database using the information contained +//in the aliases. 
It can be called if the agent using the library +//crashes and loses network topology information. +//It can also be called, to rebuild the network topology on demand. +//TODO: Restarting the DNS Masq here - Define a re-attach method +//TODO: Log failures when making best effort progress - fmt.Printxxx +func (cnci *Cnci) RebuildTopology() error { + + if cnci.NetworkConfig == nil || cnci.topology == nil { + return fmt.Errorf("cnci not initialized") + } + + links, err := netlink.LinkList() + if err != nil { + return err + } + + cnci.topology.Lock() + defer cnci.topology.Unlock() + reinitTopology(cnci.topology) + + //Update the link and name map + //Do this to ensure the link map is updated even on failure + for _, link := range links { + alias := link.Attrs().Alias + name := link.Attrs().Name + + cnci.topology.nameMap[name] = true + + if alias == "" { + continue + } + cnci.topology.linkMap[alias] = &linkInfo{ + index: link.Attrs().Index, + name: name, + ready: make(chan struct{}), + } + close(cnci.topology.linkMap[alias].ready) + } + + //Create the bridge map + for _, link := range links { + if link.Type() != "bridge" { + continue + } + + bridgeID := link.Attrs().Alias + + if !strings.HasPrefix(bridgeID, bridgePrefix) { + continue + } + + br, err := NewBridge(bridgeID) + if err != nil { + return (err) + } + + if err = br.GetDevice(); err != nil { + return (err) + } + + subnet, err := stringToSubnet(strings.TrimPrefix(bridgeID, bridgePrefix)) + if err != nil { + return (err) + } + + dns, err := startDnsmasq(br, cnci.Tenant, *subnet) + if err != nil { + return (err) + } + + cnci.topology.bridgeMap[bridgeID] = &bridgeInfo{ + Dnsmasq: dns, + } + } + + //Ensure that all tunnels have the associated bridges + for _, link := range links { + if link.Type() != "gretap" { + continue + } + + gre := link.Attrs().Alias + if !strings.HasPrefix(gre, grePrefix) { + continue + } + + subnetID := strings.TrimPrefix(strings.Split(gre, "##")[0], grePrefix) + bridgeID := bridgePrefix + 
subnetID + + if _, ok := cnci.topology.linkMap[bridgeID]; !ok { + return fmt.Errorf("missing bridge for gre tunnel %s", gre) + } + + brInfo, ok := cnci.topology.bridgeMap[bridgeID] + if !ok { + return fmt.Errorf("missing bridge map for gre tunnel %s", gre) + } + brInfo.tunnels++ + } + + return nil +} + +func subnetToString(subnet net.IPNet) string { + return strings.Replace(subnet.String(), "/", "+", -1) +} + +func stringToSubnet(subnet string) (*net.IPNet, error) { + s := strings.Replace(subnet, "+", "/", -1) + _, ipNet, err := net.ParseCIDR(s) + return ipNet, err +} + +func genBridgeAlias(subnet net.IPNet) string { + return fmt.Sprintf("%s%s", bridgePrefix, subnetToString(subnet)) +} + +func genGreAlias(subnet net.IPNet, cnIP net.IP) string { + return fmt.Sprintf("%s%s##%s", grePrefix, subnetToString(subnet), cnIP.String()) +} + +func genLinkName(device interface{}, nameMap map[string]bool) (string, error) { + for i := 0; i < ifaceRetryLimit; { + name, _ := GenIface(device, false) + if !nameMap[name] { + nameMap[name] = true + return name, nil + } + } + return "", fmt.Errorf("Unable to generate unique device name") +} + +func startDnsmasq(bridge *Bridge, tenant string, subnet net.IPNet) (*Dnsmasq, error) { + dns, err := NewDnsmasq(bridge.GlobalID, tenant, subnet, 0, bridge) + if err != nil { + return nil, fmt.Errorf("NewDnsmasq failed %v", err) + } + + if _, err = dns.Attach(); err != nil { + err = dns.Restart() + if err != nil { + return nil, fmt.Errorf("dns.start failed %v", err) + } + } + return dns, nil +} + +func createCnciBridge(bridge *Bridge, brInfo *bridgeInfo, tenant string, subnet net.IPNet) (err error) { + if err = bridge.Create(); err != nil { + return err + } + if err = bridge.Enable(); err != nil { + return err + } + brInfo.Dnsmasq, err = startDnsmasq(bridge, tenant, subnet) + return err +} + +func createCnciTunnel(gre *GreTunEP) (err error) { + if err = gre.Create(); err != nil { + return err + } + if err = gre.Enable(); err != nil { + return err 
+ } + return nil +} + +func checkInputParams(subnet net.IPNet, subnetKey int, cnIP net.IP) error { + switch { + case subnet.IP == nil: + return fmt.Errorf("Invalid input parameters - Subnet IP") + case subnet.Mask == nil: + return fmt.Errorf("Invalid input parameters - Subnet Mask") + case subnetKey == 0: + return fmt.Errorf("Invalid input parameters - Subnet Key") + case cnIP == nil: + return fmt.Errorf("Invalid input parameters - CN IP") + } + return nil +} + +//AddRemoteSubnet attaches a remote subnet to a local bridge on the CNCI +//If the bridge and DHCP server does not exist it will be created +//If the tunnel exists and the bridge does not exit the bridge is created +//The bridge name interface name is returned if the bridge is newly created +func (cnci *Cnci) AddRemoteSubnet(subnet net.IPNet, subnetKey int, cnIP net.IP) (string, error) { + + if err := checkInputParams(subnet, subnetKey, cnIP); err != nil { + return "", err + } + + bridge, err := NewBridge(genBridgeAlias(subnet)) + if err != nil { + return "", err + } + + gre, err := NewGreTunEP(genGreAlias(subnet, cnIP), cnci.ComputeAddr[0].IPNet.IP, cnIP, uint32(subnetKey)) + if err != nil { + return "", err + } + + // CS Start + cnci.topology.Lock() + bLink, brExists := cnci.topology.linkMap[bridge.GlobalID] + gLink, greExists := cnci.topology.linkMap[gre.GlobalID] + + if brExists && greExists { + cnci.topology.Unlock() + return bLink.name, err + } + + var brInfo *bridgeInfo + if !brExists { + if bridge.LinkName, err = genLinkName(bridge, cnci.topology.nameMap); err != nil { + cnci.topology.Unlock() + return "", err + } + + bLink = &linkInfo{ + name: bridge.LinkName, + ready: make(chan struct{}), + } + cnci.topology.linkMap[bridge.GlobalID] = bLink + brInfo = &bridgeInfo{} + cnci.topology.bridgeMap[bridge.GlobalID] = brInfo + } else { + var present bool + brInfo, present = cnci.topology.bridgeMap[bridge.GlobalID] + if !present { + cnci.topology.Unlock() + return "", fmt.Errorf("Internal error. 
Missing bridge info") + } + } + + if !greExists { + if gre.LinkName, err = genLinkName(gre, cnci.topology.nameMap); err != nil { + cnci.topology.Unlock() + return "", err + } + + gLink = &linkInfo{ + name: gre.LinkName, + ready: make(chan struct{}), + } + cnci.topology.linkMap[gre.GlobalID] = gLink + brInfo.tunnels++ + } + cnci.topology.Unlock() + //End CS + + var berr, gerr error + if !brExists { + berr = createCnciBridge(bridge, brInfo, cnci.Tenant, subnet) + bLink.index = bridge.Link.Index + close(bLink.ready) + } + + if !greExists { + gerr = createCnciTunnel(gre) + gLink.index = gre.Link.Index + close(gLink.ready) + } + + if berr != nil { + return "", berr + } + if gerr != nil { + return "", gerr + } + + if brExists { + bridge.LinkName, bridge.Link.Index, err = waitForDeviceReady(bLink) + if err != nil { + return "", fmt.Errorf("AddRemoteSubnet %s %v", bridge.GlobalID, err) + } + } + if greExists { + gre.LinkName, gre.Link.Index, err = waitForDeviceReady(gLink) + if err != nil { + return "", fmt.Errorf("AddRemoteSubnet %s %v", gre.GlobalID, err) + } + } + + err = gre.Attach(bridge) + if brExists { + return "", err + } + return bridge.LinkName, err + +} + +//DelRemoteSubnet detaches a remote subnet from the local bridge +//The bridge and DHCP server is kept around as they impose minimal overhead +//and helps in the case where instances keep getting added and deleted constantly +func (cnci *Cnci) DelRemoteSubnet(subnet net.IPNet, subnetKey int, cnIP net.IP) error { + + if err := checkInputParams(subnet, subnetKey, cnIP); err != nil { + return err + } + + bridgeID := genBridgeAlias(subnet) + + gre, err := NewGreTunEP(genGreAlias(subnet, cnIP), + cnci.ComputeAddr[0].IPNet.IP, + cnIP, uint32(subnetKey)) + + if err != nil { + return err + } + + // CS Start + cnci.topology.Lock() + defer cnci.topology.Unlock() + + gLink, present := cnci.topology.linkMap[gre.GlobalID] + + if !present { + //TODO: Log this and continue + fmt.Println("Deleting non existent tunnel ", 
gre.GlobalID) + return nil + } + + if brInfo, present := cnci.topology.bridgeMap[bridgeID]; !present { + //TODO: Log this and continue + fmt.Println("internal error bridge does not exist ", bridgeID) + } else { + brInfo.tunnels-- + } + + gre.LinkName, gre.Link.Index, err = waitForDeviceReady(gLink) + if err != nil { + return fmt.Errorf("AddRemoteSubnet %s %v", gre.GlobalID, err) + } + + delete(cnci.topology.nameMap, gre.GlobalID) + delete(cnci.topology.linkMap, gre.GlobalID) + err = gre.Destroy() + + return err +} + +//Shutdown stops all DHCP Servers. Tears down all links and tunnels +//It will continue even on encountering an error and perform as much +//cleanup as possible +func (cnci *Cnci) Shutdown() error { + var lasterr error + + for _, b := range cnci.topology.bridgeMap { + if b.Dnsmasq != nil { + if err := b.Dnsmasq.Stop(); err != nil { + lasterr = err + } + } else { + lasterr = fmt.Errorf("invalid dnsmasq %v", b) + } + } + + for alias, linfo := range cnci.topology.linkMap { + if linfo != nil { + //HACKING: Better to create the right type + vnic, err := NewVnic(alias) + if err != nil { + lasterr = err + continue + } + vnic.LinkName, vnic.Link.Attrs().Index, err = waitForDeviceReady(linfo) + if err != nil { + lasterr = err + continue + } + if err := vnic.Destroy(); err != nil { + lasterr = err + } + } + } + + return lasterr +} diff --git a/networking/libsnnet/cnci_test.go b/networking/libsnnet/cnci_test.go new file mode 100644 index 000000000..66382b205 --- /dev/null +++ b/networking/libsnnet/cnci_test.go @@ -0,0 +1,201 @@ +// +// Copyright (c) 2016 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// and a concentator instance CNCI. +// The CN code will be abstracted and presented as a API that can be used +// by the launcher to create a VNIC +// The CNCI code will be run within a CNCI daemon that listens to messages on +// on SNTP + +package libsnnet_test + +import ( + "fmt" + "net" + "os" + "testing" + + "github.com/01org/ciao/networking/libsnnet" +) + +var cnciNetEnv string + +func cnciinit() { + cnciNetEnv = os.Getenv("SNNET_ENV") + + if cnNetEnv == "" { + cnNetEnv = "10.3.66.0/24" + } +} + +//Tests all CNCI APIs +// +//Tests all operations typically performed on a CNCI +//Test includes adding and deleting a remote subnet +//Rebuild of the topology database (to simulate agent crash) +//It also tests the reset of the node to clean status +// +//Test should pass ok +func TestCNCI_Init(t *testing.T) { + cnci := &libsnnet.Cnci{} + + cnci.NetworkConfig = &libsnnet.NetworkConfig{ + ManagementNet: nil, + ComputeNet: nil, + Mode: libsnnet.GreTunnel, + } + + cnci.ID = "cnciuuid" + + cnciinit() + _, net1, _ := net.ParseCIDR(cnNetEnv) + _, net2, _ := net.ParseCIDR("192.168.1.0/24") + + mgtNet := []net.IPNet{*net1, *net2} + cnci.ManagementNet = mgtNet + cnci.ComputeNet = mgtNet + + if err := cnci.Init(); err != nil { + t.Fatal(err) + } + + if err := cnci.RebuildTopology(); err != nil { + t.Fatal(err) + } + + if _, err := cnci.AddRemoteSubnet(*net2, 1234, net.ParseIP("192.168.0.102")); err != nil { + t.Error(err) + } + + if _, err := cnci.AddRemoteSubnet(*net2, 1234, net.ParseIP("192.168.0.103")); err != nil { + t.Error(err) + } + + if _, err 
:= cnci.AddRemoteSubnet(*net2, 1234, net.ParseIP("192.168.0.104")); err != nil { + t.Error(err) + } + + if err := cnci.DelRemoteSubnet(*net2, 1234, net.ParseIP("192.168.0.102")); err != nil { + t.Error(err) + } + + if err := cnci.RebuildTopology(); err != nil { + t.Fatal(err) + } + + if _, err := cnci.AddRemoteSubnet(*net2, 1234, net.ParseIP("192.168.0.105")); err != nil { + t.Error(err) + } + + if err := cnci.DelRemoteSubnet(*net2, 1234, net.ParseIP("192.168.0.103")); err != nil { + t.Error(err) + } + + if err := cnci.DelRemoteSubnet(*net2, 1234, net.ParseIP("192.168.0.105")); err != nil { + t.Error(err) + } + + if err := cnci.DelRemoteSubnet(*net2, 1234, net.ParseIP("192.168.0.102")); err != nil { + t.Error(err) + } + if err := cnci.Shutdown(); err != nil { + t.Fatal(err) + } +} + +//Whitebox test case of CNCI API primitives +// +//This tests ensure that the lower level primitive +//APIs that the CNCI uses are still sane and function +//as expected. This test is expected to catch any +//issues due to change in the underlying libraries +//kernel features and applications (like dnsmasq, +//netlink) that the CNCI API relies on +//The goal of this test is to ensure we can rebase our +//depdencies and catch any dependency errors +// +//Test is expected to pass +func TestCNCI_Internal(t *testing.T) { + + // Typical inputs in YAML + tenantUUID := "tenantUuid" + concUUID := "concUuid" + cnUUID := "cnUuid" + subnetUUID := "subnetUuid" + subnetKey := uint32(0xF) + reserved := 10 + cnciIP := net.IPv4(127, 0, 0, 1) + subnet := net.IPNet{ + IP: net.IPv4(192, 168, 1, 0), + Mask: net.IPv4Mask(255, 255, 255, 0), + } + // +The DHCP configuration, MAC to IP mapping is another inputs + // This will be sent a-priori or based on design each time an instance is created + + // Create the CNCI aggregation bridge + bridgeAlias := fmt.Sprintf("br_%s_%s_%s", tenantUUID, subnetUUID, concUUID) + bridge, _ := libsnnet.NewBridge(bridgeAlias) + + if err := bridge.Create(); err != nil { + 
t.Errorf("Bridge creation failed: %v", err) + } + defer bridge.Destroy() + + if err := bridge.Enable(); err != nil { + t.Errorf("Bridge enable failed: %v", err) + } + + // Attach the DNS masq against the CNCI bridge. This gives it an IP address + d, err := libsnnet.NewDnsmasq(bridgeAlias, tenantUUID, subnet, reserved, bridge) + + if err != nil { + t.Errorf("DNS Masq New failed: %v", err) + } + + if err := d.Start(); err != nil { + t.Errorf("DNS Masq Start: %v", err) + } + defer d.Stop() + + // At this time the bridge is ready waiting for tunnels to be created + // The next step will happen each time a tenant bridge is created for + // this tenant on a CN + cnIP := net.IPv4(127, 0, 0, 1) + + // Wait on SNTP messages requesting tunnel creation + // Create a GRE tunnel that connects a tenant bridge to the CNCI bridge + // for that subnet. The CNCI will have many bridges one for each subnet + // the belongs to the tenant + greAlias := fmt.Sprintf("gre_%s_%s_%s", tenantUUID, subnetUUID, cnUUID) + local := cnciIP + remote := cnIP + key := subnetKey + + gre, _ := libsnnet.NewGreTunEP(greAlias, local, remote, key) + + if err := gre.Create(); err != nil { + t.Errorf("GRE Tunnel Creation failed: %v", err) + } + defer gre.Destroy() + + if err := gre.Attach(bridge); err != nil { + t.Errorf("GRE Tunnel attach failed: %v", err) + } + + if err := gre.Enable(); err != nil { + t.Errorf("GRE Tunnel enable failed: %v", err) + } +} diff --git a/networking/libsnnet/cncivnic.go b/networking/libsnnet/cncivnic.go new file mode 100644 index 000000000..804d1594c --- /dev/null +++ b/networking/libsnnet/cncivnic.go @@ -0,0 +1,189 @@ +// +// Copyright (c) 2016 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package libsnnet + +import ( + "net" + + "github.com/vishvananda/netlink" +) + +// NewCnciVnic is used to initialize the CnciVnic properties +// This has to be called prior to Create() or GetDevice() +func NewCnciVnic(id string) (*CnciVnic, error) { + CnciVnic := &CnciVnic{} + CnciVnic.Link = &netlink.Macvtap{} + CnciVnic.Link.Mode = netlink.MACVLAN_MODE_VEPA + //TODO: Use bridge mode as default for now for easier integration + //CnciVnic.Link.Mode = netlink.MACVLAN_MODE_BRIDGE + //TODO: Figure out correct default + //Currently using the same as ip command + CnciVnic.Link.TxQLen = 500 + CnciVnic.GlobalID = id + return CnciVnic, nil +} + +// GetDevice is used to associate with an existing CnciVnic provided it satisfies +// the needs of a CnciVnic. 
Returns error if the CnciVnic does not exist +func (v *CnciVnic) GetDevice() error { + + if v.GlobalID == "" { + return netError(v, "getdevice unnamed cnci vnic") + } + + link, err := netlink.LinkByAlias(v.GlobalID) + if err != nil { + return netError(v, "getdevice interface does not exist: %v", v.GlobalID) + } + + vl, ok := link.(*netlink.Macvtap) + if !ok { + return netError(v, "getdevice incorrect interface type %v %v", v.GlobalID, link.Type()) + } + + if link.Type() != "macvtap" { + return netError(v, "getdevice incorrect interface type %v %v", v.GlobalID, link.Type()) + } + + v.LinkName = vl.Name + v.Link = vl + return nil +} + +// Create instantiates new vnic +func (v *CnciVnic) Create() error { + var err error + + if v.GlobalID == "" { + return netError(v, "create cannot create an unnamed cnci vnic") + } + + if v.LinkName == "" { + if v.LinkName, err = GenIface(v, true); err != nil { + return netError(v, "create geniface %v %v", v.GlobalID, err) + } + + if _, err := netlink.LinkByAlias(v.GlobalID); err == nil { + return netError(v, "create interface exists %v %v", v.GlobalID, err) + } + } + + v.Link.Name = v.LinkName + if v.Link.ParentIndex == 0 { + return netError(v, "create parent index not set %v %v", v.GlobalID, v.Link) + } + + if err := netlink.LinkAdd(v.Link); err != nil { + return netError(v, "create netlink.LinkAdd %v %v", v.GlobalID, err) + } + + link, err := netlink.LinkByName(v.LinkName) + if err != nil { + return netError(v, "create netlink.LinkAdd %v %v", v.GlobalID, err) + } + + vl, ok := link.(*netlink.Macvtap) + if !ok { + return netError(v, "create incorrect interface type %v %v", v.GlobalID, link.Type()) + } + + v.Link = vl + + if err := v.setAlias(v.GlobalID); err != nil { + v.Destroy() + return netError(v, "create set alias %v %v", v.GlobalID, err) + } + + if v.MACAddr != nil { + if err := v.setHardwareAddr(*v.MACAddr); err != nil { + v.Destroy() + return netError(v, "create set hardware addr %v %v %v", v.MACAddr.String(), v.GlobalID, 
err) + } + } + + return nil +} + +// Destroy a vnic +func (v *CnciVnic) Destroy() error { + + if v.Link == nil { + return netError(v, "destroy invalid Link: %v", v) + } + + if err := netlink.LinkDel(v.Link); err != nil { + return netError(v, "destroy link del %v", err) + } + + return nil + +} + +// Enable the vnic +func (v *CnciVnic) Enable() error { + + if v.Link == nil { + return netError(v, "enable invalid link: %v", v) + } + + if err := netlink.LinkSetUp(v.Link); err != nil { + return netError(v, "enable link up %v", err) + } + + return nil + +} + +// Disable the vnic +func (v *CnciVnic) Disable() error { + + if v.Link == nil { + return netError(v, "disable invalid link: %v", v) + } + + if err := netlink.LinkSetDown(v.Link); err != nil { + return netError(v, "disable link down %v", err) + } + + return nil +} + +func (v *CnciVnic) setAlias(alias string) error { + + if v.Link == nil { + return netError(v, "set alias vnic unnitialized") + } + + if err := netlink.LinkSetAlias(v.Link, alias); err != nil { + return netError(v, "set alias link set alias %v %v", alias, err) + } + + return nil +} + +func (v *CnciVnic) setHardwareAddr(hwaddr net.HardwareAddr) error { + + if v.Link == nil { + return netError(v, "set hw addr vnic unnitialized") + } + + if err := netlink.LinkSetHardwareAddr(v.Link, hwaddr); err != nil { + return netError(v, "set hwaddr %v %v", hwaddr.String(), err) + } + + return nil +} diff --git a/networking/libsnnet/cncivnic_test.go b/networking/libsnnet/cncivnic_test.go new file mode 100644 index 000000000..33beb0ab4 --- /dev/null +++ b/networking/libsnnet/cncivnic_test.go @@ -0,0 +1,202 @@ +// +// Copyright (c) 2016 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package libsnnet_test + +import ( + "fmt" + "net" + "strings" + "testing" + + "github.com/vishvananda/netlink" + + "github.com/01org/ciao/networking/libsnnet" +) + +//Just pick the first physical interface with an IP +func getFirstPhyDevice() (int, error) { + + links, err := netlink.LinkList() + if err != nil { + return 0, err + } + + for _, link := range links { + + if link.Type() != "device" { + continue + } + + if link.Attrs().Name == "lo" { + continue + } + + addrs, err := netlink.AddrList(link, netlink.FAMILY_V4) + if err != nil || len(addrs) == 0 { + continue + } + + return link.Attrs().Index, nil + } + + return 0, fmt.Errorf("Unable to obtain physical device") + +} + +//Test CNCI VNIC primitives +// +//Tests all the primitives used to create a CNCI instance +//compatible vnic including create, enable, disable, destroy +// +//Test is expected to pass +func TestCnciVnic_Basic(t *testing.T) { + + cnciVnic, _ := libsnnet.NewCnciVnic("testcnciVnic") + + pIndex, err := getFirstPhyDevice() + + if err != nil { + t.Errorf("CnciVnic creation failed: %v", err) + } + + cnciVnic.Link.ParentIndex = pIndex + cnciVnic.Link.HardwareAddr, _ = net.ParseMAC("DE:AD:BE:EF:01:02") + + if err := cnciVnic.Create(); err != nil { + t.Errorf("CnciVnic creation failed: %v", err) + } + + if err := cnciVnic.Enable(); err != nil { + t.Errorf("CnciVnic enable failed: %v", err) + } + + if err := cnciVnic.Disable(); err != nil { + t.Errorf("CnciVnic enable failed: %v", err) + } + + if err := cnciVnic.Destroy(); err != nil { + t.Errorf("CnciVnic deletion failed: %v", 
err) + } + +} + +//Test duplicate creation +// +//Tests the creation of a duplicate interface is handled +//gracefully +// +//Test is expected to pass +func TestCnciVnic_Dup(t *testing.T) { + cnciVnic, _ := libsnnet.NewCnciVnic("testcnciVnic") + + pIndex, err := getFirstPhyDevice() + if err != nil { + t.Errorf("CnciVnic creation failed: %v", err) + } + cnciVnic.Link.ParentIndex = pIndex + + if err := cnciVnic.Create(); err != nil { + t.Errorf("CnciVnic creation failed: %v", err) + } + + defer cnciVnic.Destroy() + + cnciVnic1, _ := libsnnet.NewCnciVnic("testcnciVnic") + cnciVnic1.Link.ParentIndex = pIndex + + if err := cnciVnic1.Create(); err == nil { + t.Errorf("Duplicate CnciVnic creation: %v", err) + } + +} + +//Negative test cases +// +//Tests for graceful handling of various Negative +//primitive invocation scenarios +// +//Test is expected to pass +func TestCnciVnic_Invalid(t *testing.T) { + cnciVnic, err := libsnnet.NewCnciVnic("testcnciVnic") + + if err = cnciVnic.GetDevice(); err == nil { + t.Errorf("Non existent device: %v", cnciVnic) + } + if !strings.HasPrefix(err.Error(), "cncivnic error") { + t.Errorf("Invalid error format %v", err) + } + + if err = cnciVnic.Enable(); err == nil { + t.Errorf("Non existent device: %v", cnciVnic) + } + if !strings.HasPrefix(err.Error(), "cncivnic error") { + t.Errorf("Invalid error format %v", err) + } + + if err = cnciVnic.Disable(); err == nil { + t.Errorf("Non existent device: %v", cnciVnic) + } + if !strings.HasPrefix(err.Error(), "cncivnic error") { + t.Errorf("Invalid error format %v", err) + } + + if err = cnciVnic.Destroy(); err == nil { + t.Errorf("Non existent device: %v", cnciVnic) + } + if !strings.HasPrefix(err.Error(), "cncivnic error") { + t.Errorf("Invalid error format %v", err) + } + +} + +//Test ability to attach +// +//Tests that you can attach to an existing CNCI VNIC and +//perform all CNCI VNIC operations on the attached VNIC +// +//Test is expected to pass +func TestCnciVnic_GetDevice(t *testing.T) 
{ + cnciVnic1, _ := libsnnet.NewCnciVnic("testcnciVnic") + + pIndex, err := getFirstPhyDevice() + if err != nil { + t.Errorf("CnciVnic creation failed: %v", err) + } + cnciVnic1.Link.ParentIndex = pIndex + + if err := cnciVnic1.Create(); err != nil { + t.Errorf("CnciVnic creation failed: %v", err) + } + + cnciVnic, _ := libsnnet.NewCnciVnic("testcnciVnic") + + if err := cnciVnic.GetDevice(); err != nil { + t.Errorf("CnciVnic Get Device failed: %v", err) + } + + if err := cnciVnic.Enable(); err != nil { + t.Errorf("CnciVnic enable failed: %v", err) + } + + if err := cnciVnic.Disable(); err != nil { + t.Errorf("CnciVnic enable failed: %v", err) + } + + if err := cnciVnic.Destroy(); err != nil { + t.Errorf("CnciVnic deletion failed: %v", err) + } +} diff --git a/networking/libsnnet/debug.go b/networking/libsnnet/debug.go new file mode 100644 index 000000000..931304dbd --- /dev/null +++ b/networking/libsnnet/debug.go @@ -0,0 +1,48 @@ +// +// Copyright (c) 2016 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +package libsnnet + +import ( + "fmt" + + "github.com/vishvananda/netlink" +) + +// Show dumps as much information about the device as possible to stdout +func Show(name string) error { + + link, err := netlink.LinkByAlias(name) + + if err != nil { + fmt.Println("fetching by name") + link, err = netlink.LinkByName(name) + if err != nil { + return fmt.Errorf("interface name and alias does not exist: %v", name) + } + } + + switch t := link.(type) { + default: + fmt.Printf("Type: %v\n", t) + fmt.Printf("Link type: %v\n", link.Type()) + fmt.Println("Attributes :", link.Attrs()) + fmt.Println("Alias :", link.Attrs().Alias) + fmt.Println("Details : ", link) + } + + return nil +} diff --git a/networking/libsnnet/dnsmasq.go b/networking/libsnnet/dnsmasq.go new file mode 100644 index 000000000..507a903ba --- /dev/null +++ b/networking/libsnnet/dnsmasq.go @@ -0,0 +1,420 @@ +// +// Copyright (c) 2016 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +package libsnnet + +import ( + "encoding/binary" + "errors" + "fmt" + "io/ioutil" + "net" + "os" + "os/exec" + "strconv" + "strings" + "syscall" +) + +//Paths for various configuration and status files +//TODO: Set these up to correct defaults +const ( + pidPath = "/tmp/" + leasePath = "/tmp/" + configPath = "/tmp/" + hostsPath = "/tmp/" + MACPrefix = "02:00" //Prefix for all private MAC addresses +// CONFIG_PATH = "/etc/" +// PID_PATH = "/var/run/" +) + +// Dnsmasq contains all the information required to spawn +// a dnsmasq process on behalf of a tenant on a concentrator +type Dnsmasq struct { + SubnetID string // UUID of the Tenant Subnet to which the dnsmasq supports + CNCIId string // UUID of the CNCI instance + TenantID string // UUID of the Tenant to which the CNCI belongs to + TenantNet net.IPNet // The tenant subnet served by this dnsmasq, has to be /29 or larger + ReservedIPs int // Reserve IP at the start of subnet + ConcIP net.IP // IP Address of the CNCI + IPMap map[string]*DhcpEntry // Static mac to IP map, key is macaddress + Dev *Bridge // The bridge on which dnsmasq will attach + MTU int // MTU that takes into account the tunnel overhead + DomainName string // Domain Name to be assigned to the subnet + + // Private fields + dhcpSize int + subnet net.IP // The DHCP addresses will be served from this subnet + gateway net.IPNet // The address of the bridge. 
Will also be default gw to the instances + start net.IP // First address in the DHCP range Skipping ReservedIPs + end net.IP // Last address in the DHCP range excluding broadcast + confFile string + pidFile string + leaseFile string + hostsFile string +} + +// NewDnsmasq initializes a new dnsmasq instance and attaches it to the specified bridge +// The dnsmasq object is initialized but no operations have been executed or files created +// This is a pure in-memory operation +func NewDnsmasq(id string, tenant string, subnet net.IPNet, reserved int, b *Bridge) (*Dnsmasq, error) { + if b == nil { + return nil, fmt.Errorf("invalid bridge") + } + + d := &Dnsmasq{ + SubnetID: id, + TenantID: tenant, + TenantNet: subnet, + ReservedIPs: reserved, + IPMap: make(map[string]*DhcpEntry), + Dev: b, + } + + if err := d.getFileConfiguration(); err != nil { + return nil, err + } + + if err := d.setMTU(); err != nil { + return nil, err + } + + if err := d.getSubnetConfiguration(); err != nil { + return nil, err + } + + return d, nil +} + +// Start the dnsmasq service +// This creates the actual files and performs configuration +func (d *Dnsmasq) Start() error { + if err := d.createConfigFile(); err != nil { + return fmt.Errorf("d.createConfigFile failed %v", err) + } + + if err := d.createHostsFile(); err != nil { + return fmt.Errorf("d.createHostsFile failed %v", err) + } + + if err := d.Dev.AddIP(&d.gateway); err != nil { + d.Dev.DelIP(&d.gateway) //TODO: check it already has the IP + if err = d.Dev.AddIP(&d.gateway); err != nil { + return fmt.Errorf("d.Dev.AddIP failed %v %v", err, d.gateway.String()) + } + } + + if err := d.launch(); err != nil { + return fmt.Errorf("d.launch failed %v", err) + } + + return nil +} + +// Attach to an existing service +// Returns -1 and error on failure +// Returns pid of current process on success +func (d *Dnsmasq) Attach() (int, error) { + pid, err := d.getPid() + + if err != nil { + return -1, fmt.Errorf("No pid file %v", err) + } + + if err = 
syscall.Kill(pid, syscall.Signal(0)); err != nil { + return -1, fmt.Errorf("Process does not exist or unable to attach %v", err) + } + return pid, nil +} + +// Stop the dnsmasq service +func (d *Dnsmasq) Stop() error { + var cumError []error + + pid, err := d.Attach() + + if err != nil { + cumError = append(cumError, fmt.Errorf("Process does not exist %v", err)) + } + + if pid != -1 { + if err = syscall.Kill(pid, syscall.SIGKILL); err != nil { //TODO: Try TERM + cumError = append(cumError, fmt.Errorf("Unable to kill dnsmasq %v", err)) + } else { + if err := os.Remove(d.pidFile); err != nil { + cumError = append(cumError, fmt.Errorf("Unable to delete file %v %v", d.pidFile, err)) + } + } + } + + if err = d.Dev.DelIP(&d.gateway); err != nil { + cumError = append(cumError, fmt.Errorf("Unable to delete bridge IP %v", err)) + } + + if err = os.Remove(d.confFile); err != nil { + cumError = append(cumError, fmt.Errorf("Unable to delete file %v %v", d.confFile, err)) + } + if err = os.Remove(d.hostsFile); err != nil { + cumError = append(cumError, fmt.Errorf("Unable to delete file %v %v", d.hostsFile, err)) + } + os.Remove(d.leaseFile) + + if cumError != nil { + allErrors := "" + for _, e := range cumError { + allErrors = allErrors + e.Error() + } + return errors.New(allErrors) + } + + return nil +} + +// Restart will stop and restart a new instance of dnsmasq +func (d *Dnsmasq) Restart() error { + d.Stop() //Ignore any errors + + if err := d.Start(); err != nil { + return fmt.Errorf("d.Start failed %v", err) + } + return nil +} + +// Reload is called to update the configuration of the dnsmasq +// service. It is typically called when its configuration is updated +func (d *Dnsmasq) Reload() error { + + pid, err := d.Attach() + if err != nil { + return err + } + + if err = d.getSubnetConfiguration(); err != nil { + return fmt.Errorf("Unable to get subnet configuration %v", err) + } + + //Note: This file will not take effect. 
Update it anyway
+	//Fix: these paths create/update the files; the old messages said "delete"
+	if err = d.createConfigFile(); err != nil {
+		return fmt.Errorf("Unable to update config file %v", err)
+	}
+	if err = d.createHostsFile(); err != nil {
+		return fmt.Errorf("Unable to update hosts file %v", err)
+	}
+	if err = syscall.Kill(pid, syscall.SIGHUP); err != nil {
+		return fmt.Errorf("Unable to reload/SIGHUP dnsmasq %v", err)
+	}
+	return nil
+}
+
+// AddDhcpEntry adds/updates a DHCP mapping. Typically invoked when a new
+// instance is added to the subnet served by this dnsmasq service.
+// Reload() has to be invoked to activate this entry is the service is already
+// running
+func (d *Dnsmasq) AddDhcpEntry(entry *DhcpEntry) error {
+	d.IPMap[entry.MACAddr.String()] = entry
+	return nil
+}
+
+// Populates the file specific private variables
+func (d *Dnsmasq) getFileConfiguration() error {
+
+	if d.SubnetID == "" {
+		return fmt.Errorf("invalid configuration %v", d)
+	}
+
+	d.pidFile = fmt.Sprintf("%sdnsmasq_%s.pid", pidPath, d.SubnetID)
+	d.confFile = fmt.Sprintf("%sdnsmasq_%s.conf", configPath, d.SubnetID)
+	d.leaseFile = fmt.Sprintf("%sdnsmasq_%s.leases", leasePath, d.SubnetID)
+	d.hostsFile = fmt.Sprintf("%sdnsmasq_%s.hosts", hostsPath, d.SubnetID)
+
+	return nil
+}
+
+// Populates the subnet specific private variables
+func (d *Dnsmasq) getSubnetConfiguration() error {
+
+	// We need at least 2 IPs to work
+	// One for the bridge and one for the tenant
+	ones, bits := d.TenantNet.Mask.Size()
+	if bits != 32 || ones > 30 || ones == 0 {
+		return fmt.Errorf("invalid subnet %s", d.TenantNet.String())
+	}
+	subnetSize := ^(^0 << uint32(32-ones)) + 1
+
+	// We need at least one IP for DHCP
+	// 3 are reserved for subnet, gateway, and broadcast (subnet i.e. .0 can be
+	// used but is currently is not due to legacy convention)
+	if d.dhcpSize = subnetSize - d.ReservedIPs - 3; d.dhcpSize <= 0 {
+		return fmt.Errorf("invalid reservation %s %v", d.TenantNet.String(), d.ReservedIPs)
+	}
+
+	//No deep copy implementation in net.IP
+	//Mask is the closest to a deep copy
+	//TODO Implement deep copy
+	d.subnet = d.TenantNet.IP.To4().Mask(d.TenantNet.Mask)
+	if d.subnet == nil {
+		return fmt.Errorf("invalid subnet")
+	}
+
+	d.gateway.IP = d.TenantNet.IP.To4().Mask(d.TenantNet.Mask)
+	d.gateway.Mask = d.TenantNet.Mask
+	d.start = d.TenantNet.IP.To4().Mask(d.TenantNet.Mask)
+	d.end = d.TenantNet.IP.To4().Mask(d.TenantNet.Mask)
+	//End Hack
+
+	//Skip the network address
+	d.gateway.IP[3]++
+
+	//Designate the first IP after network, gateway and reserved range
+	startU32 := binary.BigEndian.Uint32(d.start)
+	startU32 += uint32(2 + d.ReservedIPs)
+	binary.BigEndian.PutUint32(d.start, startU32)
+
+	//Fix: end is the last address handed out by DHCP, start + dhcpSize - 1.
+	//The old code added startU32 on top of the subnet base already held in
+	//d.end, double counting the base address
+	endU32 := startU32 + uint32(d.dhcpSize) - 1
+	binary.BigEndian.PutUint32(d.end, endU32)
+
+	//Generate all valid IPs in this subnet and pre-assign a MAC address
+	for i := 0; i < d.dhcpSize; i++ {
+		vIP := make(net.IP, net.IPv4len)
+		binary.BigEndian.PutUint32(vIP, startU32+uint32(i))
+
+		//last 4 bytes will directly map to the desired IP address
+		macStr := fmt.Sprintf("%s:%02x:%02x:%02x:%02x", MACPrefix, vIP[0], vIP[1], vIP[2], vIP[3])
+		macAddr, err := net.ParseMAC(macStr)
+		if err != nil {
+			return err
+		}
+
+		dhcpEntry := &DhcpEntry{
+			MACAddr: macAddr,
+			IPAddr:  vIP,
+		}
+
+		if err := d.AddDhcpEntry(dhcpEntry); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (d *Dnsmasq) createHostsFile() error {
+	file, err := os.Create(d.hostsFile)
+	if err != nil {
+		return err
+	}
+	defer file.Close()
+
+	for _, e := range d.IPMap {
+		s := fmt.Sprintf("%s,%s", e.MACAddr, e.IPAddr)
+		if e.Hostname != "" {
+			s = fmt.Sprintf("%s,%s", s, e.Hostname)
+		}
+		s = fmt.Sprintf("%s,id:*\n", s)
+		if _, err 
:= file.WriteString(s); err != nil {
+			return err
+		}
+	}
+
+	file.Sync()
+	return nil
+}
+
+func (d *Dnsmasq) createConfigFile() error {
+	//Fix: make([]string, 20) created 20 empty leading entries that were
+	//then written into the config file; allocate capacity only
+	params := make([]string, 0, 20)
+
+	if d.Dev == nil {
+		return fmt.Errorf("bridge nil")
+	}
+
+	if d.Dev.LinkName == "" {
+		return fmt.Errorf("bridge uninitialized")
+	}
+
+	params = append(params, fmt.Sprintf("pid-file=%s\n", d.pidFile))
+	params = append(params, fmt.Sprintf("dhcp-leasefile=%s\n", d.leaseFile))
+	params = append(params, fmt.Sprintf("dhcp-hostsfile=%s\n", d.hostsFile))
+	//params = append(params, "strict-order\n")
+	//params = append(params, "expand-hosts\n")
+	if d.DomainName != "" {
+		//Fix: the format string and the domain were appended as two
+		//separate entries, so the %s verb was never expanded
+		params = append(params, fmt.Sprintf("domain=%s\n", d.DomainName))
+	}
+	params = append(params, "domain-needed\n")
+	params = append(params, "bogus-priv\n")
+	params = append(params, "bind-interfaces\n")
+	params = append(params, fmt.Sprintf("interface=%s\n", d.Dev.LinkName))
+	params = append(params, "except-interface=lo\n")
+	params = append(params, "dhcp-no-override\n")
+	params = append(params, "dhcp-ignore=tag!known\n")
+	params = append(params, fmt.Sprintf("listen-address=%s\n", d.gateway.IP.String()))
+	params = append(params, fmt.Sprintf("dhcp-range=%s,static\n", d.subnet.String()))
+	params = append(params, fmt.Sprintf("dhcp-lease-max=%d\n", d.dhcpSize))
+	params = append(params, fmt.Sprintf("dhcp-option-force=26,%d\n", d.MTU))
+	//params = append(params, "log-dhcp\n")
+
+	file, err := os.Create(d.confFile)
+	if err != nil {
+		return fmt.Errorf("Unable to create file %v %v", d.confFile, err)
+	}
+	defer file.Close()
+
+	for _, s := range params {
+		if _, err := file.WriteString(s); err != nil {
+			return err
+		}
+	}
+
+	file.Sync()
+
+	return nil
+}
+
+func (d *Dnsmasq) launch() error {
+	prog := "dnsmasq"
+	args := fmt.Sprintf("--conf-file=%s", d.confFile)
+
+	cmd := exec.Command(prog, args)
+	_, err := cmd.Output()
+
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+func (d *Dnsmasq) getPid() (int, error) {
+
+	pidbytes, err := 
ioutil.ReadFile(d.pidFile) + if err != nil { + return -1, err + } + + //TODO: Check against the kernel.pid_max + pidStr := strings.Trim(string(pidbytes), "\n") + pid, err := strconv.ParseUint(pidStr, 10, 32) + if err != nil { + return -1, err + } + + return int(pid), nil +} + +func (d *Dnsmasq) setMTU() error { + // TODO: Setup MTU based on tunnel type + d.MTU = 1400 + return nil +} diff --git a/networking/libsnnet/dnsmasq_test.go b/networking/libsnnet/dnsmasq_test.go new file mode 100644 index 000000000..e69018fc9 --- /dev/null +++ b/networking/libsnnet/dnsmasq_test.go @@ -0,0 +1,226 @@ +// +// Copyright (c) 2016 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package libsnnet_test + +import ( + "io/ioutil" + "net" + "strconv" + "testing" + + "github.com/01org/ciao/networking/libsnnet" +) + +//Test normal operation DCHP/DNS server setup for a CNCI +// +//This test created a bridge, assigns an IP to it, attaches +//a bridge local dnsmasq process to serve DHCP and DNS on this +//brigde. 
It also tests for reload of the dnsmasq, stop and +//restart +// +//Test is expected to pass +func TestDnsmasq_Basic(t *testing.T) { + + id := "concuuid" + tenant := "tenantuuid" + reserved := 0 + subnet := net.IPNet{ + IP: net.IPv4(192, 168, 1, 0), + Mask: net.IPv4Mask(255, 255, 255, 0), + } + + bridge, _ := libsnnet.NewBridge("dns_testbr") + + if err := bridge.Create(); err != nil { + t.Errorf("Bridge creation failed: %v", err) + } + defer bridge.Destroy() + + d, err := libsnnet.NewDnsmasq(id, tenant, subnet, reserved, bridge) + if err != nil { + t.Errorf("DNS Masq New failed: %v", err) + } + + if len(d.IPMap) != (256 - reserved - 3) { + t.Errorf("Incorrect subnet calculation") + } + + if err := d.Start(); err != nil { + t.Errorf("DNS Masq Start: %v", err) + } + + if err := d.Reload(); err != nil { + t.Errorf("DNS Masq Reload: %v", err) + } + + if err := d.Restart(); err != nil { + t.Errorf("DNS Masq Restart: %v", err) + } + + if err := d.Stop(); err != nil { + t.Errorf("DNS Masq Stop: %v", err) + } + + if err := d.Restart(); err != nil { + t.Errorf("DNS Masq Restart: %v", err) + } + + if err := d.Reload(); err != nil { + t.Errorf("DNS Masq Reload: %v", err) + } + + if err := d.Stop(); err != nil { + t.Errorf("DNS Masq Stop: %v", err) + } + +} + +//Dnsmasq negative test cases +// +//Tests that error conditions are handled gracefully +//Checks that duplicate subnet creation is handled properly +//Note: This test needs improvement +// +//Test is expected to pass +func TestDnsmasq_Negative(t *testing.T) { + + id := "concuuid" + tenant := "tenantuuid" + reserved := 10 + subnet := net.IPNet{ + IP: net.IPv4(192, 168, 1, 0), + Mask: net.IPv4Mask(255, 255, 255, 0), + } + + bridge, _ := libsnnet.NewBridge("dns_testbr") + + if err := bridge.Create(); err != nil { + t.Errorf("Bridge creation failed: %v", err) + } + defer bridge.Destroy() + + // Note: Reinstantiate d each time as that + // is how it will be used + if d, err := libsnnet.NewDnsmasq(id, tenant, subnet, 
reserved, bridge); err == nil { + if err := d.Start(); err != nil { + t.Errorf("DNS Masq Start: %v", err) + } + } else { + t.Errorf("DNS Masq New failed: %v", err) + } + + //Attach should work + if d, err := libsnnet.NewDnsmasq(id, tenant, subnet, reserved, bridge); err == nil { + if pid, err := d.Attach(); err != nil { + t.Errorf("DNS Masq attach should not have failed %v", err) + } else { + t.Logf("attached to pid %v", pid) + pidStr := strconv.Itoa(pid) + fileName := "/proc/" + pidStr + "/cmdline" + contents, err := ioutil.ReadFile(fileName) + t.Logf("File [%v] has %v %v", fileName, string(contents), err) + } + } else { + t.Errorf("DNS Masq New failed: %v", err) + } + + //Restart should work + if d, err := libsnnet.NewDnsmasq(id, tenant, subnet, reserved, bridge); err == nil { + if err := d.Restart(); err != nil { + t.Errorf("DNS Masq Restart failed: %v", err) + } + } else { + t.Errorf("DNS Masq New failed: %v", err) + } + + //Reload should work + if d, err := libsnnet.NewDnsmasq(id, tenant, subnet, reserved, bridge); err == nil { + if err := d.Reload(); err != nil { + t.Errorf("DNS Masq Reload failed: %v", err) + } + } else { + t.Errorf("DNS Masq New failed: %v", err) + } + + // Duplicate creation + if d, err := libsnnet.NewDnsmasq(id, tenant, subnet, reserved, bridge); err == nil { + if err := d.Start(); err == nil { + t.Errorf("DNS Masq Started twice") + } + } else { + t.Errorf("DNS Masq New failed: %v", err) + } + + // Stop it + if d, err := libsnnet.NewDnsmasq(id, tenant, subnet, reserved, bridge); err == nil { + if err := d.Stop(); err != nil { + t.Errorf("DNS Masq Stop: %v", err) + } + } else { + t.Errorf("DNS Masq New failed: %v", err) + } + + //Attach should fail + if d, err := libsnnet.NewDnsmasq(id, tenant, subnet, reserved, bridge); err == nil { + if pid, err := d.Attach(); err == nil { + t.Errorf("DNS Masq attach should have failed %v", pid) + pidStr := strconv.Itoa(pid) + fileName := "/proc/" + pidStr + "/cmdline" + contents, err := 
ioutil.ReadFile(fileName) + t.Errorf("File [%v] has %v %v", fileName, string(contents), err) + } + } else { + t.Errorf("DNS Masq New failed: %v", err) + } + + //Stop should fail + if d, err := libsnnet.NewDnsmasq(id, tenant, subnet, reserved, bridge); err == nil { + if err := d.Stop(); err == nil { + t.Errorf("DNS Masq Stop should fail") + } + } else { + t.Errorf("DNS Masq New failed: %v", err) + } + + //Reload should fail + if d, err := libsnnet.NewDnsmasq(id, tenant, subnet, reserved, bridge); err == nil { + if err := d.Reload(); err == nil { + t.Errorf("DNS Masq Reload should have failed") + } + } else { + t.Errorf("DNS Masq New failed: %v", err) + } + + //Restart should not fail + if d, err := libsnnet.NewDnsmasq(id, tenant, subnet, reserved, bridge); err == nil { + if err := d.Restart(); err != nil { + t.Errorf("DNS Masq Restart should have failed %v", err) + } + } else { + t.Errorf("DNS Masq New failed: %v", err) + } + + // Stop it + if d, err := libsnnet.NewDnsmasq(id, tenant, subnet, reserved, bridge); err == nil { + if err := d.Stop(); err != nil { + t.Errorf("DNS Masq Stop failed: %v", err) + } + } else { + t.Errorf("DNS Masq New failed: %v", err) + } + +} diff --git a/networking/libsnnet/docker_database.go b/networking/libsnnet/docker_database.go new file mode 100644 index 000000000..f0071a342 --- /dev/null +++ b/networking/libsnnet/docker_database.go @@ -0,0 +1,218 @@ +// +// Copyright (c) 2016 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. +// + +package libsnnet + +import ( + "bytes" + "encoding/gob" + "fmt" + "os" + "path" + "time" + + "github.com/boltdb/bolt" + "github.com/golang/glog" +) + +const ( + dbFile = "docker_plugin.db" +) + +type dockerBoltDB struct { + Name string + DB *bolt.DB +} + +func newDockerBoltDb() *dockerBoltDB { + return &dockerBoltDB{ + Name: "docker_bolt.DB", + } +} + +type dbProvider dockerBoltDB + +//NewDockerBoltDBProvider returns a bolt based database that conforms +//to the DockerDBProvider interface +func NewDockerBoltDBProvider() DockerDBProvider { + return (*dbProvider)(newDockerBoltDb()) +} + +func (db *dbProvider) DbInit(dbDir string) error { + + if err := os.MkdirAll(dbDir, 0755); err != nil { + return fmt.Errorf("Unable to create db directory (%s) %v", dbDir, err) + } + + dbPath := path.Join(dbDir, dbFile) + + options := bolt.Options{ + Timeout: 3 * time.Second, + } + + var err error + db.DB, err = bolt.Open(dbPath, 0644, &options) + if err != nil { + return fmt.Errorf("initDb failed %v", err) + } + + return err +} + +func (db *dbProvider) DbClose() error { + return db.DB.Close() +} + +//TODO: There must be a better way to do this (besides reflection) +func (db *dbProvider) DbMapRebuild(table string, dockerMap interface{}) error { + tables := []string{table} + if err := db.DbTableInit(tables); err != nil { + return fmt.Errorf("dbInit failed %v", err) + } + + switch dmap := dockerMap.(type) { + case *DockerNwMap: + dmap.m = make(map[string]*DockerNwVal) + case *DockerEpMap: + dmap.m = make(map[string]*DockerEpVal) + default: + return fmt.Errorf("error: invalid map type %T", dmap) + } + + err := db.DB.View(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte(table)) + + b.ForEach(func(k, v []byte) error { + vr := bytes.NewReader(v) + + switch dmap := dockerMap.(type) { + case *DockerNwMap: + val := &DockerNwVal{} + if err := gob.NewDecoder(vr).Decode(val); err != 
nil { + return fmt.Errorf("Decode Error: %v %v %v", string(k), string(v), err) + } + glog.Infof("%v key=%v, value=%v\n", table, string(k), val) + + dmap.m[string(k)] = val + case *DockerEpMap: + val := &DockerEpVal{} + if err := gob.NewDecoder(vr).Decode(val); err != nil { + return fmt.Errorf("Decode Error: %v %v %v", string(k), string(v), err) + } + glog.Infof("%v key=%v, value=%v\n", table, string(k), val) + dmap.m[string(k)] = val + } + + return nil + }) + return nil + }) + return err +} + +func (db *dbProvider) DbTableInit(tables []string) (err error) { + + glog.Infof("dbInit Tables := %v", tables) + for i, v := range tables { + glog.Infof("table[%v] := %v, %v", i, v, []byte(v)) + } + + err = db.DB.Update(func(tx *bolt.Tx) error { + for _, table := range tables { + _, err := tx.CreateBucketIfNotExists([]byte(table)) + if err != nil { + return fmt.Errorf("Bucket creation error: %v %v", table, err) + } + } + return nil + }) + + if err != nil { + glog.Errorf("Table creation error %v", err) + } + + return err +} + +func (db *dbProvider) DbAdd(table string, key string, value interface{}) (err error) { + + err = db.DB.Update(func(tx *bolt.Tx) error { + var v bytes.Buffer + + if err := gob.NewEncoder(&v).Encode(value); err != nil { + glog.Errorf("Encode Error: %v %v", err, value) + return err + } + + bucket := tx.Bucket([]byte(table)) + if bucket == nil { + return fmt.Errorf("Bucket %v not found", table) + } + + err = bucket.Put([]byte(key), v.Bytes()) + if err != nil { + return fmt.Errorf("Key Store error: %v %v %v %v", table, key, value, err) + } + return nil + }) + + return err +} + +func (db *dbProvider) DbDelete(table string, key string) (err error) { + + err = db.DB.Update(func(tx *bolt.Tx) error { + + bucket := tx.Bucket([]byte(table)) + if bucket == nil { + return fmt.Errorf("Bucket %v not found", table) + } + + err = bucket.Delete([]byte(key)) + if err != nil { + return fmt.Errorf("Key Delete error: %v %v ", key, err) + } + return nil + }) + + return err +} + 
+func (db *dbProvider) DbGet(table string, key string) (value interface{}, err error) { + + err = db.DB.View(func(tx *bolt.Tx) error { + + bucket := tx.Bucket([]byte(table)) + if bucket == nil { + return fmt.Errorf("Bucket %v not found", table) + } + + val := bucket.Get([]byte(key)) + if val == nil { + return nil + } + + v := bytes.NewReader(val) + if err := gob.NewDecoder(v).Decode(value); err != nil { + glog.Errorf("Decode Error: %v %v %v", table, key, err) + return err + } + + return nil + }) + + return value, err +} diff --git a/networking/libsnnet/docker_plugin.go b/networking/libsnnet/docker_plugin.go new file mode 100644 index 000000000..0f2f84a10 --- /dev/null +++ b/networking/libsnnet/docker_plugin.go @@ -0,0 +1,831 @@ +// +// Copyright (c) 2016 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package libsnnet + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "net" + "net/http" + "os" + "path" + "strings" + "sync" + "time" + + "github.com/docker/distribution/uuid" + "github.com/docker/libnetwork/drivers/remote/api" + ipamapi "github.com/docker/libnetwork/ipams/remote/api" + "github.com/golang/glog" + "github.com/gorilla/mux" + "github.com/tylerb/graceful" +) + +/* +The plugin acts as a slave to the ciao networking framework. +The goal here is to do no work in the plugin except inform the docker +daemon about the veth interface it needs to place inside the container + +Hence the real flow will be as follows + +0. 
Laucher creates and starts the DockerPlugin thread + +Note: The launcher should be launched prior to the docker daemon. + Also we need to configure docker daemon to not create its default + bridge and host networks as they cause problems. + +1. Launcher gets a request to launch a container + The request from the Controller to launcher already has the following + information (IP Address, MAC Address and subnet for the VNIC) + Note: Based on the current ciao design the gateway for the + subnet can be inferred. + +2. Launcher invokes ciao networking to create a Container Vnic + +3. ciao Networking + a. Creates a veth pair + b. Assigns the macaddress to the container side of veth pair + c. Attaches the veth to the tenant bridge (creating it if needed) + d. Returns the fully configured docker side veth pair to Launcher + e. Also returns if the subnet needs to be created + (Note: This is the docker logical subnet) + +4. (Launcher) if a subnet creation request was returned. Uses docker API + or command line to + docker network create -d=ciao + --ipam-driver=ciao + --subnet= + --gateway= + --opt "bridge"= + subnet.Name + + Note: Our custom IPAM driver is needed only when we start using overlapping + subnets between tenants. Otherwise the default IPAM driver meets our needs. + + + Note: Fully speccing the network creation and handing control to the + ciao driver (-d) makes docker a passthro for networking. + Note: The docker IPAM seems to try to setup its own gateway. WHY? + + In the future any more information we need can also be sent as more + options. e.g. + --opt "cnci"= + + + - This in turn will result in a callback to our HTTP plugin. + We will just return success as we are already aware of this + (Yes. We call docker and docker calls us back) + +5. (Launcher) will then request docker to create & launch the container, + again fully specifing the networking configuration. + + docker run -it --net= --ip= --mac-address= busybox + + WARNING. 
WARNING: There is a bug in the latest docker 1.10.03 (which has been fixed + in the 1.11 dev version) which does not pass the --ip parameter to the + remote IPAM plugin. Without this we cannot use our IPAM driver + +6. The ciao docker plugin acts as both a network and IPAM remote plugin. + It handles all the requests. Some of the more imporant ones are + a. EndPointCreate: If the container is being created for the first time + As we have already created the VNIC, we only need to cache the endpoint id to instance map + b. Join: When the end point is being placed inside the container + On Join the plugin will return back to docker the following information + - name of the veth pair to place within the container + - the ethernet device name prefix to be assigned to the logic interface + within the container (e.g. eth or eno) + - the default gw for the container + - any other static routes to be added within the container (if needed) + + Note: We will delete only when the launcher tells us to tear down networking. + Not when docker logically tears down the network. + +7. The docker daemon will use the values sent back by the plugin to launch the container + Move the veth into the docker container and give it the logical name. 
+ Setup the IP address and gateway + +*/ + +//DockerPluginCfg controls plugin attributes +//these may be overidden by the caller if needed +var DockerPluginCfg = struct { + Name string + Dir string + Addr string + DataDir string + Timeout time.Duration +}{ + Name: "ciao", + Dir: "/etc/docker/plugins", + Addr: "127.0.0.1:9999", + DataDir: "/var/lib/ciao/networking", + Timeout: 1 * time.Second, +} + +// A DockerDBProvider represents a persistent data base provider +// that can be used by the DockerPlugin to store its internal state +type DockerDBProvider interface { + //Initializes the Database + DbInit(dir string) error + //Populates the DockerPlugin cache from the database + DbMapRebuild(table string, dockerMap interface{}) error + //Closes the database + DbClose() error + //Creates the tables if the tables do not already exist in the database + DbTableInit(tables []string) error + //Adds the key value pair to the table + DbAdd(table string, key string, value interface{}) error + //Adds the key value pair to the table + DbDelete(table string, key string) error + //Retrives the value corresponding to the key from the table + DbGet(table string, key string) (interface{}, error) +} + +//DockerEpVal stores ciao VNIC info for a particular docker endpoint +type DockerEpVal struct { + ID string + IP string + Cveth string + Hveth string +} + +//DockerNwVal stores ciao CN tenant bridge mapping +type DockerNwVal struct { + Bridge string + Gateway net.IPNet +} + +const ( + tableNetworkMap = "NetworkMap" + tableEndPointMap = "EndPointMap" +) + +//DockerEpMap maintains the Endpoint UUID to ciao VNIC mapping +type DockerEpMap struct { + sync.Mutex + m map[string]*DockerEpVal //index: Docker End Point UUID +} + +//DockerNwMap maintains the Docker Network UUID to ciao Network mappings +type DockerNwMap struct { + sync.Mutex + m map[string]*DockerNwVal //index: Docker Network UUID +} + +// DockerPlugin describes a single instance of a docker plugin +// In the current design the 
plugin acts as an IPAM and Network Plugin +type DockerPlugin struct { + DockerDBProvider //Database used to persist the Docker to ciao Mapping + //This is needed as the Docker Daemon and ciao have + //different lifecycles and UUIDs + *mux.Router + *graceful.Server + DockerEpMap + DockerNwMap +} + +func sendResponse(resp interface{}, w http.ResponseWriter) error { + rb, err := json.Marshal(resp) + glog.Infof("Sending response := %v, %v", resp, err) + fmt.Fprintf(w, "%s", rb) + return err +} + +func getBody(r *http.Request) ([]byte, error) { + body, err := ioutil.ReadAll(r.Body) + glog.Infof("URL [%s] Body [%s] Error [%v]", r.URL.Path[1:], string(body), err) + return body, err +} + +func handler(d *DockerPlugin, w http.ResponseWriter, r *http.Request) { + body, _ := getBody(r) + resp := api.Response{} + resp.Err = "Unhandled API request " + string(r.URL.Path[1:]) + " " + string(body) + sendResponse(resp, w) +} + +func handlerPluginActivate(d *DockerPlugin, w http.ResponseWriter, r *http.Request) { + getBody(r) + //TODO: Where is this encoding? + resp := `{ + "Implements": ["NetworkDriver", "IpamDriver"] +}` + fmt.Fprintf(w, "%s", resp) +} + +func handlerGetCapabilities(d *DockerPlugin, w http.ResponseWriter, r *http.Request) { + getBody(r) + resp := api.GetCapabilityResponse{Scope: "local"} + sendResponse(resp, w) +} + +func handlerCreateNetwork(d *DockerPlugin, w http.ResponseWriter, r *http.Request) { + resp := api.CreateNetworkResponse{} + + body, err := getBody(r) + if err != nil { + resp.Err = "Error: " + err.Error() + sendResponse(resp, w) + return + } + + req := api.CreateNetworkRequest{} + if err := json.Unmarshal(body, &req); err != nil { + resp.Err = "Error: " + err.Error() + sendResponse(resp, w) + return + } + + v, ok := req.Options["com.docker.network.generic"].(map[string]interface{}) + if !ok { + resp.Err = "Error: network options incorrect or unspecified. 
Please provide bridge info" + sendResponse(resp, w) + return + } + + bridge, ok := v["bridge"].(string) + if !ok { + resp.Err = "Error: network incorrect or unspecified. Please provide bridge info" + sendResponse(resp, w) + return + } + + d.DockerNwMap.Lock() + defer d.DockerNwMap.Unlock() + + /* Record the docker network UUID to ciao bridge mapping + This has to survive a plugin crash/restart and needs to be persisted + */ + d.DockerNwMap.m[req.NetworkID] = &DockerNwVal{ + Bridge: bridge, + Gateway: *req.IPv4Data[0].Gateway, + } + + if err := d.DbAdd(tableNetworkMap, req.NetworkID, d.DockerNwMap.m[req.NetworkID]); err != nil { + glog.Errorf("Unable to update db %v", err) + } + + sendResponse(resp, w) +} + +func handlerDeleteNetwork(d *DockerPlugin, w http.ResponseWriter, r *http.Request) { + resp := api.DeleteNetworkResponse{} + + body, err := getBody(r) + if err != nil { + resp.Err = "Error: " + err.Error() + sendResponse(resp, w) + return + } + + req := api.DeleteNetworkRequest{} + if err = json.Unmarshal(body, &req); err != nil { + resp.Err = "Error: " + err.Error() + sendResponse(resp, w) + return + } + + glog.Infof("Delete Network := %v", req.NetworkID) + + /* Actual network delete would have already been done in ciao + Remove the UUID to bridge mapping in cache and in the + persistent data store + */ + d.DockerNwMap.Lock() + bridge := d.DockerNwMap.m[req.NetworkID].Bridge + delete(d.DockerNwMap.m, req.NetworkID) + if err := d.DbDelete(tableNetworkMap, req.NetworkID); err != nil { + glog.Errorf("Unable to update db %v %v", bridge, err) + } + d.DockerNwMap.Unlock() + + sendResponse(resp, w) + + return +} + +func handlerEndpointOperInfof(d *DockerPlugin, w http.ResponseWriter, r *http.Request) { + resp := api.EndpointInfoResponse{} + body, err := getBody(r) + + if err != nil { + resp.Err = "Error: " + err.Error() + sendResponse(resp, w) + return + } + + req := api.EndpointInfoRequest{} + err = json.Unmarshal(body, &req) + + if err != nil { + resp.Err = "Error: 
" + err.Error() + sendResponse(resp, w) + return + } + + sendResponse(resp, w) +} + +func handlerCreateEndpoint(d *DockerPlugin, w http.ResponseWriter, r *http.Request) { + resp := api.CreateEndpointResponse{} + + body, err := getBody(r) + if err != nil { + resp.Err = "Error: " + err.Error() + sendResponse(resp, w) + return + } + + req := api.CreateEndpointRequest{} + if err = json.Unmarshal(body, &req); err != nil { + resp.Err = "Error: " + err.Error() + sendResponse(resp, w) + return + } + + if req.Interface.Address == "" { + resp.Err = "Error: IP Address parameter not provided in docker run" + sendResponse(resp, w) + return + } + + d.DockerNwMap.Lock() + bridge := d.DockerNwMap.m[req.NetworkID].Bridge + d.DockerNwMap.Unlock() + + if bridge == "" { + resp.Err = "Error: incompatible network" + sendResponse(resp, w) + return + } + + // These are already setup by the SDN controller + // Get the alias for the VNIC based on the bridge and IP + subnetTuple := strings.TrimPrefix(bridge, bridgePrefix) + ip, _, err := net.ParseCIDR(req.Interface.Address) + if err != nil { + resp.Err = "Error: Invalid IP Address " + err.Error() + sendResponse(resp, w) + return + } + + vnicID := fmt.Sprintf("%s%s##%s", vnicPrefix, subnetTuple, ip.String()) + + //We can also get this directly from the SDN controller. 
+ //However that will prevent the plugin from being its own service + //in the future + vnic, err := NewContainerVnic(vnicID) + if err != nil { + resp.Err = "Error: invalid interface " + err.Error() + sendResponse(resp, w) + return + } + + if err := vnic.GetDevice(); err != nil { + resp.Err = "Error: invalid interface " + err.Error() + sendResponse(resp, w) + return + } + + d.DockerEpMap.Lock() + defer d.DockerEpMap.Unlock() + + d.DockerEpMap.m[req.EndpointID] = &DockerEpVal{ + ID: vnicID, + IP: req.Interface.Address, + Hveth: vnic.InterfaceName(), + Cveth: vnic.PeerName(), + } + + if err := d.DbAdd(tableEndPointMap, req.EndpointID, d.DockerEpMap.m[req.EndpointID]); err != nil { + glog.Errorf("Unable to update db %v", err) + } + + sendResponse(resp, w) +} + +func handlerDeleteEndpoint(d *DockerPlugin, w http.ResponseWriter, r *http.Request) { + resp := api.DeleteEndpointResponse{} + + body, err := getBody(r) + if err != nil { + resp.Err = "Error: " + err.Error() + sendResponse(resp, w) + return + } + + req := api.DeleteEndpointRequest{} + if err = json.Unmarshal(body, &req); err != nil { + resp.Err = "Error: " + err.Error() + sendResponse(resp, w) + return + } + + d.DockerEpMap.Lock() + //ID := d.DockerEpMap.m[req.EndpointID].ID + delete(d.DockerEpMap.m, req.EndpointID) + if err := d.DbDelete(tableEndPointMap, req.EndpointID); err != nil { + glog.Errorf("Unable to update db %v", err) + } + d.DockerEpMap.Unlock() + + /* + // This will be done in the SDN controller once the + // container is deleted. 
However at this point there is + // a disconnect between the docker data base and SDN database + vnic, err := NewContainerVnic(ID) + if err != nil { + if err := vnic.GetDevice(); err != nil { + glog.Errorf("Link has not been deleted %v", err) + if err := vnic.Destroy(); err != nil { + glog.Errorf("Unable to delete link %v", err) + } + } + } + */ + + sendResponse(resp, w) +} + +func handlerJoin(d *DockerPlugin, w http.ResponseWriter, r *http.Request) { + resp := api.JoinResponse{} + + body, err := getBody(r) + if err != nil { + resp.Err = "Error: " + err.Error() + sendResponse(resp, w) + return + } + + req := api.JoinRequest{} + if err := json.Unmarshal(body, &req); err != nil { + resp.Err = "Error: " + err.Error() + sendResponse(resp, w) + return + } + + d.DockerNwMap.Lock() + d.DockerEpMap.Lock() + nm := d.DockerNwMap.m[req.NetworkID] + em := d.DockerEpMap.m[req.EndpointID] + d.DockerNwMap.Unlock() + d.DockerEpMap.Unlock() + + resp.Gateway = nm.Gateway.IP.String() + resp.InterfaceName = &api.InterfaceName{ + SrcName: em.Cveth, + DstPrefix: "eth", + } + sendResponse(resp, w) +} + +func handlerLeave(d *DockerPlugin, w http.ResponseWriter, r *http.Request) { + resp := api.LeaveResponse{} + + body, err := getBody(r) + if err != nil { + resp.Err = "Error: " + err.Error() + sendResponse(resp, w) + return + } + + req := api.LeaveRequest{} + if err := json.Unmarshal(body, &req); err != nil { + resp.Err = "Error: " + err.Error() + sendResponse(resp, w) + return + } + + sendResponse(resp, w) +} + +func handlerDiscoverNew(d *DockerPlugin, w http.ResponseWriter, r *http.Request) { + resp := api.DiscoveryResponse{} + + body, err := getBody(r) + if err != nil { + resp.Err = "Error: " + err.Error() + sendResponse(resp, w) + return + } + + req := api.DiscoveryNotification{} + if err := json.Unmarshal(body, &req); err != nil { + resp.Err = "Error: " + err.Error() + sendResponse(resp, w) + return + } + + sendResponse(resp, w) +} + +func handlerDiscoverDelete(d *DockerPlugin, w 
http.ResponseWriter, r *http.Request) { + resp := api.DiscoveryResponse{} + + body, err := getBody(r) + if err != nil { + resp.Err = "Error: " + err.Error() + sendResponse(resp, w) + return + } + + req := api.DiscoveryNotification{} + if err := json.Unmarshal(body, &req); err != nil { + resp.Err = "Error: " + err.Error() + sendResponse(resp, w) + return + } + + sendResponse(resp, w) +} + +func handlerExternalConnectivity(d *DockerPlugin, w http.ResponseWriter, r *http.Request) { + resp := api.ProgramExternalConnectivityResponse{} + + body, err := getBody(r) + if err != nil { + resp.Err = "Error: " + err.Error() + sendResponse(resp, w) + return + } + + req := api.ProgramExternalConnectivityRequest{} + if err := json.Unmarshal(body, &req); err != nil { + resp.Err = "Error: " + err.Error() + sendResponse(resp, w) + return + } + + sendResponse(resp, w) +} + +func handlerRevokeExternalConnectivity(d *DockerPlugin, w http.ResponseWriter, r *http.Request) { + resp := api.RevokeExternalConnectivityResponse{} + + body, err := getBody(r) + if err != nil { + resp.Err = "Error: " + err.Error() + sendResponse(resp, w) + return + } + + req := api.RevokeExternalConnectivityResponse{} + if err := json.Unmarshal(body, &req); err != nil { + resp.Err = "Error: " + err.Error() + sendResponse(resp, w) + return + } + + sendResponse(resp, w) +} + +func ipamGetCapabilities(d *DockerPlugin, w http.ResponseWriter, r *http.Request) { + getBody(r) + resp := ipamapi.GetCapabilityResponse{RequiresMACAddress: true} + sendResponse(resp, w) +} + +func ipamGetDefaultAddressSpaces(d *DockerPlugin, w http.ResponseWriter, r *http.Request) { + resp := ipamapi.GetAddressSpacesResponse{} + getBody(r) + + resp.GlobalDefaultAddressSpace = "" + resp.LocalDefaultAddressSpace = "" + sendResponse(resp, w) +} + +func ipamRequestPool(d *DockerPlugin, w http.ResponseWriter, r *http.Request) { + resp := ipamapi.RequestPoolResponse{} + + body, err := getBody(r) + if err != nil { + resp.Error = "Error: " + 
err.Error() + sendResponse(resp, w) + return + } + + req := ipamapi.RequestPoolRequest{} + if err := json.Unmarshal(body, &req); err != nil { + resp.Error = "Error: " + err.Error() + sendResponse(resp, w) + return + } + + resp.PoolID = uuid.Generate().String() + resp.Pool = req.Pool + sendResponse(resp, w) +} + +func ipamReleasePool(d *DockerPlugin, w http.ResponseWriter, r *http.Request) { + resp := ipamapi.ReleasePoolResponse{} + + body, err := getBody(r) + if err != nil { + resp.Error = "Error: " + err.Error() + sendResponse(resp, w) + return + } + + req := ipamapi.ReleasePoolRequest{} + if err := json.Unmarshal(body, &req); err != nil { + resp.Error = "Error: " + err.Error() + sendResponse(resp, w) + return + } + + sendResponse(resp, w) +} + +func ipamRequestAddress(d *DockerPlugin, w http.ResponseWriter, r *http.Request) { + resp := ipamapi.RequestAddressResponse{} + + body, err := getBody(r) + if err != nil { + resp.Error = "Error: " + err.Error() + sendResponse(resp, w) + return + } + + req := ipamapi.RequestAddressRequest{} + if err := json.Unmarshal(body, &req); err != nil { + resp.Error = "Error: " + err.Error() + sendResponse(resp, w) + return + } + + //TODO: Should come from the subnet mask for the subnet + if req.Address != "" { + resp.Address = req.Address + "/24" + } else { + //DOCKER BUG: The preferred address supplied in --ip does not show up. + //Bug fixed in docker 1.11 + resp.Error = "Error: Request does not have IP address. 
Specify using --ip" + } + sendResponse(resp, w) +} + +func ipamReleaseAddress(d *DockerPlugin, w http.ResponseWriter, r *http.Request) { + resp := ipamapi.ReleaseAddressResponse{} + + body, err := getBody(r) + if err != nil { + resp.Error = "Error: " + err.Error() + sendResponse(resp, w) + return + } + + req := ipamapi.ReleaseAddressRequest{} + if err := json.Unmarshal(body, &req); err != nil { + resp.Error = "Error: " + err.Error() + sendResponse(resp, w) + return + } + + sendResponse(resp, w) +} + +//DockerHandler is the default handler for unhandled events +//It returns error to the caller +func DockerHandler(d *DockerPlugin, + fn func(*DockerPlugin, http.ResponseWriter, *http.Request)) http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + fn(d, w, r) + } +} + +//NewDockerPlugin instantiates a new Docker Plugin instance +func NewDockerPlugin() *DockerPlugin { + return &DockerPlugin{ + DockerDBProvider: NewDockerBoltDBProvider(), + } +} + +func createDockerPluginConfig(dir string) error { + if err := os.MkdirAll(dir, 0755); err != nil { + return fmt.Errorf("Unable to create plugin directory (%s) %v", dir, err) + } + + pluginSpecFile := path.Join(dir, DockerPluginCfg.Name+".json") + + var spec struct { + Name string + Addr string + } + spec.Name = DockerPluginCfg.Name + spec.Addr = "http://" + DockerPluginCfg.Addr + b, err := json.Marshal(spec) + if err != nil { + return fmt.Errorf("Unable to create plugin spec (%v) %v", spec, err) + } + err = ioutil.WriteFile(pluginSpecFile, b, 0644) + if err != nil { + return fmt.Errorf("Unable to create plugin file (%s) %v", pluginSpecFile, err) + } + return nil +} + +//Init initializes the docker Plugin. Has to be called after init, but before +//start. 
+func (d *DockerPlugin) Init() error { + d.DockerEpMap.m = make(map[string]*DockerEpVal) + d.DockerNwMap.m = make(map[string]*DockerNwVal) + + if err := createDockerPluginConfig(DockerPluginCfg.Dir); err != nil { + return err + } + + if err := d.DbInit(DockerPluginCfg.DataDir); err != nil { + return err + } + if err := d.DbMapRebuild(tableNetworkMap, &d.DockerNwMap); err != nil { + return err + } + if err := d.DbMapRebuild(tableEndPointMap, &d.DockerEpMap); err != nil { + return err + } + + if d.Router == nil { + d.Router = mux.NewRouter() + } + r := d.Router + r.HandleFunc("/Plugin.Activate", DockerHandler(d, handlerPluginActivate)) + r.HandleFunc("/NetworkDriver.GetCapabilities", DockerHandler(d, handlerGetCapabilities)) + r.HandleFunc("/NetworkDriver.CreateNetwork", DockerHandler(d, handlerCreateNetwork)) + r.HandleFunc("/NetworkDriver.DeleteNetwork", DockerHandler(d, handlerDeleteNetwork)) + r.HandleFunc("/NetworkDriver.CreateEndpoint", DockerHandler(d, handlerCreateEndpoint)) + r.HandleFunc("/NetworkDriver.DeleteEndpoint", DockerHandler(d, handlerDeleteEndpoint)) + r.HandleFunc("/NetworkDriver.EndpointOperInfo", DockerHandler(d, handlerEndpointOperInfof)) + r.HandleFunc("/NetworkDriver.Join", DockerHandler(d, handlerJoin)) + r.HandleFunc("/NetworkDriver.Leave", DockerHandler(d, handlerLeave)) + r.HandleFunc("/NetworkDriver.DiscoverNew", DockerHandler(d, handlerDiscoverNew)) + r.HandleFunc("/NetworkDriver.DiscoverDelete", DockerHandler(d, handlerDiscoverDelete)) + r.HandleFunc("/NetworkDriver.ProgramExternalConnectivity", DockerHandler(d, handlerExternalConnectivity)) + r.HandleFunc("/NetworkDriver.RevokeExternalConnectivity", DockerHandler(d, handlerExternalConnectivity)) + + r.HandleFunc("/IpamDriver.GetCapabilities", DockerHandler(d, ipamGetCapabilities)) + r.HandleFunc("/IpamDriver.GetDefaultAddressSpaces", DockerHandler(d, ipamGetDefaultAddressSpaces)) + r.HandleFunc("/IpamDriver.RequestPool", DockerHandler(d, ipamRequestPool)) + 
r.HandleFunc("/IpamDriver.ReleasePool", DockerHandler(d, ipamReleasePool)) + r.HandleFunc("/IpamDriver.RequestAddress", DockerHandler(d, ipamRequestAddress)) + r.HandleFunc("/IpamDriver.ReleaseAddress", DockerHandler(d, ipamReleaseAddress)) + + r.HandleFunc("/", DockerHandler(d, handler)) + return nil +} + +// Start the DockerPlugin. This activates the HTTP/HTTPS server +// The DockerPlugin has to be started prior to the launch of the +// Docker Daemon +func (d *DockerPlugin) Start() error { + + d.Server = &graceful.Server{ + Timeout: DockerPluginCfg.Timeout, + + Server: &http.Server{ + Addr: DockerPluginCfg.Addr, + Handler: d.Router, + }, + } + + go func() { + glog.Infof("Starting HTTP Server") + //err := http.ListenAndServe(dockerPluginCfg.Addr, d.Router) + //glog.Error("Unable to start HTTP Server [%v]", err) + d.Server.ListenAndServe() + }() + return nil +} + +//Stop the DockerPlugin +//The DockerPlugin has to be stopped after the Docker Daemon +//has been stopped. If the the plugin is stopped when the docker +//daemon is still active the docker daemon has a timeout and +//retry mechanism. Hence if the docker plugin is restarted +//within the retry windows, the docker APIs will still succeed +func (d *DockerPlugin) Stop() error { + //TODO: To be implemented + d.Server.Stop(DockerPluginCfg.Timeout) + return nil +} + +//Close the DockerPlugin +//This has to be called if the caller has ever performed an Init() +//failing to close may lead to database curruption +func (d *DockerPlugin) Close() error { + return d.DbClose() +} diff --git a/networking/libsnnet/firewall.go b/networking/libsnnet/firewall.go new file mode 100644 index 000000000..a07b2e058 --- /dev/null +++ b/networking/libsnnet/firewall.go @@ -0,0 +1,448 @@ +// +// Copyright (c) 2016 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package libsnnet + +import ( + "fmt" + "net" + "os" + "os/exec" + "strconv" + + "github.com/coreos/go-iptables/iptables" + "github.com/vishvananda/netlink" +) + +/* https://wiki.archlinux.org/index.php/iptables + + XXXXXXXXXXXXXXXXXX + XXX Network XXX + XXXXXXXXXXXXXXXXXX + + + | + v + +-------------+ +------------------+ + |table: filter| <---+ | table: nat | + |chain: INPUT | | | chain: PREROUTING| + +-----+-------+ | +--------+---------+ + | | | + v | v + [local process] | **************** +--------------+ + | +---------+ Routing decision +------> |table: filter | + v **************** |chain: FORWARD| +**************** +------+-------+ +Routing decision | +**************** | + | | + v **************** | ++-------------+ +------> Routing decision <---------------+ +|table: nat | | **************** +|chain: OUTPUT| | + ++-----+-------+ | | + | | v + v | +-------------------+ ++--------------+ | | table: nat | +|table: filter | +----+ | chain: POSTROUTING| +|chain: OUTPUT | +--------+----------+ ++--------------+ | + v + XXXXXXXXXXXXXXXXXX + XXX Network XXX + XXXXXXXXXXXXXXXXXX + +*/ + +const ( + procIPFwd = "/proc/sys/net/ipv4/ip_forward" +) + +//FwAction defines firewall action to be performed +type FwAction int + +const ( + //FwDisable disables + FwDisable FwAction = iota + //FwEnable enables + FwEnable +) + +//String representing the firewall action +func (f *FwAction) String() string { + switch *f { + case FwEnable: + return "Enable" + case FwDisable: + return "Disable" + default: + return "Invalid value" + } +} + +//Firewall defines a 
single firewall instance +type Firewall struct { + ExtInterfaces []string + *iptables.IPTables +} + +//InitFirewall Enables routing on the node and NAT on all +//external facing interfaces. Enable NAT right away to prevent +//tenant traffic escape +//TODO: Only enable external routing. Internal routing should +//always be enabled +func InitFirewall(devices ...string) (*Firewall, error) { + + if len(devices) == 0 { + return nil, fmt.Errorf("initFirewall: Invalid input params") + } + + ipt, err := iptables.New() + if err != nil { + return nil, fmt.Errorf("initFirewall: Unable to setup iptables %v", err) + } + + f := &Firewall{ + IPTables: ipt, + } + + for _, device := range devices { + + //iptables -t nat -A POSTROUTING -o $device -j MASQUERADE + err = ipt.AppendUnique("nat", "POSTROUTING", + "-o", device, "-j", "MASQUERADE") + + if err != nil { + ok, err := ipt.Exists("nat", "POSTROUTING", + "-o", device, "-j", "MASQUERADE") + if !ok { + return nil, fmt.Errorf("Error: InitFirewall NAT enable [%v] %v", device, err) + } + } + + f.ExtInterfaces = append(f.ExtInterfaces, device) + } + + if err = Routing(FwEnable); err != nil { + return nil, fmt.Errorf("Error: InitFirewall routing enable %v", err) + } + + return f, nil + +} + +//ShutdownFirewall Disables routing and NAT +//TODO: Only external routing should be disabled. 
+func (f *Firewall) ShutdownFirewall() error { + + if err := Routing(FwDisable); err != nil { + return fmt.Errorf("Error: Shutdown Firewall routing disable %v", err) + } + + for _, device := range f.ExtInterfaces { + + err := f.Delete("nat", "POSTROUTING", + "-o", device, "-j", "MASQUERADE") + + if err != nil { + return fmt.Errorf("Error: Shutdown Firewall NAT disable %v", err) + } + } + + return nil +} + +//Routing enable or disables routing +//echo 0 > /proc/sys/net/ipv4/ip_forward +//echo 1 > /proc/sys/net/ipv4/ip_forward +func Routing(action FwAction) error { + file, err := os.OpenFile(procIPFwd, os.O_WRONLY|os.O_TRUNC, 0644) + if err != nil { + return fmt.Errorf("Routing: Unable to open %v %v", procIPFwd, err) + } + defer file.Close() + + switch action { + case FwEnable: + _, err = file.WriteString("1") + case FwDisable: + _, err = file.WriteString("0") + } + + if err != nil { + return fmt.Errorf("Routing failed %v %v", action, err) + } + + return nil +} + +//ExtFwding enables or disables fwding between an externally connected interface +//and a tenant bridge (hence a tenant subnet) +//Each tenant subnet created needs explicit enabling/disabling +func (f *Firewall) ExtFwding(action FwAction, extDevice string, intDevice string) error { + switch action { + case FwEnable: + //iptables -A FORWARD -i $extDevice -o $intDevice + // -m state --state RELATED,ESTABLISHED -j ACCEPT + err := f.AppendUnique("filter", "FORWARD", + "-i", extDevice, "-o", intDevice, + "-m", "state", "--state", "RELATED,ESTABLISHED", "-j", "ACCEPT") + + if err != nil { + return fmt.Errorf("enable inbound fwding failed: %v", err) + } + + //iptables -A FORWARD -i $intDevice -o $extDevice -j ACCEPT + err = f.AppendUnique("filter", "FORWARD", + "-i", intDevice, "-o", extDevice, "-j", "ACCEPT") + if err != nil { + return fmt.Errorf("enable outbound fwding failed: %v [%s] [%s]", + err, intDevice, extDevice) + } + case FwDisable: + //iptables -D FORWARD -i $extDevice -o $intDevice + // -m state 
--state RELATED,ESTABLISHED -j ACCEPT + err := f.Delete("filter", "FORWARD", + "-i", extDevice, "-o", intDevice, + "-m", "state", "--state", "RELATED,ESTABLISHED", "-j", "ACCEPT") + + if err != nil { + return fmt.Errorf("disable inbound fwding failed: %v", err) + } + + //iptables -D FORWARD -i $intDevice -o $extDevice -j ACCEPT + err = f.Delete("filter", "FORWARD", + "-i", intDevice, "-o", extDevice, "-j", "ACCEPT") + if err != nil { + return fmt.Errorf("disable outbound fwding failed: %v [%s] [%s]", + err, intDevice, extDevice) + } + } + + return nil +} + +//ExtPortAccess Enables/Disables port access via external device and port +//to an internal IP address and port for the specified protocol +func (f *Firewall) ExtPortAccess(action FwAction, protocol string, extDevice string, + externalPort int, internalIP net.IP, internalPort int) error { + ePort := strconv.Itoa(externalPort) + iPort := strconv.Itoa(internalPort) + + var err error + switch action { + case FwEnable: + //iptables -t nat -A PREROUTING + //-i $extDevice -p $protocol --dport $extPort -j DNAT + //--to $intIP:$intPort + err = f.AppendUnique("nat", "PREROUTING", + "-i", extDevice, "-p", protocol, "--dport", ePort, "-j", "DNAT", + "--to", internalIP.String()+":"+iPort) + + if err != nil { + ok, err2 := f.Exists("nat", "PREROUTING", + "-i", extDevice, "-p", protocol, "--dport", ePort, "-j", "DNAT", + "--to", internalIP.String()+":"+iPort) + + if !ok { + err = fmt.Errorf("unable to enable ssh %v %v [%v],[%v]", + internalIP, iPort, err, err2) + } + } + case FwDisable: + //iptables -t nat -D PREROUTING + //-i $extDevice -p $protocol --dport $extPort -j DNAT + //--to $intIP:$intPort + err = f.Delete("nat", "PREROUTING", + "-i", extDevice, "-p", protocol, "--dport", ePort, "-j", "DNAT", + "--to", internalIP.String()+":"+iPort) + + if err != nil { + ok, err2 := f.Exists("nat", "PREROUTING", + "-i", extDevice, "-p", protocol, "--dport", ePort, "-j", "DNAT", + "--to", internalIP.String()+":"+iPort) + + if ok { + 
err = fmt.Errorf("unable to disable ssh %v %v [%v],[%v]", + internalIP, iPort, err, err2) + } + } + } + + if err != nil { + return fmt.Errorf("Unable to %v access for %v %v %v %v %v", + action, protocol, extDevice, internalIP, externalPort, err) + } + + return nil +} + +func ipAssign(action FwAction, ip net.IP, iface string) error { + + link, err := netlink.LinkByName(iface) + if err != nil { + return fmt.Errorf("Unable to detect interface %v %v", iface, err) + } + + addr := &netlink.Addr{IPNet: &net.IPNet{ + IP: ip.To4(), + Mask: net.IPv4Mask(255, 255, 255, 255), + }, + } + + switch action { + case FwEnable: + err := netlink.AddrAdd(link, addr) + if err == nil { + return nil + } + //Delete the address if it exists and re-add + //This is more definitive than searching the IP list + err = netlink.AddrDel(link, addr) + if err != nil { + return fmt.Errorf("Unable to assign IP to interface %s %v %v", ip, iface, err) + } + err = netlink.AddrAdd(link, addr) + if err != nil { + return fmt.Errorf("Unable to assign IP to interface %s %v %v", ip, iface, err) + } + case FwDisable: + err = netlink.AddrDel(link, addr) + if err == nil { + return nil + } + + //Check if someone deleted it + addrs, err := netlink.AddrList(link, netlink.FAMILY_V4) + if err != nil || len(addrs) == 0 { + return fmt.Errorf("Unable to unassign IP from interface %s %v %v", ip, iface, err) + } + + for _, ad := range addrs { + if ad.Equal(*addr) { + return fmt.Errorf("Unable to unassign IP from interface %s %v %v", ip, iface, err) + } + } + return nil + } + + return nil + +} + +//PublicIPAccess Enables/Disables public access to an internal IP +//TODO: Consider NATing only when exiting +//TODO: Create our own tables vs using default one +func (f *Firewall) PublicIPAccess(action FwAction, + internalIP net.IP, publicIP net.IP, extInterface string) error { + + switch action { + case FwEnable: + + err := ipAssign(FwEnable, publicIP, extInterface) + if err != nil { + return fmt.Errorf("Public IP Assignment 
failure %v", err) + } + + //iptables -t nat -A PREROUTING -d $publicIP/32 -j DNAT --to-destination $internalIP + err = f.AppendUnique("nat", "PREROUTING", + "-d", publicIP.String()+"/32", "-j", "DNAT", "--to-destination", internalIP.String()) + + if err != nil { + ok, err2 := f.Exists("nat", "PREROUTING", + "-d", publicIP.String()+"/32", "-j", "DNAT", "--to-destination", internalIP.String()) + + if !ok { + err = fmt.Errorf("Unable to perform public IP PREROUTING %v %s %s [%v],[%v]", + action, internalIP, publicIP, err, err2) + } + } + + //iptables -t nat -A POSTROUTING -s $internalIP/32 -j SNAT -–to-source $publicIP + err = f.AppendUnique("nat", "POSTROUTING", + "-s", internalIP.String()+"/32", "-j", "SNAT", "--to-source", publicIP.String()) + + if err != nil { + ok, err2 := f.Exists("nat", "POSTROUTING", + "-s", internalIP.String()+"/32", "-j", "SNAT", "--to-source", publicIP.String()) + + if !ok { + err = fmt.Errorf("Unable to perform public IP POSTROUTNG %v %s %s [%v],[%v]", + action, internalIP, publicIP, err, err2) + } + } + + return nil + + case FwDisable: + err := ipAssign(FwDisable, publicIP, extInterface) + if err != nil { + return fmt.Errorf("Public IP Assignment failure %v", err) + } + + //iptables -t nat -D PREROUTING -d $publicIP/32 -j DNAT –to-destination $internalIP + err = f.Delete("nat", "PREROUTING", + "-d", publicIP.String()+"/32", "-j", "DNAT", "--to-destination", internalIP.String()) + if err != nil { + ok, err1 := f.Exists("nat", "PREROUTING", + "-d", publicIP.String()+"/32", "-j", "DNAT", "--to-destination", internalIP.String()) + if ok { + return fmt.Errorf("Unable to disable public IP PREROUTING %s %s %v %v", + publicIP, internalIP, err, err1) + + } + } + + //iptables -t nat -D POSTROUTING -s $internalIP/32 -j SNAT –to-source $publicIP + err = f.Delete("nat", "POSTROUTING", + "-s", internalIP.String()+"/32", "-j", "SNAT", "--to-source", publicIP.String()) + + if err != nil { + ok, err1 := f.Exists("nat", "POSTROUTING", + "-s", 
internalIP.String()+"/32", "-j", "SNAT", "--to-source", publicIP.String()) + if ok { + return fmt.Errorf("Unable to disable public IP POSTROUTING %s %s %v %v", + publicIP, internalIP, err, err1) + } + } + return nil + default: + return fmt.Errorf("Invalid parameter %v", action) + } +} + +//DumpIPTables provides a utility routine that returns +//the current state of the iptables +func DumpIPTables() string { + table, err := exec.Command("iptables-save").CombinedOutput() + if err != nil { + return fmt.Sprintf("unable to iptables-save %v", err) + } + return fmt.Sprintf("iptables-save =[%s]", string(table)) +} + +//DebugSSHPortForIP provides a utility routine that returns +//the ssh port on the tenant CNCI that can be used to reach +//a tenant instance with a given IP address +func DebugSSHPortForIP(ip net.IP) (int, error) { + const natOffset = 33000 + + extPort := int(natOffset) + (int(ip[2]) << 8) + int(ip[3]) + if extPort >= int(65535) { + return -1, fmt.Errorf("invalid IP %s", ip) + } + + return extPort, nil +} diff --git a/networking/libsnnet/firewall_test.go b/networking/libsnnet/firewall_test.go new file mode 100644 index 000000000..d339616fa --- /dev/null +++ b/networking/libsnnet/firewall_test.go @@ -0,0 +1,212 @@ +// +// Copyright (c) 2016 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +package libsnnet_test + +import ( + "net" + "os" + "os/exec" + "testing" + + "github.com/01org/ciao/networking/libsnnet" +) + +var fwIf, fwIfInt string + +func fwinit() { + fwIf = os.Getenv("FWIF_ENV") + + if fwIf == "" { + fwIf = "eth0" + } + + fwIfInt = os.Getenv("FWIFINT_ENV") + + if fwIfInt == "" { + fwIfInt = "eth1" + } +} + +//Test firewall init for CNCI +// +//Performs basic checks of firewall primities +//Failure indicates problem with underlying dependencies +//which could be iptables or nftables +// +//Test should pass +func TestFw_Init(t *testing.T) { + fwinit() + fw, err := libsnnet.InitFirewall(fwIf) + + if err != nil { + t.Fatalf("Error: InitFirewall %v %v %v", fwIf, err, fw) + } + + err = fw.ShutdownFirewall() + if err != nil { + t.Errorf("Error: Unable to shutdown firewall %v", err) + } +} + +//Tests SSH port forwarding primitives +// +//Tests the primitives used by CNCI to setup/teardown port forwarding +// +//Test should pass +func TestFw_Ssh(t *testing.T) { + fwinit() + fw, err := libsnnet.InitFirewall(fwIf) + if err != nil { + t.Fatalf("Error: InitFirewall %v %v %v", fwIf, err, fw) + } + + err = fw.ExtPortAccess(libsnnet.FwEnable, "tcp", fwIf, 12345, + net.ParseIP("192.168.0.101"), 22) + + if err != nil { + t.Errorf("Error: ssh fwd failed %v", err) + } + + err = fw.ExtPortAccess(libsnnet.FwDisable, "tcp", fwIf, 12345, + net.ParseIP("192.168.0.101"), 22) + + if err != nil { + t.Errorf("Error: ssh fwd disable failed %v", err) + } + + err = fw.ShutdownFirewall() + if err != nil { + t.Errorf("Error: Unable to shutdown firewall %v", err) + } +} + +//Tests setting up NAT +// +//Test check if a NAT rule can be setup to peform outbound +//NAT from a given internal interface to a specified +//external interface (which has a dynamic IP, i.e DHCP) +// +//Test is expected to pass +func TestFw_Nat(t *testing.T) { + fwinit() + fw, err := libsnnet.InitFirewall(fwIf) + if err != nil { + t.Fatalf("Error: InitFirewall %v %v %v", fwIf, err, fw) + } + + err 
= fw.ExtFwding(libsnnet.FwEnable, fwIf, fwIfInt) + if err != nil { + t.Errorf("Error: NAT failed %v", err) + } + + err = fw.ExtFwding(libsnnet.FwDisable, fwIf, fwIfInt) + if err != nil { + t.Errorf("Error: NAT disable failed %v", err) + } + + err = fw.ShutdownFirewall() + if err != nil { + t.Errorf("Error: Unable to shutdown firewall %v", err) + } +} + +//Not fully implemented +// +//Not fully implemented +// +//Expected to pass +func TestFw_PublicIP(t *testing.T) { + fwinit() + fw, err := libsnnet.InitFirewall(fwIf) + if err != nil { + t.Fatalf("Error: InitFirewall %v %v %v", fwIf, err, fw) + } + + intIP := net.ParseIP("192.168.0.101") + pubIP := net.ParseIP("192.168.0.131") + + err = fw.PublicIPAccess(libsnnet.FwEnable, intIP, pubIP, fwIfInt) + if err != nil { + t.Errorf("%v", err) + } + + t.Logf("%s", libsnnet.DumpIPTables()) + + err = fw.PublicIPAccess(libsnnet.FwDisable, intIP, pubIP, fwIfInt) + if err != nil { + t.Errorf("%v", err) + } + + err = fw.ShutdownFirewall() + if err != nil { + t.Errorf("Error: Unable to shutdown firewall %v", err) + } +} + +//Exercises all valid CNCI Firewall APIs +// +//This tests performs the sequence of operations typically +//performed by a CNCI Agent. 
+// +//Test is expected to pass +func TestFw_All(t *testing.T) { + fwinit() + fw, err := libsnnet.InitFirewall(fwIf) + if err != nil { + t.Fatalf("Error: InitFirewall %v %v %v", fwIf, err, fw) + } + + err = fw.ExtFwding(libsnnet.FwEnable, fwIf, fwIfInt) + if err != nil { + t.Errorf("Error: NAT failed %v", err) + } + + err = fw.ExtPortAccess(libsnnet.FwEnable, "tcp", fwIf, 12345, + net.ParseIP("192.168.0.101"), 22) + + if err != nil { + t.Errorf("Error: ssh fwd failed %v", err) + } + + t.Logf("%s", libsnnet.DumpIPTables()) + + procIPFwd := "/proc/sys/net/ipv4/ip_forward" + out, err := exec.Command("cat", procIPFwd).CombinedOutput() + + if err != nil { + t.Errorf("unable to dump ip_forward %v", err) + } else { + t.Logf("ip_forward =[%s]", string(out)) + } + + err = fw.ExtPortAccess(libsnnet.FwDisable, "tcp", fwIf, 12345, + net.ParseIP("192.168.0.101"), 22) + + if err != nil { + t.Errorf("Error: ssh fwd disable failed %v", err) + } + + err = fw.ExtFwding(libsnnet.FwDisable, fwIf, fwIfInt) + if err != nil { + t.Errorf("Error: NAT disable failed %v", err) + } + + err = fw.ShutdownFirewall() + if err != nil { + t.Errorf("Error: Unable to shutdown firewall %v", err) + } +} diff --git a/networking/libsnnet/gre.go b/networking/libsnnet/gre.go new file mode 100644 index 000000000..da4cadd99 --- /dev/null +++ b/networking/libsnnet/gre.go @@ -0,0 +1,215 @@ +// +// Copyright (c) 2016 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +package libsnnet + +import ( + "net" + + "github.com/vishvananda/netlink" +) + +// NewGreTunEP is used to initialize the gre tunnel properties +// This has to be called prior to Create() or GetDevice() +func NewGreTunEP(id string, localIP net.IP, remoteIP net.IP, key uint32) (*GreTunEP, error) { + gre := &GreTunEP{} + gre.Link = &netlink.Gretap{} + gre.GlobalID = id + gre.LocalIP = localIP + gre.RemoteIP = remoteIP + gre.Key = key + return gre, nil +} + +// GetDevice associates the tunnel with an existing GRE tunnel end point +func (g *GreTunEP) GetDevice() error { + + if g.GlobalID == "" { + return netError(g, "get device unnamed gretap device") + } + + link, err := netlink.LinkByAlias(g.GlobalID) + if err != nil { + return netError(g, "get device interface does not exist: %v %v", g.GlobalID, err) + } + + gl, ok := link.(*netlink.Gretap) + if !ok { + return netError(g, "get device incorrect interface type %v %v", g.GlobalID, link.Type()) + } + g.Link = gl + g.LinkName = gl.Name + g.LocalIP = gl.Local + g.RemoteIP = gl.Remote + if gl.IKey == gl.OKey { + g.Key = gl.IKey + } else { + return netError(g, "get device incorrect params IKey != OKey %v %v", g.GlobalID, gl) + } + + return nil +} + +// Create instantiates a tunnel +func (g *GreTunEP) Create() error { + var err error + + if g.GlobalID == "" || g.Key == 0 { + return netError(g, "create cannot create an unnamed gretap device") + } + + if g.LinkName == "" { + if g.LinkName, err = GenIface(g, false); err != nil { + return netError(g, "create geniface %v, %v", g.GlobalID, err) + } + + if lerr, err := netlink.LinkByAlias(g.GlobalID); err == nil { + return netError(g, "create interface exists %v, %v", g.GlobalID, lerr) + } + } + + attrs := netlink.NewLinkAttrs() + attrs.Name = g.LinkName + + gretap := &netlink.Gretap{LinkAttrs: attrs, + IKey: g.Key, + OKey: g.Key, + Local: g.LocalIP, + Remote: g.RemoteIP, + PMtuDisc: 1, + } + + if err := netlink.LinkAdd(gretap); err != nil { + return netError(g, "create link 
add %v %v", g.GlobalID, err) + } + + link, err := netlink.LinkByName(g.LinkName) + if err != nil { + return netError(g, "create link by name %v %v", g.GlobalID, err) + } + + gl, ok := link.(*netlink.Gretap) + if !ok { + return netError(g, "create incorrect interface type %v, %v", g.GlobalID, link.Type()) + } + g.Link = gl + + if err := g.setAlias(g.GlobalID); err != nil { + g.Destroy() + return netError(g, "create link set alias %v %v", g.GlobalID, err) + } + + return nil +} + +// Destroy an existing Tunnel +func (g *GreTunEP) Destroy() error { + + if g.Link == nil || g.Link.Index == 0 { + return netError(g, "destroy invalid gre link: %v", g) + } + + if err := netlink.LinkDel(g.Link); err != nil { + return netError(g, "destroy link del %v", err) + } + + return nil +} + +// Enable the GreTunnel +func (g *GreTunEP) Enable() error { + + if g.Link == nil || g.Link.Index == 0 { + return netError(g, "enable invalid gre link: %v", g) + } + + if err := netlink.LinkSetUp(g.Link); err != nil { + return netError(g, "enable link enable %v", err) + } + + return nil + +} + +// Disable the Tunnel +func (g *GreTunEP) Disable() error { + if g.Link == nil || g.Link.Index == 0 { + return netError(g, "disable invalid gre link: %v", g) + } + + if err := netlink.LinkSetDown(g.Link); err != nil { + return netError(g, "disable link disable %v", err) + } + return nil +} + +func (g *GreTunEP) setAlias(alias string) error { + if g.Link == nil || g.Link.Index == 0 { + return netError(g, "set alias invalid gre link: %v", g) + } + + if err := netlink.LinkSetAlias(g.Link, alias); err != nil { + return netError(g, "set alias link set alias %v %v", alias, err) + } + + return nil +} + +// Attach the gretunnel to a device/bridge/switch +func (g *GreTunEP) Attach(dev interface{}) error { + + if g.Link == nil || g.Link.Index == 0 { + return netError(g, "attach gre tunnel unnitialized") + } + + br, ok := dev.(*Bridge) + if !ok { + return netError(g, "attach unknown device %v, %T", dev, dev) + } + + if 
br.Link == nil || br.Link.Index == 0 { + return netError(g, "attach bridge unnitialized") + } + + err := netlink.LinkSetMaster(g.Link, br.Link) + if err != nil { + return netError(g, "attach link set master %v", err) + } + + return nil +} + +// Detach the GreTunnel from the device/bridge it is attached to +func (g *GreTunEP) Detach(dev interface{}) error { + if g.Link == nil || g.Link.Index == 0 { + return netError(g, "detach invalid gre link: %v", g) + } + + br, ok := dev.(*Bridge) + if !ok { + return netError(g, "detach incorrect device type %v, %T", dev, dev) + } + + if br.Link == nil || br.Link.Index == 0 { + return netError(g, "detach bridge unnitialized") + } + + if err := netlink.LinkSetNoMaster(g.Link); err != nil { + return netError(g, "detach link set no master %v", err) + } + + return nil +} diff --git a/networking/libsnnet/gre_test.go b/networking/libsnnet/gre_test.go new file mode 100644 index 000000000..904deb3b9 --- /dev/null +++ b/networking/libsnnet/gre_test.go @@ -0,0 +1,100 @@ +// +// Copyright (c) 2016 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package libsnnet_test + +import ( + "net" + "testing" + + "github.com/01org/ciao/networking/libsnnet" +) + +//Test all GRE tunnel primitives +// +//Tests create, enable, disable and destroy of GRE tunnels +//Failure indicates changes in netlink or kernel and in some +//case pre-existing tunnels on the test node. 
Ensure that +//there are no existing conflicting tunnels before running +//this test +// +//Test is expected to pass +func TestGre_Basic(t *testing.T) { + id := "testgretap" + local := net.ParseIP("127.0.0.1") + remote := local + key := uint32(0xF) + + gre, _ := libsnnet.NewGreTunEP(id, local, remote, key) + + if err := gre.Create(); err != nil { + t.Errorf("GreTunnel creation failed: %v", err) + } + + if err := gre.Enable(); err != nil { + t.Errorf("GreTunnel enable failed: %v", err) + } + + if err := gre.Disable(); err != nil { + t.Errorf("GreTunnel disable failed: %v", err) + } + + if err := gre.Destroy(); err != nil { + t.Errorf("GreTunnel deletion failed: %v", err) + } +} + +//Test GRE tunnel bridge interactions +// +//Test all bridge, gre tunnel interactions including +//attach, detach, enable, disable, destroy +// +//Test is expected to pass +func TestGre_Bridge(t *testing.T) { + id := "testgretap" + local := net.ParseIP("127.0.0.1") + remote := local + key := uint32(0xF) + + gre, _ := libsnnet.NewGreTunEP(id, local, remote, key) + bridge, _ := libsnnet.NewBridge("testbridge") + + if err := gre.Create(); err != nil { + t.Errorf("Vnic Create failed: %v", err) + } + defer gre.Destroy() + + if err := bridge.Create(); err != nil { + t.Errorf("Bridge Create failed: %v", err) + } + defer bridge.Destroy() + + if err := gre.Attach(bridge); err != nil { + t.Errorf("GRE attach failed: %v", err) + } + + if err := gre.Enable(); err != nil { + t.Errorf("GRE enable failed: %v", err) + } + + if err := bridge.Enable(); err != nil { + t.Errorf("Bridge enable failed: %v", err) + } + + if err := gre.Detach(bridge); err != nil { + t.Errorf("GRE detach failed: %v", err) + } +} diff --git a/networking/libsnnet/internal_test.go b/networking/libsnnet/internal_test.go new file mode 100644 index 000000000..8ac2fbec1 --- /dev/null +++ b/networking/libsnnet/internal_test.go @@ -0,0 +1,206 @@ +// +// Copyright (c) 2016 Intel Corporation +// +// Licensed under the Apache License, Version 
2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// Internal tests (whitebox) for libsnnet +package libsnnet + +import ( + "net" + "testing" +) + +/* +func TestIfaceCollusion(t *testing.T) { + imap := make(map[string]bool) + + const ifaceLimit = 32 * 1024 + + for i := 0; i < ifaceLimit; i++ { + name, err := genIface(&Vnic{}) + + if err != nil { + t.Errorf("unknown error %v %v", i, err) + } + if imap[name] { + t.Errorf("ignore failure: Collusion detected at %v for %v", i, name) + } + imap[name] = true + } +} */ + +//Tests the implementation of the db rebuild from aliases +// +//This test uses a mix of primitives and APIs to check +//the reliability of the dbRebuild API +// +//The test is expected to pass +func TestCN_dbRebuild(t *testing.T) { + mac, _ := net.ParseMAC("CA:FE:00:01:02:03") + + vnicCfg := &VnicConfig{ + VnicIP: net.IPv4(192, 168, 1, 100), + ConcIP: net.IPv4(192, 168, 1, 1), + VnicMAC: mac, + SubnetKey: 0xF, + VnicID: "vuuid", + InstanceID: "iuuid", + TenantID: "tuuid", + SubnetID: "suuid", + ConcID: "cnciuuid", + } + + cn := &ComputeNode{} + alias := genCnVnicAliases(vnicCfg) + + bridgeAlias := alias.bridge + vnicAlias := alias.vnic + greAlias := alias.gre + + bridge, _ := NewBridge(bridgeAlias) + + if err := bridge.GetDevice(); err != nil { + // First instance to land, create the bridge and tunnel + if err := bridge.Create(); err != nil { + t.Error("Bridge creation failed: ", err) + } + defer bridge.Destroy() + + // Create the tunnel to connect to the CNCI + local := vnicCfg.VnicIP 
//Fake it for now + remote := vnicCfg.ConcIP + subnetKey := vnicCfg.SubnetKey + + gre, _ := NewGreTunEP(greAlias, local, remote, uint32(subnetKey)) + + if err := gre.Create(); err != nil { + t.Error("GRE Tunnel Creation failed: ", err) + } + defer gre.Destroy() + + if err := gre.Attach(bridge); err != nil { + t.Error("GRE Tunnel attach failed: ", err) + } + + } + + // Create the VNIC for the instance + vnic, _ := NewVnic(vnicAlias) + + if err := vnic.Create(); err != nil { + t.Error("Vnic Create failed: ", err) + } + defer vnic.Destroy() + + if err := vnic.Attach(bridge); err != nil { + t.Error("Vnic attach failed: ", err) + } + + //Add a second vnic + vnicCfg.VnicIP = net.IPv4(192, 168, 1, 101) + alias1 := genCnVnicAliases(vnicCfg) + vnic1, _ := NewVnic(alias1.vnic) + + if err := vnic1.Create(); err != nil { + t.Error("Vnic Create failed: ", err) + } + defer vnic1.Destroy() + + if err := vnic1.Attach(bridge); err != nil { + t.Error("Vnic attach failed: ", err) + } + + /* Test negative test cases */ + if err := cn.DbRebuild(nil); err == nil { + t.Error("cn.dbRebuild should have failed") + } + + cn.NetworkConfig = &NetworkConfig{ + ManagementNet: nil, + ComputeNet: nil, + Mode: GreTunnel, + } + + if err := cn.DbRebuild(nil); err == nil { + t.Error("cn.dbRebuild should have failed") + } + + /* Test positive */ + cn.cnTopology = &cnTopology{ + bridgeMap: make(map[string]map[string]bool), + linkMap: make(map[string]*linkInfo), + nameMap: make(map[string]bool), + } + + if err := cn.DbRebuild(nil); err != nil { + t.Error("cn.dbRebuild failed", err) + } + + if cnt, err := cn.dbUpdate(alias.bridge, alias1.vnic, dbDelVnic); err == nil { + if cnt != 1 { + t.Error("cn.dbUpdate failed", cnt) + } + } else { + t.Error("cn.dbUpdate failed", err) + } + + if cnt, err := cn.dbUpdate(alias.bridge, alias.vnic, dbDelVnic); err == nil { + if cnt != 0 { + t.Error("cn.dbUpdate failed", cnt) + } + } else { + t.Error("cn.dbUpdate failed", err) + } + + if cnt, err := 
cn.dbUpdate(alias.bridge, "", dbDelBr); err == nil { + if cnt != 0 { + t.Error("cn.dbUpdate failed", cnt) + } + } else { + t.Error("cn.dbUpdate failed", err) + } + + if cnt, err := cn.dbUpdate(alias.bridge, "", dbInsBr); err == nil { + if cnt != 1 { + t.Error("cn.dbUpdate failed", cnt) + } + } else { + t.Error("cn.dbUpdate failed", err) + } + + if cnt, err := cn.dbUpdate(alias.bridge, alias.vnic, dbInsVnic); err == nil { + if cnt != 1 { + t.Error("cn.dbUpdate failed", cnt) + } + } else { + t.Error("cn.dbUpdate failed", err) + } + + if cnt, err := cn.dbUpdate(alias.bridge, alias1.vnic, dbInsVnic); err == nil { + if cnt != 2 { + t.Error("cn.dbUpdate failed", cnt) + } + } else { + t.Error("cn.dbUpdate failed", err) + } + + //Negative tests + if cnt, err := cn.dbUpdate(alias.bridge, alias1.vnic, dbInsVnic); err == nil { + t.Error("cn.dbUpdate failed", cnt) + } + if cnt, err := cn.dbUpdate(alias.bridge, "", dbInsBr); err == nil { + t.Error("cn.dbUpdate failed", cnt) + } +} diff --git a/networking/libsnnet/network.go b/networking/libsnnet/network.go new file mode 100644 index 000000000..df9f28778 --- /dev/null +++ b/networking/libsnnet/network.go @@ -0,0 +1,209 @@ +// +// Copyright (c) 2016 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +package libsnnet + +import ( + "fmt" + "net" + "time" + + "github.com/vishvananda/netlink" +) + +//TODO: Add more info based on object level details and caller +func netError(dev interface{}, format string, args ...interface{}) error { + switch dev.(type) { + case Bridge, *Bridge: + return fmt.Errorf("bridge error: "+format, args...) + case Vnic, *Vnic: + return fmt.Errorf("vnic error: "+format, args...) + case CnciVnic, *CnciVnic: + return fmt.Errorf("cncivnic error: "+format, args...) + case GreTunEP, *GreTunEP: + return fmt.Errorf("gre error: "+format, args...) + } + return fmt.Errorf("network error: "+format, args...) +} + +type networkError struct { + msg string + when time.Time + category string +} + +// FatalError indicates that the system may be in an inconsistent +// state due to the error. The caller needs to initiate some sort of recovery. +// No new workloads should be scheduled on this node until the error is +// resolved +type FatalError struct { + networkError +} + +func (e FatalError) Error() string { + return e.msg +} + +//NewFatalError is a non recoverable error +func NewFatalError(s string) FatalError { + return FatalError{ + networkError: networkError{ + msg: s, + when: time.Now(), + category: "FATAL", + }, + } +} + +// APIError indicates that the networking call failed. 
However the system +// is still consistent and the networking layer has performed appropriate cleanup +type APIError struct { + networkError +} + +func (e APIError) Error() string { + return e.msg +} + +//NewAPIError is a recoverable error +func NewAPIError(s string) APIError { + return APIError{ + networkError: networkError{ + msg: s, + when: time.Now(), + category: "API", + }, + } +} + +// NetworkMode describes the networking configuration of the data center +type NetworkMode int + +const ( + // Routed means all traffic is routed with no tenant isolation except through firewall rules + Routed NetworkMode = iota + // GreTunnel means tenant instances interlinked using GRE tunnels. Full tenant isolation + GreTunnel +) + +// VnicRole specifies the role of the VNIC +type VnicRole int + +const ( + //TenantVM role is assigned to tenant VM + TenantVM VnicRole = iota //Attached to a VM in the tenant network + //TenantContainer role is assigned to a tenant container + TenantContainer //Attach to a container in the tenant network + //DataCenter role is assigned to resources owned by the data center + DataCenter //Attached to the data center network +) + +// Network describes the configuration of the data center network. +// This is the physical configurtion of the data center. +// The Management Networks carry management/control SSNTP traffic +// The Compute Network carries tenant traffic. +// In a simplistic configuratIon the management nework and the compute networks +// may be one and and the same. 
+type Network struct { + ManagementNet []net.IPNet // Enumerates all possible management subnets + ComputeNet []net.IPNet // Enumerates all possible compute subnets + FloatingPool []net.IP // Available floating IPs + PublicGw net.IP // Public IP Gateway to reach the internet + Mode NetworkMode +} + +// Attrs contains fields common to all device types +type Attrs struct { + LinkName string // Locally unique device name + TenantID string // UUID of the tenant the device belongs to + //ID string // UUID of the device. Valid if allocated by Controller + // Auto generated. Combination of UUIDs and other params. + // Typically assigned to the alias + // It is both locally unique and globally unique + // Fully qualifies the device and its role + GlobalID string + MACAddr *net.HardwareAddr +} + +// Netdev ciao generic network device representation. +// Any of these methods can be invoked provided a NewXXX +// has been performed to instatiate the device +type Netdev interface { + Create() error // Create the device that does not exist + GetDevice() error // Associate with an existing device + Destroy() error // Destroy the device + Enable() error // Enable/Activate the device + Disable() error // Disable/Deactivate the device +} + +// Attachable is a Netdev that can be attached to another. +// VNICs and GRE Tunnels can be attached to Bridges today. 
+// The routine perform basic error checks to ensure that +// they are compatible in the ciao networking setup +type Attachable interface { + Attach(*Netdev) error // Attach the device to the specified Netdev + Detach(*Netdev) error // Detach the device to the specified Netdev +} + +// Bridge represents a ciao Bridge +type Bridge struct { + Attrs + Link *netlink.Bridge +} + +// DhcpEntry is the fully qualified MAC address to IP mapping +type DhcpEntry struct { + MACAddr net.HardwareAddr + IPAddr net.IP + Hostname string // Optional +} + +//VnicAttrs represent common Vnic attributes +type VnicAttrs struct { + Attrs + Role VnicRole + InstanceID string // UUID of the instance to which it will attach + BridgeID string // ID of bridge it has attached to + IPAddr *net.IP + MTU int +} + +// Vnic represents a ciao VNIC (typically a tap or veth interface) +type Vnic struct { + VnicAttrs + Link netlink.Link // TODO: Enhance netlink library to add specific tap type to libnetlink +} + +// CnciVnic represents a ciao CNCI VNIC +// This is used to connect a CNCI instance to the network +// A CNCI VNIC will be directly attached to the data center network +// Currently we use MacVtap in VEPA mode. We can also use MacVtap in Bridge Mode +type CnciVnic struct { + VnicAttrs + Link *netlink.Macvtap +} + +// GreTunEP ciao GRE Tunnel representation +// This represents one end of the tunnel +type GreTunEP struct { + Attrs + Link *netlink.Gretap + Key uint32 + LocalIP net.IP + RemoteIP net.IP + CNCIId string // UUID of the CNCI + CNId string // UUID of the CN +} diff --git a/networking/libsnnet/scale_test.go b/networking/libsnnet/scale_test.go new file mode 100644 index 000000000..5bd45cf0a --- /dev/null +++ b/networking/libsnnet/scale_test.go @@ -0,0 +1,102 @@ +// +// Copyright (c) 2016 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package libsnnet_test + +import ( + "fmt" + "os" + "testing" + + "github.com/01org/ciao/networking/libsnnet" +) + +var scaleCfg = struct { + maxBridgesShort int + maxVnicsShort int + maxBridgesLong int + maxVnicsLong int +}{2, 64, 8, 64} + +//Internal scaling test case +// +//Test used to determine how many interfaces can be created +//on any given node. This tests the underlying kernel and node +//configuration and not the primitive itself +// +//Test may fail or take a long time to run based on the values +//configured for maX* +func TestScale(t *testing.T) { + + var maxBridges, maxVnics int + if testing.Short() { + maxBridges = scaleCfg.maxBridgesShort + maxVnics = scaleCfg.maxVnicsShort + } else { + maxBridges = scaleCfg.maxBridgesLong + maxVnics = scaleCfg.maxVnicsLong + } + + unique := false + + if os.Getenv("UNIQUE") != "" { + unique = true + t.Logf("Uniqueness test on") + } else { + t.Logf("Uniqueness test off") + } + + for b := 0; b < maxBridges; b++ { + var err error + + bridge, _ := libsnnet.NewBridge(fmt.Sprintf("testbridge%v", b)) + + if bridge.LinkName, err = libsnnet.GenIface(bridge, unique); err != nil { + t.Errorf("Bridge Interface generation failed: %v %v", err, bridge) + } + + if err := bridge.Create(); err != nil { + t.Errorf("Bridge create failed: %v %v", err, bridge) + } + defer bridge.Destroy() + + for v := 0; v < maxVnics; v++ { + vnic, _ := libsnnet.NewVnic(fmt.Sprintf("testvnic%v_%v", v, b)) + if vnic.LinkName, err = libsnnet.GenIface(vnic, unique); err != nil { + t.Errorf("VNIC Interface generation failed: %v %v", 
err, bridge) + } + + if err := vnic.Create(); err != nil { + t.Errorf("Vnic Create failed: %v %v", err, vnic) + } + + defer vnic.Destroy() + + if err := vnic.Attach(bridge); err != nil { + t.Errorf("Vnic attach failed: %v", err) + } + if err := vnic.Enable(); err != nil { + t.Errorf("Vnic enable failed: %v", err) + } + + defer vnic.Detach(bridge) + + } + if err := bridge.Enable(); err != nil { + t.Errorf("Vnic enable failed: %v", err) + } + } +} diff --git a/networking/libsnnet/tests/cncicli/cncicli.go b/networking/libsnnet/tests/cncicli/cncicli.go new file mode 100644 index 000000000..5b5920ab9 --- /dev/null +++ b/networking/libsnnet/tests/cncicli/cncicli.go @@ -0,0 +1,120 @@ +// +// Copyright (c) 2016 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +package main + +import ( + "encoding/binary" + "flag" + "fmt" + "net" + "os" + + "github.com/01org/ciao/networking/libsnnet" +) + +func main() { + + operationIn := flag.String("operation", "create", "operation reset clears all CNCI setup") + cnciSubnetIn := flag.String("cnciSubnet", "", "CNCI Physicla subnet on which the CN can be reached") + tenantSubnetIn := flag.String("tenantSubnet", "192.168.8.0/21", "Tenant subnet served by this CNCI") + cnIPIn := flag.String("cnip", "127.0.0.1", "CNCI reachable CN IP address") + + cnciIDIn := flag.String("cnciuuid", "cnciuuid", "CNCI UUID") + + flag.Parse() + + cnci := &libsnnet.Cnci{ + ID: *cnciIDIn, + } + cnci.NetworkConfig = &libsnnet.NetworkConfig{ + ManagementNet: nil, + ComputeNet: nil, + Mode: libsnnet.GreTunnel, + } + + if *cnciSubnetIn != "" { + _, cnciPhyNet, err := net.ParseCIDR(*cnciSubnetIn) + if err != nil { + fmt.Println("Error invalid CNCI IP", *cnciSubnetIn) + os.Exit(-1) + } + cnci.ManagementNet = []net.IPNet{*cnciPhyNet} + cnci.ComputeNet = []net.IPNet{*cnciPhyNet} + } + + if *operationIn == "reset" { + if err := cnci.Init(); err != nil { + fmt.Println(err) + os.Exit(-1) + } + if err := cnci.RebuildTopology(); err != nil { + fmt.Println(err) + os.Exit(-1) + } + if err := cnci.Shutdown(); err != nil { + fmt.Println(err) + os.Exit(-1) + } + os.Exit(0) + } + + _, tenantSubnet, err := net.ParseCIDR(*tenantSubnetIn) + if err != nil { + fmt.Println("Error invalid tenant subnet", *tenantSubnetIn) + os.Exit(-1) + } + subnetKey := binary.LittleEndian.Uint32(tenantSubnet.IP) + + cnIP := net.ParseIP(*cnIPIn) + if cnIP == nil { + fmt.Println("Error invalid CN IP", *cnIPIn) + os.Exit(-1) + } + + switch *operationIn { + case "create": + if err := cnci.Init(); err != nil { + fmt.Println(err) + os.Exit(-1) + } + if err := cnci.RebuildTopology(); err != nil { + fmt.Println(err) + os.Exit(-1) + } + if _, err := cnci.AddRemoteSubnet(*tenantSubnet, int(subnetKey), cnIP); err != nil { + fmt.Println(err) + os.Exit(-1) + 
} + case "delete": + if err := cnci.Init(); err != nil { + fmt.Println(err) + os.Exit(-1) + } + if err := cnci.RebuildTopology(); err != nil { + fmt.Println(err) + os.Exit(-1) + } + if err := cnci.DelRemoteSubnet(*tenantSubnet, int(subnetKey), cnIP); err != nil { + fmt.Println(err) + os.Exit(-1) + } + default: + fmt.Println("Invalid operation ", *operationIn) + } + + os.Exit(0) +} diff --git a/networking/libsnnet/tests/cncli/cncli.go b/networking/libsnnet/tests/cncli/cncli.go new file mode 100644 index 000000000..fe2587ff6 --- /dev/null +++ b/networking/libsnnet/tests/cncli/cncli.go @@ -0,0 +1,168 @@ +// +// Copyright (c) 2016 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +package main + +import ( + "encoding/binary" + "flag" + "fmt" + "net" + "os" + + "github.com/01org/ciao/networking/libsnnet" +) + +func main() { + operationIn := flag.String("operation", "create", "operation ") + nwNodeIn := flag.Bool("nwNode", false, "true if Network Node") + nwIn := flag.String("subnet", "", "subnet of the compute network") + macIn := flag.String("mac", "DE:AD:BE:EF:02:03", "VNIC MAC Address") + vnicIDIn := flag.String("vuuid", "vuuid", "VNIC UUID") + instanceIDIn := flag.String("iuuid", "iuuid", "instance UUID") + + vnicNwIn := flag.String("vnicsubnet", "127.0.0.1/24", "subnet of vnic network") + vnicIPIn := flag.String("vnicIP", "127.0.0.1", "VNIC IP") + concIPIn := flag.String("cnci", "127.0.0.1", "CNCI IP") + + tenantIDIn := flag.String("tuuid", "tuuid", "tunnel UUID") + subnetIDIn := flag.String("suuid", "suuid", "subnet UUID") + concIDIn := flag.String("cnciuuid", "cnciuuid", "CNCI UUID") + cnIDIn := flag.String("cnuuid", "cnuuid", "CN UUID") + + flag.Parse() + + _, vnet, err := net.ParseCIDR(*vnicNwIn) + if err != nil { + fmt.Println("Invalid vnic subnet ", err) + os.Exit(-1) + } + subnetKey := binary.LittleEndian.Uint32(vnet.IP) + + cn := &libsnnet.ComputeNode{ + NetworkConfig: &libsnnet.NetworkConfig{ + Mode: libsnnet.GreTunnel, + }, + } + + if *nwIn != "" { + _, snet, err := net.ParseCIDR(*nwIn) + if err != nil { + fmt.Println("Invalid subnet ", err) + os.Exit(-1) + } + cn.ManagementNet = []net.IPNet{*snet} + cn.ComputeNet = []net.IPNet{*snet} + } + cn.ID = *cnIDIn + + if err := cn.Init(); err != nil { + fmt.Println(err) + os.Exit(-1) + } + + if err := cn.DbRebuild(nil); err != nil { + fmt.Println(err) + os.Exit(-1) + } + + vnicIP := net.ParseIP(*vnicIPIn) + if vnicIP == nil { + fmt.Println("Invalid vnic IP") + os.Exit(-1) + } + + //Create a compute VNIC + if !*nwNodeIn { + + concIP := net.ParseIP(*concIPIn) + if concIP == nil { + fmt.Println("Invalid Conc IP") + os.Exit(-1) + } + + //From YAML on instance init + mac, _ := 
net.ParseMAC(*macIn) + vnicCfg := &libsnnet.VnicConfig{ + VnicRole: libsnnet.TenantVM, + VnicIP: vnicIP, + ConcIP: concIP, + VnicMAC: mac, + Subnet: *vnet, + SubnetKey: int(subnetKey), + VnicID: *vnicIDIn, + InstanceID: *instanceIDIn, + TenantID: *tenantIDIn, + SubnetID: *subnetIDIn, + ConcID: *concIDIn, + } + + switch *operationIn { + case "create": + fmt.Println("Creating VNIC for Workload") + if vnic, ssntpEvent, err := cn.CreateVnic(vnicCfg); err != nil { + fmt.Println(err) + os.Exit(-1) + } else { + if ssntpEvent != nil { + fmt.Println("VNIC:=", vnic.LinkName, "SSNTP Event:=", ssntpEvent) + } else { + fmt.Println("VNIC:=", vnic.LinkName) + } + } + fmt.Println("Subnet Key:= ", subnetKey) + case "delete": + fmt.Println("Deleting VNIC for Workload") + if ssntpEvent, err := cn.DestroyVnic(vnicCfg); err != nil { + fmt.Println(err) + os.Exit(-1) + } else { + if ssntpEvent != nil { + fmt.Println("SSNTP Event:=", ssntpEvent) + } + } + } + os.Exit(0) + } + + //Network Node + if *nwNodeIn { + mac, _ := net.ParseMAC(*macIn) + vnicCfg := &libsnnet.VnicConfig{ + VnicRole: libsnnet.DataCenter, + VnicMAC: mac, + VnicID: *vnicIDIn, + InstanceID: *instanceIDIn, + TenantID: *tenantIDIn, + } + + switch *operationIn { + case "create": + if cvnic, err := cn.CreateCnciVnic(vnicCfg); err != nil { + fmt.Println(err) + os.Exit(-1) + } else { + fmt.Println("CVNIC:=", cvnic.LinkName, cvnic) + } + case "delete": + if err := cn.DestroyCnciVnic(vnicCfg); err != nil { + fmt.Println(err) + os.Exit(-1) + } + } + + } +} diff --git a/networking/libsnnet/tests/cncli/run_vm.sh b/networking/libsnnet/tests/cncli/run_vm.sh new file mode 100644 index 000000000..e23056cc9 --- /dev/null +++ b/networking/libsnnet/tests/cncli/run_vm.sh @@ -0,0 +1,39 @@ +#!/bin/bash + +if [ -z "$1" ]; then + IMAGE=clear.img +else + IMAGE="$1" +fi + +if [ -z "$2" ]; then + VNIC="ERROR" +else + VNIC="$2" +fi + +if [ -z "$3" ]; then + MAC="DE:AD:DE:AD:DE:AD" +else + MAC="$3" +fi + +if [[ "$IMAGE" =~ .xz$ ]]; then + >&2 echo 
"File \"$IMAGE\" is still xz compressed. Uncompress it first with \"unxz\"" + exit 1 +fi + +if [ ! -f "$IMAGE" ]; then + >&2 echo "Can't find image file \"$IMAGE\"" + exit 1 +fi +rm -f debug.log + +qemu-system-x86_64 \ + -enable-kvm \ + -bios OVMF.fd \ + -smp cpus=4,cores=2 -cpu host \ + -vga none -nographic \ + -drive file="$IMAGE",if=virtio,aio=threads \ + -net nic,model=virtio,macaddr=$3 -net tap,ifname=$2,script=no,downscript=no \ + -debugcon file:debug.log -global isa-debugcon.iobase=0x402 diff --git a/networking/libsnnet/tests/parallel/parallel.go b/networking/libsnnet/tests/parallel/parallel.go new file mode 100644 index 000000000..639a17d89 --- /dev/null +++ b/networking/libsnnet/tests/parallel/parallel.go @@ -0,0 +1,21 @@ +// +// Copyright (c) 2016 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package parallel + +func main() { + +} diff --git a/networking/libsnnet/tests/parallel/parallel_test.go b/networking/libsnnet/tests/parallel/parallel_test.go new file mode 100644 index 000000000..0965254f9 --- /dev/null +++ b/networking/libsnnet/tests/parallel/parallel_test.go @@ -0,0 +1,160 @@ +// +// Copyright (c) 2016 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package parallel + +import ( + "net" + "os" + "strconv" + "sync" + "testing" + + "github.com/01org/ciao/networking/libsnnet" +) + +var cnNetEnv string + +var scaleCfg = struct { + maxBridgesShort int + maxVnicsShort int + maxBridgesLong int + maxVnicsLong int +}{2, 64, 64, 64} + +func cninit() { + cnNetEnv = os.Getenv("SNNET_ENV") + + if cnNetEnv == "" { + cnNetEnv = "10.3.66.0/24" + } +} + +func TestCN_Parallel(t *testing.T) { + + cn := &libsnnet.ComputeNode{} + + cn.NetworkConfig = &libsnnet.NetworkConfig{ + ManagementNet: nil, + ComputeNet: nil, + Mode: libsnnet.GreTunnel, + } + + cn.ID = "cnuuid" + + cninit() + _, mnet, _ := net.ParseCIDR(cnNetEnv) + + //From YAML, on agent init + mgtNet := []net.IPNet{*mnet} + cn.ManagementNet = mgtNet + cn.ComputeNet = mgtNet + + if err := cn.Init(); err != nil { + t.Fatal("ERROR: cn.Init failed", err) + } + if err := cn.ResetNetwork(); err != nil { + t.Error("ERROR: cn.ResetNetwork failed", err) + } + if err := cn.DbRebuild(nil); err != nil { + t.Fatal("ERROR: cn.dbRebuild failed") + } + + //From YAML on instance init + tenantID := "tenantuuid" + concIP := net.IPv4(192, 168, 254, 1) + + var maxBridges, maxVnics int + if testing.Short() { + maxBridges = scaleCfg.maxBridgesShort + maxVnics = scaleCfg.maxVnicsShort + } else { + maxBridges = scaleCfg.maxBridgesLong + maxVnics = scaleCfg.maxVnicsLong + } + + channelSize := maxBridges*maxVnics + 1 + createCh := make(chan *libsnnet.VnicConfig, channelSize) + destroyCh := make(chan *libsnnet.VnicConfig, channelSize) + + for s3 := 1; s3 <= maxBridges; s3++ { + 
s4 := 0 + _, tenantNet, _ := net.ParseCIDR("192.168." + strconv.Itoa(s3) + "." + strconv.Itoa(s4) + "/24") + subnetID := "suuid_" + strconv.Itoa(s3) + "_" + strconv.Itoa(s4) + + for s4 := 2; s4 <= maxVnics; s4++ { + + vnicIP := net.IPv4(192, 168, byte(s3), byte(s4)) + mac, _ := net.ParseMAC("CA:FE:00:01:02:03") + + vnicID := "vuuid_" + strconv.Itoa(s3) + "_" + strconv.Itoa(s4) + instanceID := "iuuid_" + strconv.Itoa(s3) + "_" + strconv.Itoa(s4) + vnicCfg := &libsnnet.VnicConfig{ + VnicIP: vnicIP, + ConcIP: concIP, + VnicMAC: mac, + Subnet: *tenantNet, + SubnetKey: s3, + VnicID: vnicID, + InstanceID: instanceID, + SubnetID: subnetID, + TenantID: tenantID, + ConcID: "cnciuuid", + } + + createCh <- vnicCfg + destroyCh <- vnicCfg + } + } + + close(createCh) + close(destroyCh) + + var wg sync.WaitGroup + wg.Add(len(createCh)) + + for vnicCfg := range createCh { + go func(vnicCfg *libsnnet.VnicConfig) { + defer wg.Done() + if vnicCfg == nil { + t.Errorf("WARNING: VNIC nil") + return + } + if _, _, _, err := cn.CreateVnicV2(vnicCfg); err != nil { + t.Fatal("ERROR: cn.CreateVnicV2 failed", err) + } + }(vnicCfg) + } + + wg.Wait() + + wg.Add(len(destroyCh)) + + for vnicCfg := range destroyCh { + go func(vnicCfg *libsnnet.VnicConfig) { + defer wg.Done() + if vnicCfg == nil { + t.Errorf("WARNING: VNIC nil") + return + } + if _, _, err := cn.DestroyVnicV2(vnicCfg); err != nil { + t.Fatal("ERROR: cn.DestroyVnicV2 failed event", vnicCfg, err) + } + }(vnicCfg) + } + + wg.Wait() +} diff --git a/networking/libsnnet/tests/snnetcli/cli.go b/networking/libsnnet/tests/snnetcli/cli.go new file mode 100644 index 000000000..58a9c5236 --- /dev/null +++ b/networking/libsnnet/tests/snnetcli/cli.go @@ -0,0 +1,409 @@ +// +// Copyright (c) 2016 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package main + +import ( + "encoding/binary" + "fmt" + "net" + "os" + "strconv" + + "github.com/01org/ciao/networking/libsnnet" +) + +func main() { + var err error + fmt.Println("Args :", os.Args) + + if len(os.Args) < 3 { + fmt.Println("Usage:") + fmt.Println(" show ") + fmt.Println(" ") + fmt.Println(" ") + fmt.Println(" create gretap ") + fmt.Println(" destroy gretap ") + fmt.Println(" create instance ") + fmt.Println(" create conc ") + fmt.Println(" addcn conc ") + fmt.Println(" destroy conc ") + fmt.Println(" test bridge ") + fmt.Println(" test vnic ") + os.Exit(1) + } + + arg1 := os.Args[1] + arg2 := os.Args[2] + + if arg1 == "show" { + if err = libsnnet.Show(arg2); err != nil { + fmt.Println(err) + os.Exit(1) + } + os.Exit(0) + } + + if len(os.Args) < 4 { + fmt.Println("Invalid args", os.Args) + os.Exit(1) + } + + arg3 := os.Args[3] + + switch { + case arg1 == "test" && arg2 == "bridge": + bridge, _ := libsnnet.NewBridge(arg3) + + if err = bridge.Create(); err == nil { + if err = bridge.Enable(); err == nil { + if err = bridge.Disable(); err == nil { + err = bridge.Destroy() + } + } + } + + case arg1 == "test" && arg2 == "vnic": + vnic, _ := libsnnet.NewVnic(arg3) + + if err = vnic.Create(); err == nil { + if err = vnic.Enable(); err == nil { + if err = vnic.Disable(); err == nil { + err = vnic.Destroy() + } + } + } + + case arg1 == "create" && arg2 == "bridge": + bridge, _ := libsnnet.NewBridge(arg3) + + err = bridge.Create() + + case arg1 == "enable" && arg2 == "bridge": + bridge, _ := libsnnet.NewBridge(arg3) + + if err = 
bridge.GetDevice(); err == nil { + err = bridge.Enable() + } + + case arg1 == "disable" && arg2 == "bridge": + bridge, _ := libsnnet.NewBridge(arg3) + + if err = bridge.GetDevice(); err == nil { + err = bridge.Disable() + } + + case arg1 == "destroy" && arg2 == "bridge": + bridge, _ := libsnnet.NewBridge(arg3) + + if err = bridge.GetDevice(); err == nil { + err = bridge.Destroy() + } + + case arg1 == "create" && arg2 == "vnic": + vnic, _ := libsnnet.NewVnic(arg3) + + err = vnic.Create() + + case arg1 == "enable" && arg2 == "vnic": + vnic, _ := libsnnet.NewVnic(arg3) + + if err = vnic.GetDevice(); err == nil { + vnic.Enable() + } + + case arg1 == "disable" && arg2 == "vnic": + vnic, _ := libsnnet.NewVnic(arg3) + + if err = vnic.GetDevice(); err == nil { + err = vnic.Disable() + } + + case arg1 == "destroy" && arg2 == "vnic": + vnic, _ := libsnnet.NewVnic(arg3) + if err = vnic.GetDevice(); err == nil { + err = vnic.Destroy() + } + + case arg1 == "attach": + bridge, _ := libsnnet.NewBridge(arg2) + vnic, _ := libsnnet.NewVnic(arg3) + + if err = bridge.GetDevice(); err == nil { + if err = vnic.GetDevice(); err == nil { + err = vnic.Attach(bridge) + } + } + case arg1 == "detach": + bridge, _ := libsnnet.NewBridge(arg2) + vnic, _ := libsnnet.NewVnic(arg3) + + if err = bridge.GetDevice(); err == nil { + if err = vnic.GetDevice(); err == nil { + vnic.Detach(bridge) + } + } + + case arg1 == "create" && arg2 == "gretap": + var key uint64 + + id := arg3 + arg4 := os.Args[4] + arg5 := os.Args[5] + arg6 := os.Args[6] + local := net.ParseIP(arg4) + remote := net.ParseIP(arg5) + + if local == nil || remote == nil { + err = fmt.Errorf("Bad args for gretap") + } + + key, err = strconv.ParseUint(arg6, 10, 32) + + if err == nil { + var gre *libsnnet.GreTunEP + gre, err = libsnnet.NewGreTunEP(id, local, remote, uint32(key)) + if err == nil { + err = gre.Create() + } + } + + case arg1 == "destroy" && arg2 == "gretap": + var gre *libsnnet.GreTunEP + id := arg3 + if gre, err = 
libsnnet.NewGreTunEP(id, nil, nil, 0); err == nil { + if err = gre.GetDevice(); err == nil { + err = gre.Destroy() + } + } + + case arg1 == "create" && arg2 == "conc": + var subnet *net.IPNet + + id := arg3 + + tenantUUID := id + concUUID := id + cnUUID := os.Args[6] + subnetUUID := id + reserved := 10 + + cnciIP := net.ParseIP(os.Args[4]) + if cnciIP == nil { + fmt.Println("Error invalid CNCI IP") + goto some_error + } + + if _, subnet, err = net.ParseCIDR(os.Args[5]); err != nil { + goto some_error + } + subnetKey := binary.LittleEndian.Uint32(subnet.IP) + fmt.Println("Subnet Key := ", subnetKey) + + cnIP := net.ParseIP(os.Args[6]) + if cnIP == nil { + fmt.Println("Error invalid CN IP") + goto some_error + } + + bridgeAlias := fmt.Sprintf("br_%s_%s_%s", tenantUUID, subnetUUID, concUUID) + bridge, _ := libsnnet.NewBridge(bridgeAlias) + + if err = bridge.GetDevice(); err != nil { + if err = bridge.Create(); err != nil { + fmt.Println("Error bridge create", err) + goto some_error + } + } + + if err = bridge.Enable(); err != nil { + fmt.Println("Error bridge enable", err) + goto some_error + } + + d, _ := libsnnet.NewDnsmasq(bridgeAlias, tenantUUID, *subnet, reserved, bridge) + + d.Stop() //Ignore any errors + + if err = d.Start(); err != nil { + fmt.Println("Error starting dnsmasq", err) + goto some_error + } + + greAlias := fmt.Sprintf("gre_%s_%s_%s", tenantUUID, subnetUUID, cnUUID) + gre, _ := libsnnet.NewGreTunEP(greAlias, cnciIP, cnIP, subnetKey) + + if err = gre.Create(); err != nil { + fmt.Println("Error gre create", err) + goto some_error + } + + if err = gre.Attach(bridge); err != nil { + fmt.Println("Error gre attach", err) + goto some_error + } + + if err = gre.Enable(); err != nil { + fmt.Println("Error gre enable", err) + goto some_error + } + fmt.Println("Concentrator setup sucessfully") + + case arg1 == "destroy" && arg2 == "conc": + id := arg3 + tenantUUID := id + concUUID := id + cnUUID := os.Args[6] + subnetUUID := id + + cnciIP := 
net.ParseIP(os.Args[4]) + if cnciIP == nil { + fmt.Println("Invalid CNCI IP") + goto some_error + } + + var subnet *net.IPNet + if _, subnet, err = net.ParseCIDR(os.Args[5]); err != nil { + goto some_error + } + subnetKey := binary.LittleEndian.Uint32(subnet.IP) + fmt.Println("Subnet Key := ", subnetKey) + + cnIP := net.ParseIP(os.Args[6]) + if cnIP == nil { + fmt.Println("Error invalid CN IP") + goto some_error + } + + bridgeAlias := fmt.Sprintf("br_%s_%s_%s", tenantUUID, subnetUUID, concUUID) + bridge, _ := libsnnet.NewBridge(bridgeAlias) + + greAlias := fmt.Sprintf("gre_%s_%s_%s", tenantUUID, subnetUUID, cnUUID) + gre, _ := libsnnet.NewGreTunEP(greAlias, cnciIP, cnIP, subnetKey) + + if err = gre.GetDevice(); err != nil { + fmt.Println("Error gre getdevice", err) + goto some_error + } + + if err = gre.Detach(bridge); err != nil { + fmt.Println("Error gre detach", err) + goto some_error + } + + if err = gre.Destroy(); err != nil { + fmt.Println("Error gre destroy", err) + goto some_error + } + + if err = bridge.GetDevice(); err != nil { + fmt.Println("Warning bridge does not exist", err) + err = nil + //goto some_error + } else { + var subnet *net.IPNet + reserved := 10 + + if _, subnet, err = net.ParseCIDR(os.Args[5]); err != nil { + goto some_error + } + + d, _ := libsnnet.NewDnsmasq(bridgeAlias, tenantUUID, *subnet, reserved, bridge) + + if err = d.Stop(); err != nil { + fmt.Println("Error cannot stop dnsmasq", err) + } + if err = bridge.Destroy(); err != nil { + fmt.Println("Error bridge destroy", err) + goto some_error + } + } + + fmt.Println("Concentrator deleted sucessfully") + + case arg1 == "create" && arg2 == "instance": + id := arg3 + cnciIP := net.ParseIP(os.Args[4]) + cnIP := net.ParseIP(os.Args[5]) + + tenantUUID := id + instanceUUID := id + concUUID := id + cnUUID := id + subnetUUID := id + subnetKey := uint32(0xF) + + bridgeAlias := fmt.Sprintf("br_%s_%s_%s", tenantUUID, subnetUUID, concUUID) + greAlias := fmt.Sprintf("gre_%s_%s_%s", tenantUUID, 
subnetUUID, cnUUID) + vnicAlias := fmt.Sprintf("vnic_%s_%s_%s", tenantUUID, instanceUUID, concUUID) + + if err != nil { + goto some_error + } + + bridge, _ := libsnnet.NewBridge(bridgeAlias) + + if err := bridge.Create(); err != nil { + goto some_error + } + + if err := bridge.Enable(); err != nil { + goto some_error + } + + gre, _ := libsnnet.NewGreTunEP(greAlias, cnIP, cnciIP, subnetKey) + + if err := gre.Create(); err != nil { + goto some_error + } + + if err := gre.Attach(bridge); err != nil { + goto some_error + } + + if err := gre.Enable(); err != nil { + goto some_error + } + + vnic, _ := libsnnet.NewVnic(vnicAlias) + + if err := vnic.Create(); err != nil { + goto some_error + } + + if err := vnic.Attach(bridge); err != nil { + goto some_error + } + + if err := vnic.Enable(); err != nil { + goto some_error + } + fmt.Println("Instance sucessfully created with name", vnic.LinkName) + + default: + fmt.Println("Unknown args", os.Args) + os.Exit(1) + } + +some_error: + + if err != nil { + fmt.Println(err) + os.Exit(1) + } + + os.Exit(0) +} diff --git a/networking/libsnnet/tests/snnetcli/run_all_tests.sh b/networking/libsnnet/tests/snnetcli/run_all_tests.sh new file mode 100644 index 000000000..980064182 --- /dev/null +++ b/networking/libsnnet/tests/snnetcli/run_all_tests.sh @@ -0,0 +1,21 @@ +sudo ./snnetcli create bridge br00 +sudo ./snnetcli show br00 +sudo ./snnetcli enable bridge br00 +sudo ./snnetcli disable bridge br00 +sudo ./snnetcli destroy bridge br00 +sudo ./snnetcli create vnic vnic00 +sudo ./snnetcli show vnic00 +sudo ./snnetcli enable vnic vnic00 +sudo ./snnetcli disable vnic vnic00 +sudo ./snnetcli destroy vnic vnic00 +sudo ./snnetcli create bridge br00 +sudo ./snnetcli create vnic vnic00 +sudo ./snnetcli attach br00 vnic00 +brctl show +ip link +sudo ./snnetcli detach br00 vnic00 +brctl show +ip link +sudo ./snnetcli destroy bridge br00 +sudo ./snnetcli destroy vnic vnic00 +ip link diff --git a/networking/libsnnet/utils.go 
b/networking/libsnnet/utils.go new file mode 100644 index 000000000..e609636c9 --- /dev/null +++ b/networking/libsnnet/utils.go @@ -0,0 +1,89 @@ +// +// Copyright (c) 2016 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package libsnnet + +import ( + "fmt" + "math/rand" + "time" + + "github.com/vishvananda/netlink" +) + +const ( + prefixBridge = "sbr" + prefixVnic = "svn" + prefixVnicCont = "svp" + prefixVnicHost = "svn" + prefixCnciVnic = "svc" + prefixGretap = "sgt" +) + +const ifaceRetryLimit = 10 + +var ( + ifaceRseed rand.Source + ifaceRsrc *rand.Rand +) + +func init() { + ifaceRseed = rand.NewSource(time.Now().UnixNano()) + ifaceRsrc = rand.New(ifaceRseed) +} + +// GenIface generates locally unique interface names based on the +// type of device passed in. 
It will additionally check if the +// interface name exists on the localhost based on unique +// When uniqueness is specified error will be returned +// if it is not possible to generate a locally unique name within +// a finite number of retries +func GenIface(device interface{}, unique bool) (string, error) { + var prefix string + + switch d := device.(type) { + case *Bridge: + prefix = prefixBridge + case *Vnic: + switch d.Role { + case TenantVM: + prefix = prefixVnic + case TenantContainer: + prefix = prefixVnicHost + } + case *GreTunEP: + prefix = prefixGretap + case *CnciVnic: + prefix = prefixCnciVnic + default: + return "", fmt.Errorf("invalid device type %T %v", device, device) + } + + if !unique { + iface := fmt.Sprintf("%s_%x", prefix, ifaceRsrc.Uint32()) + return iface, nil + } + + for i := 0; i < ifaceRetryLimit; i++ { + iface := fmt.Sprintf("%s_%x", prefix, ifaceRsrc.Uint32()) + if _, err := netlink.LinkByName(iface); err != nil { + return iface, nil + } + } + + // The chances of the failure are remote + return "", fmt.Errorf("unable to create unique interface name") +} diff --git a/networking/libsnnet/vendor/github.com/vishvananda/netlink/.travis.yml b/networking/libsnnet/vendor/github.com/vishvananda/netlink/.travis.yml new file mode 100644 index 000000000..1970069d5 --- /dev/null +++ b/networking/libsnnet/vendor/github.com/vishvananda/netlink/.travis.yml @@ -0,0 +1,3 @@ +language: go +install: + - go get github.com/vishvananda/netns diff --git a/networking/libsnnet/vendor/github.com/vishvananda/netlink/LICENSE b/networking/libsnnet/vendor/github.com/vishvananda/netlink/LICENSE new file mode 100644 index 000000000..9f64db858 --- /dev/null +++ b/networking/libsnnet/vendor/github.com/vishvananda/netlink/LICENSE @@ -0,0 +1,192 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + Copyright 2014 Vishvananda Ishaya. + Copyright 2014 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/networking/libsnnet/vendor/github.com/vishvananda/netlink/Makefile b/networking/libsnnet/vendor/github.com/vishvananda/netlink/Makefile new file mode 100644 index 000000000..b3250185f --- /dev/null +++ b/networking/libsnnet/vendor/github.com/vishvananda/netlink/Makefile @@ -0,0 +1,29 @@ +DIRS := \ + . \ + nl + +DEPS = \ + github.com/vishvananda/netns + +uniq = $(if $1,$(firstword $1) $(call uniq,$(filter-out $(firstword $1),$1))) +testdirs = $(call uniq,$(foreach d,$(1),$(dir $(wildcard $(d)/*_test.go)))) +goroot = $(addprefix ../../../,$(1)) +unroot = $(subst ../../../,,$(1)) +fmt = $(addprefix fmt-,$(1)) + +all: fmt + +$(call goroot,$(DEPS)): + go get $(call unroot,$@) + +.PHONY: $(call testdirs,$(DIRS)) +$(call testdirs,$(DIRS)): + sudo -E go test -v github.com/vishvananda/netlink/$@ + +$(call fmt,$(call testdirs,$(DIRS))): + ! 
gofmt -l $(subst fmt-,,$@)/*.go | grep '' + +.PHONY: fmt +fmt: $(call fmt,$(call testdirs,$(DIRS))) + +test: fmt $(call goroot,$(DEPS)) $(call testdirs,$(DIRS)) diff --git a/networking/libsnnet/vendor/github.com/vishvananda/netlink/README.md b/networking/libsnnet/vendor/github.com/vishvananda/netlink/README.md new file mode 100644 index 000000000..8cd50a93b --- /dev/null +++ b/networking/libsnnet/vendor/github.com/vishvananda/netlink/README.md @@ -0,0 +1,89 @@ +# netlink - netlink library for go # + +[![Build Status](https://travis-ci.org/vishvananda/netlink.png?branch=master)](https://travis-ci.org/vishvananda/netlink) [![GoDoc](https://godoc.org/github.com/vishvananda/netlink?status.svg)](https://godoc.org/github.com/vishvananda/netlink) + +The netlink package provides a simple netlink library for go. Netlink +is the interface a user-space program in linux uses to communicate with +the kernel. It can be used to add and remove interfaces, set ip addresses +and routes, and configure ipsec. Netlink communication requires elevated +privileges, so in most cases this code needs to be run as root. Since +low-level netlink messages are inscrutable at best, the library attempts +to provide an api that is loosely modeled on the CLI provied by iproute2. +Actions like `ip link add` will be accomplished via a similarly named +function like AddLink(). This library began its life as a fork of the +netlink functionality in +[docker/libcontainer](https://github.com/docker/libcontainer) but was +heavily rewritten to improve testability, performance, and to add new +functionality like ipsec xfrm handling. 
+ +## Local Build and Test ## + +You can use go get command: + + go get github.com/vishvananda/netlink + +Testing dependencies: + + go get github.com/vishvananda/netns + +Testing (requires root): + + sudo -E go test github.com/vishvananda/netlink + +## Examples ## + +Add a new bridge and add eth1 into it: + +```go +package main + +import ( + "net" + "github.com/vishvananda/netlink" +) + +func main() { + la := netlink.NewLinkAttrs() + la.Name = "foo" + mybridge := &netlink.Bridge{la}} + _ := netlink.LinkAdd(mybridge) + eth1, _ := netlink.LinkByName("eth1") + netlink.LinkSetMaster(eth1, mybridge) +} + +``` +Note `NewLinkAttrs` constructor, it sets default values in structure. For now +it sets only `TxQLen` to `-1`, so kernel will set default by itself. If you're +using simple initialization(`LinkAttrs{Name: "foo"}`) `TxQLen` will be set to +`0` unless you specify it like `LinkAttrs{Name: "foo", TxQLen: 1000}`. + +Add a new ip address to loopback: + +```go +package main + +import ( + "net" + "github.com/vishvananda/netlink" +) + +func main() { + lo, _ := netlink.LinkByName("lo") + addr, _ := netlink.ParseAddr("169.254.169.254/32") + netlink.AddrAdd(lo, addr) +} + +``` + +## Future Work ## + +Many pieces of netlink are not yet fully supported in the high-level +interface. Aspects of virtually all of the high-level objects don't exist. +Many of the underlying primitives are there, so its a matter of putting +the right fields into the high-level objects and making sure that they +are serialized and deserialized correctly in the Add and List methods. + +There are also a few pieces of low level netlink functionality that still +need to be implemented. Routing rules are not in place and some of the +more advanced link types. Hopefully there is decent structure and testing +in place to make these fairly straightforward to add. 
diff --git a/networking/libsnnet/vendor/github.com/vishvananda/netlink/addr.go b/networking/libsnnet/vendor/github.com/vishvananda/netlink/addr.go new file mode 100644 index 000000000..079fff3b3 --- /dev/null +++ b/networking/libsnnet/vendor/github.com/vishvananda/netlink/addr.go @@ -0,0 +1,45 @@ +package netlink + +import ( + "fmt" + "net" + "strings" +) + +// Addr represents an IP address from netlink. Netlink ip addresses +// include a mask, so it stores the address as a net.IPNet. +type Addr struct { + *net.IPNet + Label string + Flags int + Scope int +} + +// String returns $ip/$netmask $label +func (a Addr) String() string { + return strings.TrimSpace(fmt.Sprintf("%s %s", a.IPNet, a.Label)) +} + +// ParseAddr parses the string representation of an address in the +// form $ip/$netmask $label. The label portion is optional +func ParseAddr(s string) (*Addr, error) { + label := "" + parts := strings.Split(s, " ") + if len(parts) > 1 { + s = parts[0] + label = parts[1] + } + m, err := ParseIPNet(s) + if err != nil { + return nil, err + } + return &Addr{IPNet: m, Label: label}, nil +} + +// Equal returns true if both Addrs have the same net.IPNet value. +func (a Addr) Equal(x Addr) bool { + sizea, _ := a.Mask.Size() + sizeb, _ := x.Mask.Size() + // ignore label for comparison + return a.IP.Equal(x.IP) && sizea == sizeb +} diff --git a/networking/libsnnet/vendor/github.com/vishvananda/netlink/addr_linux.go b/networking/libsnnet/vendor/github.com/vishvananda/netlink/addr_linux.go new file mode 100644 index 000000000..9373e9c5a --- /dev/null +++ b/networking/libsnnet/vendor/github.com/vishvananda/netlink/addr_linux.go @@ -0,0 +1,142 @@ +package netlink + +import ( + "fmt" + "net" + "strings" + "syscall" + + "github.com/vishvananda/netlink/nl" +) + +// IFA_FLAGS is a u32 attribute. +const IFA_FLAGS = 0x8 + +// AddrAdd will add an IP address to a link device. 
+// Equivalent to: `ip addr add $addr dev $link` +func AddrAdd(link Link, addr *Addr) error { + + req := nl.NewNetlinkRequest(syscall.RTM_NEWADDR, syscall.NLM_F_CREATE|syscall.NLM_F_EXCL|syscall.NLM_F_ACK) + return addrHandle(link, addr, req) +} + +// AddrDel will delete an IP address from a link device. +// Equivalent to: `ip addr del $addr dev $link` +func AddrDel(link Link, addr *Addr) error { + req := nl.NewNetlinkRequest(syscall.RTM_DELADDR, syscall.NLM_F_ACK) + return addrHandle(link, addr, req) +} + +func addrHandle(link Link, addr *Addr, req *nl.NetlinkRequest) error { + base := link.Attrs() + if addr.Label != "" && !strings.HasPrefix(addr.Label, base.Name) { + return fmt.Errorf("label must begin with interface name") + } + ensureIndex(base) + + family := nl.GetIPFamily(addr.IP) + + msg := nl.NewIfAddrmsg(family) + msg.Index = uint32(base.Index) + msg.Scope = uint8(addr.Scope) + prefixlen, _ := addr.Mask.Size() + msg.Prefixlen = uint8(prefixlen) + req.AddData(msg) + + var addrData []byte + if family == FAMILY_V4 { + addrData = addr.IP.To4() + } else { + addrData = addr.IP.To16() + } + + localData := nl.NewRtAttr(syscall.IFA_LOCAL, addrData) + req.AddData(localData) + + addressData := nl.NewRtAttr(syscall.IFA_ADDRESS, addrData) + req.AddData(addressData) + + if addr.Flags != 0 { + b := make([]byte, 4) + native.PutUint32(b, uint32(addr.Flags)) + flagsData := nl.NewRtAttr(IFA_FLAGS, b) + req.AddData(flagsData) + } + + if addr.Label != "" { + labelData := nl.NewRtAttr(syscall.IFA_LABEL, nl.ZeroTerminated(addr.Label)) + req.AddData(labelData) + } + + _, err := req.Execute(syscall.NETLINK_ROUTE, 0) + return err +} + +// AddrList gets a list of IP addresses in the system. +// Equivalent to: `ip addr show`. +// The list can be filtered by link and ip family. 
+func AddrList(link Link, family int) ([]Addr, error) { + req := nl.NewNetlinkRequest(syscall.RTM_GETADDR, syscall.NLM_F_DUMP) + msg := nl.NewIfInfomsg(family) + req.AddData(msg) + + msgs, err := req.Execute(syscall.NETLINK_ROUTE, syscall.RTM_NEWADDR) + if err != nil { + return nil, err + } + + index := 0 + if link != nil { + base := link.Attrs() + ensureIndex(base) + index = base.Index + } + + var res []Addr + for _, m := range msgs { + msg := nl.DeserializeIfAddrmsg(m) + + if link != nil && msg.Index != uint32(index) { + // Ignore messages from other interfaces + continue + } + + attrs, err := nl.ParseRouteAttr(m[msg.Len():]) + if err != nil { + return nil, err + } + + var local, dst *net.IPNet + var addr Addr + for _, attr := range attrs { + switch attr.Attr.Type { + case syscall.IFA_ADDRESS: + dst = &net.IPNet{ + IP: attr.Value, + Mask: net.CIDRMask(int(msg.Prefixlen), 8*len(attr.Value)), + } + case syscall.IFA_LOCAL: + local = &net.IPNet{ + IP: attr.Value, + Mask: net.CIDRMask(int(msg.Prefixlen), 8*len(attr.Value)), + } + case syscall.IFA_LABEL: + addr.Label = string(attr.Value[:len(attr.Value)-1]) + case IFA_FLAGS: + addr.Flags = int(native.Uint32(attr.Value[0:4])) + } + } + + // IFA_LOCAL should be there but if not, fall back to IFA_ADDRESS + if local != nil { + addr.IPNet = local + } else { + addr.IPNet = dst + } + addr.Scope = int(msg.Scope) + + res = append(res, addr) + } + + return res, nil +} diff --git a/networking/libsnnet/vendor/github.com/vishvananda/netlink/addr_test.go b/networking/libsnnet/vendor/github.com/vishvananda/netlink/addr_test.go new file mode 100644 index 000000000..1735b8349 --- /dev/null +++ b/networking/libsnnet/vendor/github.com/vishvananda/netlink/addr_test.go @@ -0,0 +1,88 @@ +package netlink + +import ( + "net" + "syscall" + "testing" +) + +func TestAddr(t *testing.T) { + var address = &net.IPNet{net.IPv4(127, 0, 0, 2), net.CIDRMask(24, 32)} + var addrTests = []struct { + addr *Addr + expected *Addr + }{ + { + &Addr{IPNet: 
address}, + &Addr{IPNet: address, Label: "lo", Scope: syscall.RT_SCOPE_UNIVERSE, Flags: syscall.IFA_F_PERMANENT}, + }, + { + &Addr{IPNet: address, Label: "local"}, + &Addr{IPNet: address, Label: "local", Scope: syscall.RT_SCOPE_UNIVERSE, Flags: syscall.IFA_F_PERMANENT}, + }, + { + &Addr{IPNet: address, Flags: syscall.IFA_F_OPTIMISTIC}, + &Addr{IPNet: address, Label: "lo", Flags: syscall.IFA_F_OPTIMISTIC | syscall.IFA_F_PERMANENT, Scope: syscall.RT_SCOPE_UNIVERSE}, + }, + { + &Addr{IPNet: address, Flags: syscall.IFA_F_OPTIMISTIC | syscall.IFA_F_DADFAILED}, + &Addr{IPNet: address, Label: "lo", Flags: syscall.IFA_F_OPTIMISTIC | syscall.IFA_F_DADFAILED | syscall.IFA_F_PERMANENT, Scope: syscall.RT_SCOPE_UNIVERSE}, + }, + { + &Addr{IPNet: address, Scope: syscall.RT_SCOPE_NOWHERE}, + &Addr{IPNet: address, Label: "lo", Flags: syscall.IFA_F_PERMANENT, Scope: syscall.RT_SCOPE_NOWHERE}, + }, + } + + tearDown := setUpNetlinkTest(t) + defer tearDown() + + link, err := LinkByName("lo") + if err != nil { + t.Fatal(err) + } + + for _, tt := range addrTests { + if err = AddrAdd(link, tt.addr); err != nil { + t.Fatal(err) + } + + addrs, err := AddrList(link, FAMILY_ALL) + if err != nil { + t.Fatal(err) + } + + if len(addrs) != 1 { + t.Fatal("Address not added properly") + } + + if !addrs[0].Equal(*tt.expected) { + t.Fatalf("Address ip no set properly, got=%s, expected=%s", addrs[0], tt.expected) + } + + if addrs[0].Label != tt.expected.Label { + t.Fatalf("Address label not set properly, got=%s, expected=%s", addrs[0].Label, tt.expected.Label) + } + + if addrs[0].Flags != tt.expected.Flags { + t.Fatalf("Address flags not set properly, got=%d, expected=%d", addrs[0].Flags, tt.expected.Flags) + } + + if addrs[0].Scope != tt.expected.Scope { + t.Fatalf("Address scope not set properly, got=%d, expected=%d", addrs[0].Scope, tt.expected.Scope) + } + + if err = AddrDel(link, tt.addr); err != nil { + t.Fatal(err) + } + + addrs, err = AddrList(link, FAMILY_ALL) + if err != nil { + 
t.Fatal(err) + } + + if len(addrs) != 0 { + t.Fatal("Address not removed properly") + } + } +} diff --git a/networking/libsnnet/vendor/github.com/vishvananda/netlink/class.go b/networking/libsnnet/vendor/github.com/vishvananda/netlink/class.go new file mode 100644 index 000000000..35bdb3310 --- /dev/null +++ b/networking/libsnnet/vendor/github.com/vishvananda/netlink/class.go @@ -0,0 +1,110 @@ +package netlink + +import ( + "fmt" +) + +type Class interface { + Attrs() *ClassAttrs + Type() string +} + +// Class represents a netlink class. A filter is associated with a link, +// has a handle and a parent. The root filter of a device should have a +// parent == HANDLE_ROOT. +type ClassAttrs struct { + LinkIndex int + Handle uint32 + Parent uint32 + Leaf uint32 +} + +func (q ClassAttrs) String() string { + return fmt.Sprintf("{LinkIndex: %d, Handle: %s, Parent: %s, Leaf: %s}", q.LinkIndex, HandleStr(q.Handle), HandleStr(q.Parent), q.Leaf) +} + +type HtbClassAttrs struct { + // TODO handle all attributes + Rate uint64 + Ceil uint64 + Buffer uint32 + Cbuffer uint32 + Quantum uint32 + Level uint32 + Prio uint32 +} + +func (q HtbClassAttrs) String() string { + return fmt.Sprintf("{Rate: %d, Ceil: %d, Buffer: %d, Cbuffer: %d}", q.Rate, q.Ceil, q.Buffer, q.Cbuffer) +} + +// Htb class +type HtbClass struct { + ClassAttrs + Rate uint64 + Ceil uint64 + Buffer uint32 + Cbuffer uint32 + Quantum uint32 + Level uint32 + Prio uint32 +} + +func NewHtbClass(attrs ClassAttrs, cattrs HtbClassAttrs) *HtbClass { + mtu := 1600 + rate := cattrs.Rate / 8 + ceil := cattrs.Ceil / 8 + buffer := cattrs.Buffer + cbuffer := cattrs.Cbuffer + if ceil == 0 { + ceil = rate + } + + if buffer == 0 { + buffer = uint32(float64(rate)/Hz() + float64(mtu)) + } + buffer = uint32(Xmittime(rate, buffer)) + + if cbuffer == 0 { + cbuffer = uint32(float64(ceil)/Hz() + float64(mtu)) + } + cbuffer = uint32(Xmittime(ceil, cbuffer)) + + return &HtbClass{ + ClassAttrs: attrs, + Rate: rate, + Ceil: ceil, + Buffer: 
buffer, + Cbuffer: cbuffer, + Quantum: 10, + Level: 0, + Prio: 0, + } +} + +func (q HtbClass) String() string { + return fmt.Sprintf("{Rate: %d, Ceil: %d, Buffer: %d, Cbuffer: %d}", q.Rate, q.Ceil, q.Buffer, q.Cbuffer) +} + +func (class *HtbClass) Attrs() *ClassAttrs { + return &class.ClassAttrs +} + +func (class *HtbClass) Type() string { + return "htb" +} + +// GenericClass classes represent types that are not currently understood +// by this netlink library. +type GenericClass struct { + ClassAttrs + ClassType string +} + +func (class *GenericClass) Attrs() *ClassAttrs { + return &class.ClassAttrs +} + +func (class *GenericClass) Type() string { + return class.ClassType +} diff --git a/networking/libsnnet/vendor/github.com/vishvananda/netlink/class_linux.go b/networking/libsnnet/vendor/github.com/vishvananda/netlink/class_linux.go new file mode 100644 index 000000000..84828da10 --- /dev/null +++ b/networking/libsnnet/vendor/github.com/vishvananda/netlink/class_linux.go @@ -0,0 +1,168 @@ +package netlink + +import ( + "syscall" + + "github.com/vishvananda/netlink/nl" +) + +// ClassDel will delete a class from the system. +// Equivalent to: `tc class del $class` +func ClassDel(class Class) error { + return classModify(syscall.RTM_DELTCLASS, 0, class) +} + +// ClassChange will change a class in place +// Equivalent to: `tc class change $class` +// The parent and handle MUST NOT be changed. + +func ClassChange(class Class) error { + return classModify(syscall.RTM_NEWTCLASS, 0, class) +} + +// ClassReplace will replace a class to the system. +// quivalent to: `tc class replace $class` +// The handle MAY be changed. +// If a class already exist with this parent/handle pair, the class is changed. +// If a class does not already exist with this parent/handle, a new class is created. +func ClassReplace(class Class) error { + return classModify(syscall.RTM_NEWTCLASS, syscall.NLM_F_CREATE, class) +} + +// ClassAdd will add a class to the system. 
+// Equivalent to: `tc class add $class` +func ClassAdd(class Class) error { + return classModify( + syscall.RTM_NEWTCLASS, + syscall.NLM_F_CREATE|syscall.NLM_F_EXCL, + class, + ) +} + +func classModify(cmd, flags int, class Class) error { + req := nl.NewNetlinkRequest(cmd, flags|syscall.NLM_F_ACK) + base := class.Attrs() + msg := &nl.TcMsg{ + Family: nl.FAMILY_ALL, + Ifindex: int32(base.LinkIndex), + Handle: base.Handle, + Parent: base.Parent, + } + req.AddData(msg) + + if cmd != syscall.RTM_DELTCLASS { + if err := classPayload(req, class); err != nil { + return err + } + } + _, err := req.Execute(syscall.NETLINK_ROUTE, 0) + return err +} + +func classPayload(req *nl.NetlinkRequest, class Class) error { + req.AddData(nl.NewRtAttr(nl.TCA_KIND, nl.ZeroTerminated(class.Type()))) + + options := nl.NewRtAttr(nl.TCA_OPTIONS, nil) + if htb, ok := class.(*HtbClass); ok { + opt := nl.TcHtbCopt{} + opt.Rate.Rate = uint32(htb.Rate) + opt.Ceil.Rate = uint32(htb.Ceil) + opt.Buffer = htb.Buffer + opt.Cbuffer = htb.Cbuffer + opt.Quantum = htb.Quantum + opt.Level = htb.Level + opt.Prio = htb.Prio + // TODO: Handle Debug properly. For now default to 0 + nl.NewRtAttrChild(options, nl.TCA_HTB_PARMS, opt.Serialize()) + } + req.AddData(options) + return nil +} + +// ClassList gets a list of classes in the system. +// Equivalent to: `tc class show`. +// Generally returns nothing if link and parent are not specified. 
+func ClassList(link Link, parent uint32) ([]Class, error) { + req := nl.NewNetlinkRequest(syscall.RTM_GETTCLASS, syscall.NLM_F_DUMP) + msg := &nl.TcMsg{ + Family: nl.FAMILY_ALL, + Parent: parent, + } + if link != nil { + base := link.Attrs() + ensureIndex(base) + msg.Ifindex = int32(base.Index) + } + req.AddData(msg) + + msgs, err := req.Execute(syscall.NETLINK_ROUTE, syscall.RTM_NEWTCLASS) + if err != nil { + return nil, err + } + + var res []Class + for _, m := range msgs { + msg := nl.DeserializeTcMsg(m) + + attrs, err := nl.ParseRouteAttr(m[msg.Len():]) + if err != nil { + return nil, err + } + + base := ClassAttrs{ + LinkIndex: int(msg.Ifindex), + Handle: msg.Handle, + Parent: msg.Parent, + } + + var class Class + classType := "" + for _, attr := range attrs { + switch attr.Attr.Type { + case nl.TCA_KIND: + classType = string(attr.Value[:len(attr.Value)-1]) + switch classType { + case "htb": + class = &HtbClass{} + default: + class = &GenericClass{ClassType: classType} + } + case nl.TCA_OPTIONS: + switch classType { + case "htb": + data, err := nl.ParseRouteAttr(attr.Value) + if err != nil { + return nil, err + } + _, err = parseHtbClassData(class, data) + if err != nil { + return nil, err + } + } + } + } + *class.Attrs() = base + res = append(res, class) + } + + return res, nil +} + +func parseHtbClassData(class Class, data []syscall.NetlinkRouteAttr) (bool, error) { + htb := class.(*HtbClass) + detailed := false + for _, datum := range data { + switch datum.Attr.Type { + case nl.TCA_HTB_PARMS: + opt := nl.DeserializeTcHtbCopt(datum.Value) + htb.Rate = uint64(opt.Rate.Rate) + htb.Ceil = uint64(opt.Ceil.Rate) + htb.Buffer = opt.Buffer + htb.Cbuffer = opt.Cbuffer + htb.Quantum = opt.Quantum + htb.Level = opt.Level + htb.Prio = opt.Prio + } + } + return detailed, nil +} diff --git a/networking/libsnnet/vendor/github.com/vishvananda/netlink/class_test.go b/networking/libsnnet/vendor/github.com/vishvananda/netlink/class_test.go new file mode 100644 index 
000000000..92fdd4a9e --- /dev/null +++ b/networking/libsnnet/vendor/github.com/vishvananda/netlink/class_test.go @@ -0,0 +1,406 @@ +package netlink + +import ( + "testing" +) + +func TestClassAddDel(t *testing.T) { + tearDown := setUpNetlinkTest(t) + defer tearDown() + if err := LinkAdd(&Ifb{LinkAttrs{Name: "foo"}}); err != nil { + t.Fatal(err) + } + if err := LinkAdd(&Ifb{LinkAttrs{Name: "bar"}}); err != nil { + t.Fatal(err) + } + link, err := LinkByName("foo") + if err != nil { + t.Fatal(err) + } + if err := LinkSetUp(link); err != nil { + t.Fatal(err) + } + attrs := QdiscAttrs{ + LinkIndex: link.Attrs().Index, + Handle: MakeHandle(0xffff, 0), + Parent: HANDLE_ROOT, + } + qdisc := NewHtb(attrs) + if err := QdiscAdd(qdisc); err != nil { + t.Fatal(err) + } + qdiscs, err := QdiscList(link) + if err != nil { + t.Fatal(err) + } + if len(qdiscs) != 1 { + t.Fatal("Failed to add qdisc") + } + _, ok := qdiscs[0].(*Htb) + if !ok { + t.Fatal("Qdisc is the wrong type") + } + + classattrs := ClassAttrs{ + LinkIndex: link.Attrs().Index, + Parent: MakeHandle(0xffff, 0), + Handle: MakeHandle(0xffff, 2), + } + + htbclassattrs := HtbClassAttrs{ + Rate: 1234000, + Cbuffer: 1690, + } + class := NewHtbClass(classattrs, htbclassattrs) + if err := ClassAdd(class); err != nil { + t.Fatal(err) + } + classes, err := ClassList(link, MakeHandle(0xffff, 0)) + if err != nil { + t.Fatal(err) + } + if len(classes) != 1 { + t.Fatal("Failed to add class") + } + + htb, ok := classes[0].(*HtbClass) + if !ok { + t.Fatal("Class is the wrong type") + } + if htb.Rate != class.Rate { + t.Fatal("Rate doesn't match") + } + if htb.Ceil != class.Ceil { + t.Fatal("Ceil doesn't match") + } + if htb.Buffer != class.Buffer { + t.Fatal("Buffer doesn't match") + } + if htb.Cbuffer != class.Cbuffer { + t.Fatal("Cbuffer doesn't match") + } + + qattrs := QdiscAttrs{ + LinkIndex: link.Attrs().Index, + Handle: MakeHandle(0x2, 0), + Parent: MakeHandle(0xffff, 2), + } + nattrs := NetemQdiscAttrs{ + Latency: 20000, + 
Loss: 23.4, + Duplicate: 14.3, + LossCorr: 8.34, + Jitter: 1000, + DelayCorr: 12.3, + ReorderProb: 23.4, + CorruptProb: 10.0, + CorruptCorr: 10, + } + qdiscnetem := NewNetem(qattrs, nattrs) + if err := QdiscAdd(qdiscnetem); err != nil { + t.Fatal(err) + } + + qdiscs, err = QdiscList(link) + if err != nil { + t.Fatal(err) + } + if len(qdiscs) != 2 { + t.Fatal("Failed to add qdisc") + } + _, ok = qdiscs[0].(*Htb) + if !ok { + t.Fatal("Qdisc is the wrong type") + } + + netem, ok := qdiscs[1].(*Netem) + if !ok { + t.Fatal("Qdisc is the wrong type") + } + // Compare the record we got from the list with the one we created + if netem.Loss != qdiscnetem.Loss { + t.Fatal("Loss does not match") + } + if netem.Latency != qdiscnetem.Latency { + t.Fatal("Latency does not match") + } + if netem.CorruptProb != qdiscnetem.CorruptProb { + t.Fatal("CorruptProb does not match") + } + if netem.Jitter != qdiscnetem.Jitter { + t.Fatal("Jitter does not match") + } + if netem.LossCorr != qdiscnetem.LossCorr { + t.Fatal("Loss does not match") + } + if netem.DuplicateCorr != qdiscnetem.DuplicateCorr { + t.Fatal("DuplicateCorr does not match") + } + + // Deletion + if err := ClassDel(class); err != nil { + t.Fatal(err) + } + classes, err = ClassList(link, MakeHandle(0xffff, 0)) + if err != nil { + t.Fatal(err) + } + if len(classes) != 0 { + t.Fatal("Failed to remove class") + } + if err := QdiscDel(qdisc); err != nil { + t.Fatal(err) + } + qdiscs, err = QdiscList(link) + if err != nil { + t.Fatal(err) + } + if len(qdiscs) != 0 { + t.Fatal("Failed to remove qdisc") + } +} + +func TestHtbClassAddHtbClassChangeDel(t *testing.T) { + /** + This test first set up a interface ans set up a Htb qdisc + A HTB class is attach to it and a Netem qdisc is attached to that class + Next, we test changing the HTB class in place and confirming the Netem is + still attached. We also check that invoting ClassChange with a non-existing + class will fail. + Finally, we test ClassReplace. 
We confirm it correctly behave like + ClassChange when the parent/handle pair exists and that it will create a + new class if the handle is modified. + */ + tearDown := setUpNetlinkTest(t) + defer tearDown() + if err := LinkAdd(&Ifb{LinkAttrs{Name: "foo"}}); err != nil { + t.Fatal(err) + } + link, err := LinkByName("foo") + if err != nil { + t.Fatal(err) + } + if err := LinkSetUp(link); err != nil { + t.Fatal(err) + } + attrs := QdiscAttrs{ + LinkIndex: link.Attrs().Index, + Handle: MakeHandle(0xffff, 0), + Parent: HANDLE_ROOT, + } + qdisc := NewHtb(attrs) + if err := QdiscAdd(qdisc); err != nil { + t.Fatal(err) + } + qdiscs, err := QdiscList(link) + if err != nil { + t.Fatal(err) + } + if len(qdiscs) != 1 { + t.Fatal("Failed to add qdisc") + } + _, ok := qdiscs[0].(*Htb) + if !ok { + t.Fatal("Qdisc is the wrong type") + } + + classattrs := ClassAttrs{ + LinkIndex: link.Attrs().Index, + Parent: MakeHandle(0xffff, 0), + Handle: MakeHandle(0xffff, 2), + } + + htbclassattrs := HtbClassAttrs{ + Rate: 1234000, + Cbuffer: 1690, + } + class := NewHtbClass(classattrs, htbclassattrs) + if err := ClassAdd(class); err != nil { + t.Fatal(err) + } + classes, err := ClassList(link, 0) + if err != nil { + t.Fatal(err) + } + if len(classes) != 1 { + t.Fatal("Failed to add class") + } + + htb, ok := classes[0].(*HtbClass) + if !ok { + t.Fatal("Class is the wrong type") + } + + qattrs := QdiscAttrs{ + LinkIndex: link.Attrs().Index, + Handle: MakeHandle(0x2, 0), + Parent: MakeHandle(0xffff, 2), + } + nattrs := NetemQdiscAttrs{ + Latency: 20000, + Loss: 23.4, + Duplicate: 14.3, + LossCorr: 8.34, + Jitter: 1000, + DelayCorr: 12.3, + ReorderProb: 23.4, + CorruptProb: 10.0, + CorruptCorr: 10, + } + qdiscnetem := NewNetem(qattrs, nattrs) + if err := QdiscAdd(qdiscnetem); err != nil { + t.Fatal(err) + } + + qdiscs, err = QdiscList(link) + if err != nil { + t.Fatal(err) + } + if len(qdiscs) != 2 { + t.Fatal("Failed to add qdisc") + } + + _, ok = qdiscs[1].(*Netem) + if !ok { + 
t.Fatal("Qdisc is the wrong type") + } + + // Change + // For change to work, the handle and parent cannot be changed. + + // First, test it fails if we change the Handle. + old_handle := classattrs.Handle + classattrs.Handle = MakeHandle(0xffff, 3) + class = NewHtbClass(classattrs, htbclassattrs) + if err := ClassChange(class); err == nil { + t.Fatal("ClassChange should not work when using a different handle.") + } + // It should work with the same handle + classattrs.Handle = old_handle + htbclassattrs.Rate = 4321000 + class = NewHtbClass(classattrs, htbclassattrs) + if err := ClassChange(class); err != nil { + t.Fatal(err) + } + + classes, err = ClassList(link, MakeHandle(0xffff, 0)) + if err != nil { + t.Fatal(err) + } + if len(classes) != 1 { + t.Fatalf( + "1 class expected, %d found", + len(classes), + ) + } + + htb, ok = classes[0].(*HtbClass) + if !ok { + t.Fatal("Class is the wrong type") + } + // Verify that the rate value has changed. + if htb.Rate != class.Rate { + t.Fatal("Rate did not get changed while changing the class.") + } + + // Check that we still have the netem child qdisc + qdiscs, err = QdiscList(link) + if err != nil { + t.Fatal(err) + } + + if len(qdiscs) != 2 { + t.Fatalf("2 qdisc expected, %d found", len(qdiscs)) + } + _, ok = qdiscs[0].(*Htb) + if !ok { + t.Fatal("Qdisc is the wrong type") + } + + _, ok = qdiscs[1].(*Netem) + if !ok { + t.Fatal("Qdisc is the wrong type") + } + + // Replace + // First replace by keeping the same handle, class will be changed. + // Then, replace by providing a new handle, n new class will be created. 
+ + // Replace acting as Change + class = NewHtbClass(classattrs, htbclassattrs) + if err := ClassReplace(class); err != nil { + t.Fatal("Failed to replace class that is existing.") + } + + classes, err = ClassList(link, MakeHandle(0xffff, 0)) + if err != nil { + t.Fatal(err) + } + if len(classes) != 1 { + t.Fatalf( + "1 class expected, %d found", + len(classes), + ) + } + + htb, ok = classes[0].(*HtbClass) + if !ok { + t.Fatal("Class is the wrong type") + } + // Verify that the rate value has changed. + if htb.Rate != class.Rate { + t.Fatal("Rate did not get changed while changing the class.") + } + + // It should work with the same handle + classattrs.Handle = MakeHandle(0xffff, 3) + class = NewHtbClass(classattrs, htbclassattrs) + if err := ClassReplace(class); err != nil { + t.Fatal(err) + } + + classes, err = ClassList(link, MakeHandle(0xffff, 0)) + if err != nil { + t.Fatal(err) + } + if len(classes) != 2 { + t.Fatalf( + "2 classes expected, %d found", + len(classes), + ) + } + + htb, ok = classes[1].(*HtbClass) + if !ok { + t.Fatal("Class is the wrong type") + } + // Verify that the rate value has changed. 
+ if htb.Rate != class.Rate { + t.Fatal("Rate did not get changed while changing the class.") + } + + // Deletion + for _, class := range classes { + if err := ClassDel(class); err != nil { + t.Fatal(err) + } + } + + classes, err = ClassList(link, MakeHandle(0xffff, 0)) + if err != nil { + t.Fatal(err) + } + if len(classes) != 0 { + t.Fatal("Failed to remove class") + } + if err := QdiscDel(qdisc); err != nil { + t.Fatal(err) + } + qdiscs, err = QdiscList(link) + if err != nil { + t.Fatal(err) + } + if len(qdiscs) != 0 { + t.Fatal("Failed to remove qdisc") + } +} diff --git a/networking/libsnnet/vendor/github.com/vishvananda/netlink/filter.go b/networking/libsnnet/vendor/github.com/vishvananda/netlink/filter.go new file mode 100644 index 000000000..80ef34ded --- /dev/null +++ b/networking/libsnnet/vendor/github.com/vishvananda/netlink/filter.go @@ -0,0 +1,140 @@ +package netlink + +import ( + "errors" + "fmt" + "github.com/vishvananda/netlink/nl" +) + +type Filter interface { + Attrs() *FilterAttrs + Type() string +} + +// Filter represents a netlink filter. A filter is associated with a link, +// has a handle and a parent. The root filter of a device should have a +// parent == HANDLE_ROOT. 
+type FilterAttrs struct { + LinkIndex int + Handle uint32 + Parent uint32 + Priority uint16 // lower is higher priority + Protocol uint16 // syscall.ETH_P_* +} + +func (q FilterAttrs) String() string { + return fmt.Sprintf("{LinkIndex: %d, Handle: %s, Parent: %s, Priority: %d, Protocol: %d}", q.LinkIndex, HandleStr(q.Handle), HandleStr(q.Parent), q.Priority, q.Protocol) +} + +// U32 filters on many packet related properties +type U32 struct { + FilterAttrs + // Currently only supports redirecting to another interface + RedirIndex int +} + +func (filter *U32) Attrs() *FilterAttrs { + return &filter.FilterAttrs +} + +func (filter *U32) Type() string { + return "u32" +} + +type FilterFwAttrs struct { + ClassId uint32 + InDev string + Mask uint32 + Index uint32 + Buffer uint32 + Mtu uint32 + Mpu uint16 + Rate uint32 + AvRate uint32 + PeakRate uint32 + Action int + Overhead uint16 + LinkLayer int +} + +// FwFilter filters on firewall marks +type Fw struct { + FilterAttrs + ClassId uint32 + Police nl.TcPolice + InDev string + // TODO Action + Mask uint32 + AvRate uint32 + Rtab [256]uint32 + Ptab [256]uint32 +} + +func NewFw(attrs FilterAttrs, fattrs FilterFwAttrs) (*Fw, error) { + var rtab [256]uint32 + var ptab [256]uint32 + rcell_log := -1 + pcell_log := -1 + avrate := fattrs.AvRate / 8 + police := nl.TcPolice{} + police.Rate.Rate = fattrs.Rate / 8 + police.PeakRate.Rate = fattrs.PeakRate / 8 + buffer := fattrs.Buffer + linklayer := nl.LINKLAYER_ETHERNET + + if fattrs.LinkLayer != nl.LINKLAYER_UNSPEC { + linklayer = fattrs.LinkLayer + } + + police.Action = int32(fattrs.Action) + if police.Rate.Rate != 0 { + police.Rate.Mpu = fattrs.Mpu + police.Rate.Overhead = fattrs.Overhead + if CalcRtable(&police.Rate, rtab, rcell_log, fattrs.Mtu, linklayer) < 0 { + return nil, errors.New("TBF: failed to calculate rate table.") + } + police.Burst = uint32(Xmittime(uint64(police.Rate.Rate), uint32(buffer))) + } + police.Mtu = fattrs.Mtu + if police.PeakRate.Rate != 0 { + 
police.PeakRate.Mpu = fattrs.Mpu + police.PeakRate.Overhead = fattrs.Overhead + if CalcRtable(&police.PeakRate, ptab, pcell_log, fattrs.Mtu, linklayer) < 0 { + return nil, errors.New("POLICE: failed to calculate peak rate table.") + } + } + + return &Fw{ + FilterAttrs: attrs, + ClassId: fattrs.ClassId, + InDev: fattrs.InDev, + Mask: fattrs.Mask, + Police: police, + AvRate: avrate, + Rtab: rtab, + Ptab: ptab, + }, nil +} + +func (filter *Fw) Attrs() *FilterAttrs { + return &filter.FilterAttrs +} + +func (filter *Fw) Type() string { + return "fw" +} + +// GenericFilter filters represent types that are not currently understood +// by this netlink library. +type GenericFilter struct { + FilterAttrs + FilterType string +} + +func (filter *GenericFilter) Attrs() *FilterAttrs { + return &filter.FilterAttrs +} + +func (filter *GenericFilter) Type() string { + return filter.FilterType +} diff --git a/networking/libsnnet/vendor/github.com/vishvananda/netlink/filter_linux.go b/networking/libsnnet/vendor/github.com/vishvananda/netlink/filter_linux.go new file mode 100644 index 000000000..1dc688b12 --- /dev/null +++ b/networking/libsnnet/vendor/github.com/vishvananda/netlink/filter_linux.go @@ -0,0 +1,322 @@ +package netlink + +import ( + "bytes" + "encoding/binary" + "fmt" + "syscall" + + "github.com/vishvananda/netlink/nl" +) + +// FilterDel will delete a filter from the system. +// Equivalent to: `tc filter del $filter` +func FilterDel(filter Filter) error { + req := nl.NewNetlinkRequest(syscall.RTM_DELTFILTER, syscall.NLM_F_ACK) + base := filter.Attrs() + msg := &nl.TcMsg{ + Family: nl.FAMILY_ALL, + Ifindex: int32(base.LinkIndex), + Handle: base.Handle, + Parent: base.Parent, + Info: MakeHandle(base.Priority, nl.Swap16(base.Protocol)), + } + req.AddData(msg) + + _, err := req.Execute(syscall.NETLINK_ROUTE, 0) + return err +} + +// FilterAdd will add a filter to the system. 
+// Equivalent to: `tc filter add $filter` +func FilterAdd(filter Filter) error { + native = nl.NativeEndian() + req := nl.NewNetlinkRequest(syscall.RTM_NEWTFILTER, syscall.NLM_F_CREATE|syscall.NLM_F_EXCL|syscall.NLM_F_ACK) + base := filter.Attrs() + msg := &nl.TcMsg{ + Family: nl.FAMILY_ALL, + Ifindex: int32(base.LinkIndex), + Handle: base.Handle, + Parent: base.Parent, + Info: MakeHandle(base.Priority, nl.Swap16(base.Protocol)), + } + req.AddData(msg) + req.AddData(nl.NewRtAttr(nl.TCA_KIND, nl.ZeroTerminated(filter.Type()))) + + options := nl.NewRtAttr(nl.TCA_OPTIONS, nil) + if u32, ok := filter.(*U32); ok { + // match all + sel := nl.TcU32Sel{ + Nkeys: 1, + Flags: nl.TC_U32_TERMINAL, + } + sel.Keys = append(sel.Keys, nl.TcU32Key{}) + nl.NewRtAttrChild(options, nl.TCA_U32_SEL, sel.Serialize()) + actions := nl.NewRtAttrChild(options, nl.TCA_U32_ACT, nil) + table := nl.NewRtAttrChild(actions, nl.TCA_ACT_TAB, nil) + nl.NewRtAttrChild(table, nl.TCA_KIND, nl.ZeroTerminated("mirred")) + // redirect to other interface + mir := nl.TcMirred{ + Action: nl.TC_ACT_STOLEN, + Eaction: nl.TCA_EGRESS_REDIR, + Ifindex: uint32(u32.RedirIndex), + } + aopts := nl.NewRtAttrChild(table, nl.TCA_OPTIONS, nil) + nl.NewRtAttrChild(aopts, nl.TCA_MIRRED_PARMS, mir.Serialize()) + } else if fw, ok := filter.(*Fw); ok { + if fw.Mask != 0 { + b := make([]byte, 4) + native.PutUint32(b, fw.Mask) + nl.NewRtAttrChild(options, nl.TCA_FW_MASK, b) + } + if fw.InDev != "" { + nl.NewRtAttrChild(options, nl.TCA_FW_INDEV, nl.ZeroTerminated(fw.InDev)) + } + if (fw.Police != nl.TcPolice{}) { + + police := nl.NewRtAttrChild(options, nl.TCA_FW_POLICE, nil) + nl.NewRtAttrChild(police, nl.TCA_POLICE_TBF, fw.Police.Serialize()) + if (fw.Police.Rate != nl.TcRateSpec{}) { + payload := SerializeRtab(fw.Rtab) + nl.NewRtAttrChild(police, nl.TCA_POLICE_RATE, payload) + } + if (fw.Police.PeakRate != nl.TcRateSpec{}) { + payload := SerializeRtab(fw.Ptab) + nl.NewRtAttrChild(police, nl.TCA_POLICE_PEAKRATE, payload) + } + 
} + if fw.ClassId != 0 { + b := make([]byte, 4) + native.PutUint32(b, fw.ClassId) + nl.NewRtAttrChild(options, nl.TCA_FW_CLASSID, b) + } + } + + req.AddData(options) + _, err := req.Execute(syscall.NETLINK_ROUTE, 0) + return err +} + +// FilterList gets a list of filters in the system. +// Equivalent to: `tc filter show`. +// Generally retunrs nothing if link and parent are not specified. +func FilterList(link Link, parent uint32) ([]Filter, error) { + req := nl.NewNetlinkRequest(syscall.RTM_GETTFILTER, syscall.NLM_F_DUMP) + msg := &nl.TcMsg{ + Family: nl.FAMILY_ALL, + Parent: parent, + } + if link != nil { + base := link.Attrs() + ensureIndex(base) + msg.Ifindex = int32(base.Index) + } + req.AddData(msg) + + msgs, err := req.Execute(syscall.NETLINK_ROUTE, syscall.RTM_NEWTFILTER) + if err != nil { + return nil, err + } + + var res []Filter + for _, m := range msgs { + msg := nl.DeserializeTcMsg(m) + + attrs, err := nl.ParseRouteAttr(m[msg.Len():]) + if err != nil { + return nil, err + } + + base := FilterAttrs{ + LinkIndex: int(msg.Ifindex), + Handle: msg.Handle, + Parent: msg.Parent, + } + base.Priority, base.Protocol = MajorMinor(msg.Info) + base.Protocol = nl.Swap16(base.Protocol) + + var filter Filter + filterType := "" + detailed := false + for _, attr := range attrs { + switch attr.Attr.Type { + case nl.TCA_KIND: + filterType = string(attr.Value[:len(attr.Value)-1]) + switch filterType { + case "u32": + filter = &U32{} + case "fw": + filter = &Fw{} + default: + filter = &GenericFilter{FilterType: filterType} + } + case nl.TCA_OPTIONS: + switch filterType { + case "u32": + data, err := nl.ParseRouteAttr(attr.Value) + if err != nil { + return nil, err + } + detailed, err = parseU32Data(filter, data) + if err != nil { + return nil, err + } + case "fw": + data, err := nl.ParseRouteAttr(attr.Value) + if err != nil { + return nil, err + } + detailed, err = parseFwData(filter, data) + if err != nil { + return nil, err + } + } + } + } + // only return the detailed 
version of the filter + if detailed { + *filter.Attrs() = base + res = append(res, filter) + } + } + + return res, nil +} + +func parseU32Data(filter Filter, data []syscall.NetlinkRouteAttr) (bool, error) { + native = nl.NativeEndian() + u32 := filter.(*U32) + detailed := false + for _, datum := range data { + switch datum.Attr.Type { + case nl.TCA_U32_SEL: + detailed = true + sel := nl.DeserializeTcU32Sel(datum.Value) + // only parse if we have a very basic redirect + if sel.Flags&nl.TC_U32_TERMINAL == 0 || sel.Nkeys != 1 { + return detailed, nil + } + case nl.TCA_U32_ACT: + table, err := nl.ParseRouteAttr(datum.Value) + if err != nil { + return detailed, err + } + if len(table) != 1 || table[0].Attr.Type != nl.TCA_ACT_TAB { + return detailed, fmt.Errorf("Action table not formed properly") + } + aattrs, err := nl.ParseRouteAttr(table[0].Value) + for _, aattr := range aattrs { + switch aattr.Attr.Type { + case nl.TCA_KIND: + actionType := string(aattr.Value[:len(aattr.Value)-1]) + // only parse if the action is mirred + if actionType != "mirred" { + return detailed, nil + } + case nl.TCA_OPTIONS: + adata, err := nl.ParseRouteAttr(aattr.Value) + if err != nil { + return detailed, err + } + for _, adatum := range adata { + switch adatum.Attr.Type { + case nl.TCA_MIRRED_PARMS: + mir := nl.DeserializeTcMirred(adatum.Value) + u32.RedirIndex = int(mir.Ifindex) + } + } + } + } + } + } + return detailed, nil +} + +func parseFwData(filter Filter, data []syscall.NetlinkRouteAttr) (bool, error) { + native = nl.NativeEndian() + fw := filter.(*Fw) + detailed := true + for _, datum := range data { + switch datum.Attr.Type { + case nl.TCA_FW_MASK: + fw.Mask = native.Uint32(datum.Value[0:4]) + case nl.TCA_FW_CLASSID: + fw.ClassId = native.Uint32(datum.Value[0:4]) + case nl.TCA_FW_INDEV: + fw.InDev = string(datum.Value[:len(datum.Value)-1]) + case nl.TCA_FW_POLICE: + adata, _ := nl.ParseRouteAttr(datum.Value) + for _, aattr := range adata { + switch aattr.Attr.Type { + case 
nl.TCA_POLICE_TBF: + fw.Police = *nl.DeserializeTcPolice(aattr.Value) + case nl.TCA_POLICE_RATE: + fw.Rtab = DeserializeRtab(aattr.Value) + case nl.TCA_POLICE_PEAKRATE: + fw.Ptab = DeserializeRtab(aattr.Value) + } + } + } + } + return detailed, nil +} + +func AlignToAtm(size uint) uint { + var linksize, cells int + cells = int(size / nl.ATM_CELL_PAYLOAD) + if (size % nl.ATM_CELL_PAYLOAD) > 0 { + cells++ + } + linksize = cells * nl.ATM_CELL_SIZE + return uint(linksize) +} + +func AdjustSize(sz uint, mpu uint, linklayer int) uint { + if sz < mpu { + sz = mpu + } + switch linklayer { + case nl.LINKLAYER_ATM: + return AlignToAtm(sz) + default: + return sz + } +} + +func CalcRtable(rate *nl.TcRateSpec, rtab [256]uint32, cell_log int, mtu uint32, linklayer int) int { + bps := rate.Rate + mpu := rate.Mpu + var sz uint + if mtu == 0 { + mtu = 2047 + } + if cell_log < 0 { + cell_log = 0 + for (mtu >> uint(cell_log)) > 255 { + cell_log++ + } + } + for i := 0; i < 256; i++ { + sz = AdjustSize(uint((i+1)< 0 { + nl.NewRtAttrChild(data, nl.IFLA_VXLAN_AGEING, nl.Uint32Attr(uint32(vxlan.Age))) + } + if vxlan.Limit > 0 { + nl.NewRtAttrChild(data, nl.IFLA_VXLAN_LIMIT, nl.Uint32Attr(uint32(vxlan.Limit))) + } + if vxlan.Port > 0 { + nl.NewRtAttrChild(data, nl.IFLA_VXLAN_PORT, nl.Uint16Attr(uint16(vxlan.Port))) + } + if vxlan.PortLow > 0 || vxlan.PortHigh > 0 { + pr := vxlanPortRange{uint16(vxlan.PortLow), uint16(vxlan.PortHigh)} + + buf := new(bytes.Buffer) + binary.Write(buf, binary.BigEndian, &pr) + + nl.NewRtAttrChild(data, nl.IFLA_VXLAN_PORT_RANGE, buf.Bytes()) + } +} + +func addBondAttrs(bond *Bond, linkInfo *nl.RtAttr) { + data := nl.NewRtAttrChild(linkInfo, nl.IFLA_INFO_DATA, nil) + if bond.Mode >= 0 { + nl.NewRtAttrChild(data, nl.IFLA_BOND_MODE, nl.Uint8Attr(uint8(bond.Mode))) + } + if bond.ActiveSlave >= 0 { + nl.NewRtAttrChild(data, nl.IFLA_BOND_ACTIVE_SLAVE, nl.Uint32Attr(uint32(bond.ActiveSlave))) + } + if bond.Miimon >= 0 { + nl.NewRtAttrChild(data, nl.IFLA_BOND_MIIMON, 
nl.Uint32Attr(uint32(bond.Miimon))) + } + if bond.UpDelay >= 0 { + nl.NewRtAttrChild(data, nl.IFLA_BOND_UPDELAY, nl.Uint32Attr(uint32(bond.UpDelay))) + } + if bond.DownDelay >= 0 { + nl.NewRtAttrChild(data, nl.IFLA_BOND_DOWNDELAY, nl.Uint32Attr(uint32(bond.DownDelay))) + } + if bond.UseCarrier >= 0 { + nl.NewRtAttrChild(data, nl.IFLA_BOND_USE_CARRIER, nl.Uint8Attr(uint8(bond.UseCarrier))) + } + if bond.ArpInterval >= 0 { + nl.NewRtAttrChild(data, nl.IFLA_BOND_ARP_INTERVAL, nl.Uint32Attr(uint32(bond.ArpInterval))) + } + if bond.ArpIpTargets != nil { + msg := nl.NewRtAttrChild(data, nl.IFLA_BOND_ARP_IP_TARGET, nil) + for i := range bond.ArpIpTargets { + ip := bond.ArpIpTargets[i].To4() + if ip != nil { + nl.NewRtAttrChild(msg, i, []byte(ip)) + continue + } + ip = bond.ArpIpTargets[i].To16() + if ip != nil { + nl.NewRtAttrChild(msg, i, []byte(ip)) + } + } + } + if bond.ArpValidate >= 0 { + nl.NewRtAttrChild(data, nl.IFLA_BOND_ARP_VALIDATE, nl.Uint32Attr(uint32(bond.ArpValidate))) + } + if bond.ArpAllTargets >= 0 { + nl.NewRtAttrChild(data, nl.IFLA_BOND_ARP_ALL_TARGETS, nl.Uint32Attr(uint32(bond.ArpAllTargets))) + } + if bond.Primary >= 0 { + nl.NewRtAttrChild(data, nl.IFLA_BOND_PRIMARY, nl.Uint32Attr(uint32(bond.Primary))) + } + if bond.PrimaryReselect >= 0 { + nl.NewRtAttrChild(data, nl.IFLA_BOND_PRIMARY_RESELECT, nl.Uint8Attr(uint8(bond.PrimaryReselect))) + } + if bond.FailOverMac >= 0 { + nl.NewRtAttrChild(data, nl.IFLA_BOND_FAIL_OVER_MAC, nl.Uint8Attr(uint8(bond.FailOverMac))) + } + if bond.XmitHashPolicy >= 0 { + nl.NewRtAttrChild(data, nl.IFLA_BOND_XMIT_HASH_POLICY, nl.Uint8Attr(uint8(bond.XmitHashPolicy))) + } + if bond.ResendIgmp >= 0 { + nl.NewRtAttrChild(data, nl.IFLA_BOND_RESEND_IGMP, nl.Uint32Attr(uint32(bond.ResendIgmp))) + } + if bond.NumPeerNotif >= 0 { + nl.NewRtAttrChild(data, nl.IFLA_BOND_NUM_PEER_NOTIF, nl.Uint8Attr(uint8(bond.NumPeerNotif))) + } + if bond.AllSlavesActive >= 0 { + nl.NewRtAttrChild(data, nl.IFLA_BOND_ALL_SLAVES_ACTIVE, 
nl.Uint8Attr(uint8(bond.AllSlavesActive))) + } + if bond.MinLinks >= 0 { + nl.NewRtAttrChild(data, nl.IFLA_BOND_MIN_LINKS, nl.Uint32Attr(uint32(bond.MinLinks))) + } + if bond.LpInterval >= 0 { + nl.NewRtAttrChild(data, nl.IFLA_BOND_LP_INTERVAL, nl.Uint32Attr(uint32(bond.LpInterval))) + } + if bond.PackersPerSlave >= 0 { + nl.NewRtAttrChild(data, nl.IFLA_BOND_PACKETS_PER_SLAVE, nl.Uint32Attr(uint32(bond.PackersPerSlave))) + } + if bond.LacpRate >= 0 { + nl.NewRtAttrChild(data, nl.IFLA_BOND_AD_LACP_RATE, nl.Uint8Attr(uint8(bond.LacpRate))) + } + if bond.AdSelect >= 0 { + nl.NewRtAttrChild(data, nl.IFLA_BOND_AD_SELECT, nl.Uint8Attr(uint8(bond.AdSelect))) + } +} + +// LinkAdd adds a new link device. The type and features of the device +// are taken fromt the parameters in the link object. +// Equivalent to: `ip link add $link` +func LinkAdd(link Link) error { + // TODO: set mtu and hardware address + // TODO: support extra data for macvlan + base := link.Attrs() + + if base.Name == "" { + return fmt.Errorf("LinkAttrs.Name cannot be empty!") + } + + if tuntap, ok := link.(*Tuntap); ok { + // TODO: support user + // TODO: support group + // TODO: support non- one_queue + // TODO: support pi | vnet_hdr | multi_queue + // TODO: support non- exclusive + // TODO: support non- persistent + if tuntap.Mode < syscall.IFF_TUN || tuntap.Mode > syscall.IFF_TAP { + return fmt.Errorf("Tuntap.Mode %v unknown!", tuntap.Mode) + } + file, err := os.OpenFile("/dev/net/tun", os.O_RDWR, 0) + if err != nil { + return err + } + defer file.Close() + var req ifReq + req.Flags |= syscall.IFF_ONE_QUEUE + req.Flags |= syscall.IFF_TUN_EXCL + copy(req.Name[:15], base.Name) + req.Flags |= uint16(tuntap.Mode) + _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, file.Fd(), uintptr(syscall.TUNSETIFF), uintptr(unsafe.Pointer(&req))) + if errno != 0 { + return fmt.Errorf("Tuntap IOCTL TUNSETIFF failed, errno %v", errno) + } + _, _, errno = syscall.Syscall(syscall.SYS_IOCTL, file.Fd(), 
uintptr(syscall.TUNSETPERSIST), 1) + if errno != 0 { + return fmt.Errorf("Tuntap IOCTL TUNSETPERSIST failed, errno %v", errno) + } + ensureIndex(base) + + // can't set master during create, so set it afterwards + if base.MasterIndex != 0 { + // TODO: verify MasterIndex is actually a bridge? + return LinkSetMasterByIndex(link, base.MasterIndex) + } + return nil + } + + req := nl.NewNetlinkRequest(syscall.RTM_NEWLINK, syscall.NLM_F_CREATE|syscall.NLM_F_EXCL|syscall.NLM_F_ACK) + + msg := nl.NewIfInfomsg(syscall.AF_UNSPEC) + // TODO: make it shorter + if base.Flags&net.FlagUp != 0 { + msg.Change = syscall.IFF_UP + msg.Flags = syscall.IFF_UP + } + if base.Flags&net.FlagBroadcast != 0 { + msg.Change |= syscall.IFF_BROADCAST + msg.Flags |= syscall.IFF_BROADCAST + } + if base.Flags&net.FlagLoopback != 0 { + msg.Change |= syscall.IFF_LOOPBACK + msg.Flags |= syscall.IFF_LOOPBACK + } + if base.Flags&net.FlagPointToPoint != 0 { + msg.Change |= syscall.IFF_POINTOPOINT + msg.Flags |= syscall.IFF_POINTOPOINT + } + if base.Flags&net.FlagMulticast != 0 { + msg.Change |= syscall.IFF_MULTICAST + msg.Flags |= syscall.IFF_MULTICAST + } + req.AddData(msg) + + if base.ParentIndex != 0 { + b := make([]byte, 4) + native.PutUint32(b, uint32(base.ParentIndex)) + data := nl.NewRtAttr(syscall.IFLA_LINK, b) + req.AddData(data) + } else if link.Type() == "ipvlan" { + return fmt.Errorf("Can't create ipvlan link without ParentIndex") + } + + nameData := nl.NewRtAttr(syscall.IFLA_IFNAME, nl.ZeroTerminated(base.Name)) + req.AddData(nameData) + + if base.MTU > 0 { + mtu := nl.NewRtAttr(syscall.IFLA_MTU, nl.Uint32Attr(uint32(base.MTU))) + req.AddData(mtu) + } + + if base.TxQLen >= 0 { + qlen := nl.NewRtAttr(syscall.IFLA_TXQLEN, nl.Uint32Attr(uint32(base.TxQLen))) + req.AddData(qlen) + } + + if base.Namespace != nil { + var attr *nl.RtAttr + switch base.Namespace.(type) { + case NsPid: + val := nl.Uint32Attr(uint32(base.Namespace.(NsPid))) + attr = nl.NewRtAttr(syscall.IFLA_NET_NS_PID, val) + case 
NsFd: + val := nl.Uint32Attr(uint32(base.Namespace.(NsFd))) + attr = nl.NewRtAttr(nl.IFLA_NET_NS_FD, val) + } + + req.AddData(attr) + } + + linkInfo := nl.NewRtAttr(syscall.IFLA_LINKINFO, nil) + nl.NewRtAttrChild(linkInfo, nl.IFLA_INFO_KIND, nl.NonZeroTerminated(link.Type())) + + if vlan, ok := link.(*Vlan); ok { + b := make([]byte, 2) + native.PutUint16(b, uint16(vlan.VlanId)) + data := nl.NewRtAttrChild(linkInfo, nl.IFLA_INFO_DATA, nil) + nl.NewRtAttrChild(data, nl.IFLA_VLAN_ID, b) + } else if veth, ok := link.(*Veth); ok { + data := nl.NewRtAttrChild(linkInfo, nl.IFLA_INFO_DATA, nil) + peer := nl.NewRtAttrChild(data, nl.VETH_INFO_PEER, nil) + nl.NewIfInfomsgChild(peer, syscall.AF_UNSPEC) + nl.NewRtAttrChild(peer, syscall.IFLA_IFNAME, nl.ZeroTerminated(veth.PeerName)) + if base.TxQLen >= 0 { + nl.NewRtAttrChild(peer, syscall.IFLA_TXQLEN, nl.Uint32Attr(uint32(base.TxQLen))) + } + if base.MTU > 0 { + nl.NewRtAttrChild(peer, syscall.IFLA_MTU, nl.Uint32Attr(uint32(base.MTU))) + } + + } else if vxlan, ok := link.(*Vxlan); ok { + addVxlanAttrs(vxlan, linkInfo) + } else if bond, ok := link.(*Bond); ok { + addBondAttrs(bond, linkInfo) + } else if ipv, ok := link.(*IPVlan); ok { + data := nl.NewRtAttrChild(linkInfo, nl.IFLA_INFO_DATA, nil) + nl.NewRtAttrChild(data, nl.IFLA_IPVLAN_MODE, nl.Uint16Attr(uint16(ipv.Mode))) + } else if macv, ok := link.(*Macvlan); ok { + if macv.Mode != MACVLAN_MODE_DEFAULT { + data := nl.NewRtAttrChild(linkInfo, nl.IFLA_INFO_DATA, nil) + nl.NewRtAttrChild(data, nl.IFLA_MACVLAN_MODE, nl.Uint32Attr(macvlanModes[macv.Mode])) + } + } else if gretap, ok := link.(*Gretap); ok { + addGretapAttrs(gretap, linkInfo) + } + + req.AddData(linkInfo) + + _, err := req.Execute(syscall.NETLINK_ROUTE, 0) + if err != nil { + return err + } + + ensureIndex(base) + + // can't set master during create, so set it afterwards + if base.MasterIndex != 0 { + // TODO: verify MasterIndex is actually a bridge? 
+ return LinkSetMasterByIndex(link, base.MasterIndex) + } + return nil +} + +// LinkDel deletes link device. Either Index or Name must be set in +// the link object for it to be deleted. The other values are ignored. +// Equivalent to: `ip link del $link` +func LinkDel(link Link) error { + base := link.Attrs() + + ensureIndex(base) + + req := nl.NewNetlinkRequest(syscall.RTM_DELLINK, syscall.NLM_F_ACK) + + msg := nl.NewIfInfomsg(syscall.AF_UNSPEC) + msg.Index = int32(base.Index) + req.AddData(msg) + + _, err := req.Execute(syscall.NETLINK_ROUTE, 0) + return err +} + +func linkByNameDump(name string) (Link, error) { + links, err := LinkList() + if err != nil { + return nil, err + } + + for _, link := range links { + if link.Attrs().Name == name { + return link, nil + } + } + return nil, fmt.Errorf("Link %s not found", name) +} + +func linkByAliasDump(alias string) (Link, error) { + links, err := LinkList() + if err != nil { + return nil, err + } + + for _, link := range links { + if link.Attrs().Alias == alias { + return link, nil + } + } + return nil, fmt.Errorf("Link alias %s not found", alias) +} + +// LinkByName finds a link by name and returns a pointer to the object. +func LinkByName(name string) (Link, error) { + if lookupByDump { + return linkByNameDump(name) + } + + req := nl.NewNetlinkRequest(syscall.RTM_GETLINK, syscall.NLM_F_ACK) + + msg := nl.NewIfInfomsg(syscall.AF_UNSPEC) + req.AddData(msg) + + nameData := nl.NewRtAttr(syscall.IFLA_IFNAME, nl.ZeroTerminated(name)) + req.AddData(nameData) + + link, err := execGetLink(req) + if err == syscall.EINVAL { + // older kernels don't support looking up via IFLA_IFNAME + // so fall back to dumping all links + lookupByDump = true + return linkByNameDump(name) + } + + return link, err +} + +// LinkByAlias finds a link by its alias and returns a pointer to the object. 
+// If there are multiple links with the alias it returns the first one +func LinkByAlias(alias string) (Link, error) { + if lookupByDump { + return linkByAliasDump(alias) + } + + req := nl.NewNetlinkRequest(syscall.RTM_GETLINK, syscall.NLM_F_ACK) + + msg := nl.NewIfInfomsg(syscall.AF_UNSPEC) + req.AddData(msg) + + nameData := nl.NewRtAttr(syscall.IFLA_IFALIAS, nl.ZeroTerminated(alias)) + req.AddData(nameData) + + link, err := execGetLink(req) + if err == syscall.EINVAL { + // older kernels don't support looking up via IFLA_IFALIAS + // so fall back to dumping all links + lookupByDump = true + return linkByAliasDump(alias) + } + + return link, err +} + +// LinkByIndex finds a link by index and returns a pointer to the object. +func LinkByIndex(index int) (Link, error) { + req := nl.NewNetlinkRequest(syscall.RTM_GETLINK, syscall.NLM_F_ACK) + + msg := nl.NewIfInfomsg(syscall.AF_UNSPEC) + msg.Index = int32(index) + req.AddData(msg) + + return execGetLink(req) +} + +func execGetLink(req *nl.NetlinkRequest) (Link, error) { + msgs, err := req.Execute(syscall.NETLINK_ROUTE, 0) + if err != nil { + if errno, ok := err.(syscall.Errno); ok { + if errno == syscall.ENODEV { + return nil, fmt.Errorf("Link not found") + } + } + return nil, err + } + + switch { + case len(msgs) == 0: + return nil, fmt.Errorf("Link not found") + + case len(msgs) == 1: + return linkDeserialize(msgs[0]) + + default: + return nil, fmt.Errorf("More than one link found") + } +} + +// linkDeserialize deserializes a raw message received from netlink into +// a link object. 
+func linkDeserialize(m []byte) (Link, error) { + msg := nl.DeserializeIfInfomsg(m) + + attrs, err := nl.ParseRouteAttr(m[msg.Len():]) + if err != nil { + return nil, err + } + + base := LinkAttrs{Index: int(msg.Index), Flags: linkFlags(msg.Flags)} + var link Link + linkType := "" + for _, attr := range attrs { + switch attr.Attr.Type { + case syscall.IFLA_LINKINFO: + infos, err := nl.ParseRouteAttr(attr.Value) + if err != nil { + return nil, err + } + for _, info := range infos { + switch info.Attr.Type { + case nl.IFLA_INFO_KIND: + linkType = string(info.Value[:len(info.Value)-1]) + switch linkType { + case "dummy": + link = &Dummy{} + case "ifb": + link = &Ifb{} + case "bridge": + link = &Bridge{} + case "vlan": + link = &Vlan{} + case "veth": + link = &Veth{} + case "vxlan": + link = &Vxlan{} + case "bond": + link = &Bond{} + case "ipvlan": + link = &IPVlan{} + case "macvlan": + link = &Macvlan{} + case "macvtap": + link = &Macvtap{} + case "gretap": + link = &Gretap{} + default: + link = &GenericLink{LinkType: linkType} + } + case nl.IFLA_INFO_DATA: + data, err := nl.ParseRouteAttr(info.Value) + if err != nil { + return nil, err + } + switch linkType { + case "vlan": + parseVlanData(link, data) + case "vxlan": + parseVxlanData(link, data) + case "bond": + parseBondData(link, data) + case "ipvlan": + parseIPVlanData(link, data) + case "macvlan": + parseMacvlanData(link, data) + case "macvtap": + parseMacvtapData(link, data) + case "gretap": + parseGretapData(link, data) + } + } + } + case syscall.IFLA_ADDRESS: + var nonzero bool + for _, b := range attr.Value { + if b != 0 { + nonzero = true + } + } + if nonzero { + base.HardwareAddr = attr.Value[:] + } + case syscall.IFLA_IFNAME: + base.Name = string(attr.Value[:len(attr.Value)-1]) + case syscall.IFLA_MTU: + base.MTU = int(native.Uint32(attr.Value[0:4])) + case syscall.IFLA_LINK: + base.ParentIndex = int(native.Uint32(attr.Value[0:4])) + case syscall.IFLA_MASTER: + base.MasterIndex = 
int(native.Uint32(attr.Value[0:4])) + case syscall.IFLA_TXQLEN: + base.TxQLen = int(native.Uint32(attr.Value[0:4])) + case syscall.IFLA_IFALIAS: + base.Alias = string(attr.Value[:len(attr.Value)-1]) + } + } + // Links that don't have IFLA_INFO_KIND are hardware devices + if link == nil { + link = &Device{} + } + *link.Attrs() = base + + return link, nil +} + +// LinkList gets a list of link devices. +// Equivalent to: `ip link show` +func LinkList() ([]Link, error) { + // NOTE(vish): This duplicates functionality in net/iface_linux.go, but we need + // to get the message ourselves to parse link type. + req := nl.NewNetlinkRequest(syscall.RTM_GETLINK, syscall.NLM_F_DUMP) + + msg := nl.NewIfInfomsg(syscall.AF_UNSPEC) + req.AddData(msg) + + msgs, err := req.Execute(syscall.NETLINK_ROUTE, syscall.RTM_NEWLINK) + if err != nil { + return nil, err + } + + var res []Link + for _, m := range msgs { + link, err := linkDeserialize(m) + if err != nil { + return nil, err + } + res = append(res, link) + } + + return res, nil +} + +// LinkUpdate is used to pass information back from LinkSubscribe() +type LinkUpdate struct { + nl.IfInfomsg + Link +} + +// LinkSubscribe takes a chan down which notifications will be sent +// when links change. Close the 'done' chan to stop subscription. 
+func LinkSubscribe(ch chan<- LinkUpdate, done <-chan struct{}) error { + s, err := nl.Subscribe(syscall.NETLINK_ROUTE, syscall.RTNLGRP_LINK) + if err != nil { + return err + } + if done != nil { + go func() { + <-done + s.Close() + }() + } + go func() { + defer close(ch) + for { + msgs, err := s.Receive() + if err != nil { + return + } + for _, m := range msgs { + ifmsg := nl.DeserializeIfInfomsg(m.Data) + link, err := linkDeserialize(m.Data) + if err != nil { + return + } + ch <- LinkUpdate{IfInfomsg: *ifmsg, Link: link} + } + } + }() + + return nil +} + +func LinkSetHairpin(link Link, mode bool) error { + return setProtinfoAttr(link, mode, nl.IFLA_BRPORT_MODE) +} + +func LinkSetGuard(link Link, mode bool) error { + return setProtinfoAttr(link, mode, nl.IFLA_BRPORT_GUARD) +} + +func LinkSetFastLeave(link Link, mode bool) error { + return setProtinfoAttr(link, mode, nl.IFLA_BRPORT_FAST_LEAVE) +} + +func LinkSetLearning(link Link, mode bool) error { + return setProtinfoAttr(link, mode, nl.IFLA_BRPORT_LEARNING) +} + +func LinkSetRootBlock(link Link, mode bool) error { + return setProtinfoAttr(link, mode, nl.IFLA_BRPORT_PROTECT) +} + +func LinkSetFlood(link Link, mode bool) error { + return setProtinfoAttr(link, mode, nl.IFLA_BRPORT_UNICAST_FLOOD) +} + +func setProtinfoAttr(link Link, mode bool, attr int) error { + base := link.Attrs() + ensureIndex(base) + req := nl.NewNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK) + + msg := nl.NewIfInfomsg(syscall.AF_BRIDGE) + msg.Index = int32(base.Index) + req.AddData(msg) + + br := nl.NewRtAttr(syscall.IFLA_PROTINFO|syscall.NLA_F_NESTED, nil) + nl.NewRtAttrChild(br, attr, boolToByte(mode)) + req.AddData(br) + _, err := req.Execute(syscall.NETLINK_ROUTE, 0) + if err != nil { + return err + } + return nil +} + +func parseVlanData(link Link, data []syscall.NetlinkRouteAttr) { + vlan := link.(*Vlan) + for _, datum := range data { + switch datum.Attr.Type { + case nl.IFLA_VLAN_ID: + vlan.VlanId = 
int(native.Uint16(datum.Value[0:2])) + } + } +} + +func parseVxlanData(link Link, data []syscall.NetlinkRouteAttr) { + vxlan := link.(*Vxlan) + for _, datum := range data { + switch datum.Attr.Type { + case nl.IFLA_VXLAN_ID: + vxlan.VxlanId = int(native.Uint32(datum.Value[0:4])) + case nl.IFLA_VXLAN_LINK: + vxlan.VtepDevIndex = int(native.Uint32(datum.Value[0:4])) + case nl.IFLA_VXLAN_LOCAL: + vxlan.SrcAddr = net.IP(datum.Value[0:4]) + case nl.IFLA_VXLAN_LOCAL6: + vxlan.SrcAddr = net.IP(datum.Value[0:16]) + case nl.IFLA_VXLAN_GROUP: + vxlan.Group = net.IP(datum.Value[0:4]) + case nl.IFLA_VXLAN_GROUP6: + vxlan.Group = net.IP(datum.Value[0:16]) + case nl.IFLA_VXLAN_TTL: + vxlan.TTL = int(datum.Value[0]) + case nl.IFLA_VXLAN_TOS: + vxlan.TOS = int(datum.Value[0]) + case nl.IFLA_VXLAN_LEARNING: + vxlan.Learning = int8(datum.Value[0]) != 0 + case nl.IFLA_VXLAN_PROXY: + vxlan.Proxy = int8(datum.Value[0]) != 0 + case nl.IFLA_VXLAN_RSC: + vxlan.RSC = int8(datum.Value[0]) != 0 + case nl.IFLA_VXLAN_L2MISS: + vxlan.L2miss = int8(datum.Value[0]) != 0 + case nl.IFLA_VXLAN_L3MISS: + vxlan.L3miss = int8(datum.Value[0]) != 0 + case nl.IFLA_VXLAN_GBP: + vxlan.GBP = int8(datum.Value[0]) != 0 + case nl.IFLA_VXLAN_AGEING: + vxlan.Age = int(native.Uint32(datum.Value[0:4])) + vxlan.NoAge = vxlan.Age == 0 + case nl.IFLA_VXLAN_LIMIT: + vxlan.Limit = int(native.Uint32(datum.Value[0:4])) + case nl.IFLA_VXLAN_PORT: + vxlan.Port = int(native.Uint16(datum.Value[0:2])) + case nl.IFLA_VXLAN_PORT_RANGE: + buf := bytes.NewBuffer(datum.Value[0:4]) + var pr vxlanPortRange + if binary.Read(buf, binary.BigEndian, &pr) == nil { // populate the range only on a successful read; the check was inverted, so PortLow/PortHigh were never set + vxlan.PortLow = int(pr.Lo) + vxlan.PortHigh = int(pr.Hi) + } + } + } +} + +func parseBondData(link Link, data []syscall.NetlinkRouteAttr) { + bond := link.(*Bond) // parse into the caller-supplied link (as the other parse*Data helpers do); a fresh NewLinkBond here discarded every parsed attribute + for i := range data { + switch data[i].Attr.Type { + case nl.IFLA_BOND_MODE: + bond.Mode = BondMode(data[i].Value[0]) + case nl.IFLA_BOND_ACTIVE_SLAVE: + bond.ActiveSlave = 
int(native.Uint32(data[i].Value[0:4])) + case nl.IFLA_BOND_MIIMON: + bond.Miimon = int(native.Uint32(data[i].Value[0:4])) + case nl.IFLA_BOND_UPDELAY: + bond.UpDelay = int(native.Uint32(data[i].Value[0:4])) + case nl.IFLA_BOND_DOWNDELAY: + bond.DownDelay = int(native.Uint32(data[i].Value[0:4])) + case nl.IFLA_BOND_USE_CARRIER: + bond.UseCarrier = int(data[i].Value[0]) + case nl.IFLA_BOND_ARP_INTERVAL: + bond.ArpInterval = int(native.Uint32(data[i].Value[0:4])) + case nl.IFLA_BOND_ARP_IP_TARGET: + // TODO: implement + case nl.IFLA_BOND_ARP_VALIDATE: + bond.ArpValidate = BondArpValidate(native.Uint32(data[i].Value[0:4])) + case nl.IFLA_BOND_ARP_ALL_TARGETS: + bond.ArpAllTargets = BondArpAllTargets(native.Uint32(data[i].Value[0:4])) + case nl.IFLA_BOND_PRIMARY: + bond.Primary = int(native.Uint32(data[i].Value[0:4])) + case nl.IFLA_BOND_PRIMARY_RESELECT: + bond.PrimaryReselect = BondPrimaryReselect(data[i].Value[0]) + case nl.IFLA_BOND_FAIL_OVER_MAC: + bond.FailOverMac = BondFailOverMac(data[i].Value[0]) + case nl.IFLA_BOND_XMIT_HASH_POLICY: + bond.XmitHashPolicy = BondXmitHashPolicy(data[i].Value[0]) + case nl.IFLA_BOND_RESEND_IGMP: + bond.ResendIgmp = int(native.Uint32(data[i].Value[0:4])) + case nl.IFLA_BOND_NUM_PEER_NOTIF: + bond.NumPeerNotif = int(data[i].Value[0]) + case nl.IFLA_BOND_ALL_SLAVES_ACTIVE: + bond.AllSlavesActive = int(data[i].Value[0]) + case nl.IFLA_BOND_MIN_LINKS: + bond.MinLinks = int(native.Uint32(data[i].Value[0:4])) + case nl.IFLA_BOND_LP_INTERVAL: + bond.LpInterval = int(native.Uint32(data[i].Value[0:4])) + case nl.IFLA_BOND_PACKETS_PER_SLAVE: + bond.PackersPerSlave = int(native.Uint32(data[i].Value[0:4])) + case nl.IFLA_BOND_AD_LACP_RATE: + bond.LacpRate = BondLacpRate(data[i].Value[0]) + case nl.IFLA_BOND_AD_SELECT: + bond.AdSelect = BondAdSelect(data[i].Value[0]) + case nl.IFLA_BOND_AD_INFO: + // TODO: implement + } + } +} + +func parseIPVlanData(link Link, data []syscall.NetlinkRouteAttr) { + ipv := link.(*IPVlan) + for _, datum := range 
data { + if datum.Attr.Type == nl.IFLA_IPVLAN_MODE { + ipv.Mode = IPVlanMode(native.Uint32(datum.Value[0:4])) + return + } + } +} + +func parseMacvtapData(link Link, data []syscall.NetlinkRouteAttr) { + macv := link.(*Macvtap) + parseMacvlanData(&macv.Macvlan, data) +} + +func parseMacvlanData(link Link, data []syscall.NetlinkRouteAttr) { + macv := link.(*Macvlan) + for _, datum := range data { + if datum.Attr.Type == nl.IFLA_MACVLAN_MODE { + switch native.Uint32(datum.Value[0:4]) { + case nl.MACVLAN_MODE_PRIVATE: + macv.Mode = MACVLAN_MODE_PRIVATE + case nl.MACVLAN_MODE_VEPA: + macv.Mode = MACVLAN_MODE_VEPA + case nl.MACVLAN_MODE_BRIDGE: + macv.Mode = MACVLAN_MODE_BRIDGE + case nl.MACVLAN_MODE_PASSTHRU: + macv.Mode = MACVLAN_MODE_PASSTHRU + case nl.MACVLAN_MODE_SOURCE: + macv.Mode = MACVLAN_MODE_SOURCE + } + return + } + } +} + +// copied from pkg/net_linux.go +func linkFlags(rawFlags uint32) net.Flags { + var f net.Flags + if rawFlags&syscall.IFF_UP != 0 { + f |= net.FlagUp + } + if rawFlags&syscall.IFF_BROADCAST != 0 { + f |= net.FlagBroadcast + } + if rawFlags&syscall.IFF_LOOPBACK != 0 { + f |= net.FlagLoopback + } + if rawFlags&syscall.IFF_POINTOPOINT != 0 { + f |= net.FlagPointToPoint + } + if rawFlags&syscall.IFF_MULTICAST != 0 { + f |= net.FlagMulticast + } + return f +} + +func htonl(val uint32) []byte { + bytes := make([]byte, 4) + binary.BigEndian.PutUint32(bytes, val) + return bytes +} + +func htons(val uint16) []byte { + bytes := make([]byte, 2) + binary.BigEndian.PutUint16(bytes, val) + return bytes +} + +func ntohl(buf []byte) uint32 { + return binary.BigEndian.Uint32(buf) +} + +func ntohs(buf []byte) uint16 { + return binary.BigEndian.Uint16(buf) +} + +func addGretapAttrs(gretap *Gretap, linkInfo *nl.RtAttr) { + data := nl.NewRtAttrChild(linkInfo, nl.IFLA_INFO_DATA, nil) + + ip := gretap.Local.To4() + if ip != nil { + nl.NewRtAttrChild(data, nl.IFLA_GRE_LOCAL, []byte(ip)) + } + ip = gretap.Remote.To4() + if ip != nil { + nl.NewRtAttrChild(data, 
nl.IFLA_GRE_REMOTE, []byte(ip)) + } + + if gretap.IKey != 0 { + nl.NewRtAttrChild(data, nl.IFLA_GRE_IKEY, htonl(gretap.IKey)) + gretap.IFlags |= uint16(nl.GRE_KEY) + } + + if gretap.OKey != 0 { + nl.NewRtAttrChild(data, nl.IFLA_GRE_OKEY, htonl(gretap.OKey)) + gretap.OFlags |= uint16(nl.GRE_KEY) + } + + nl.NewRtAttrChild(data, nl.IFLA_GRE_IFLAGS, htons(gretap.IFlags)) + nl.NewRtAttrChild(data, nl.IFLA_GRE_OFLAGS, htons(gretap.OFlags)) + + if gretap.Link != 0 { + nl.NewRtAttrChild(data, nl.IFLA_GRE_LINK, nl.Uint32Attr(gretap.Link)) + } + + nl.NewRtAttrChild(data, nl.IFLA_GRE_PMTUDISC, nl.Uint8Attr(gretap.PMtuDisc)) + nl.NewRtAttrChild(data, nl.IFLA_GRE_TTL, nl.Uint8Attr(gretap.Ttl)) + nl.NewRtAttrChild(data, nl.IFLA_GRE_TOS, nl.Uint8Attr(gretap.Tos)) + nl.NewRtAttrChild(data, nl.IFLA_GRE_ENCAP_TYPE, nl.Uint16Attr(gretap.EncapType)) + nl.NewRtAttrChild(data, nl.IFLA_GRE_ENCAP_FLAGS, nl.Uint16Attr(gretap.EncapFlags)) + nl.NewRtAttrChild(data, nl.IFLA_GRE_ENCAP_SPORT, htons(gretap.EncapSport)) + nl.NewRtAttrChild(data, nl.IFLA_GRE_ENCAP_DPORT, htons(gretap.EncapDport)) +} + +func parseGretapData(link Link, data []syscall.NetlinkRouteAttr) { + gre := link.(*Gretap) + for _, datum := range data { + switch datum.Attr.Type { + case nl.IFLA_GRE_OKEY: + gre.OKey = ntohl(datum.Value[0:4]) // OKEY attribute goes into OKey (was assigned to IKey — swapped with the case below, inconsistent with addGretapAttrs) + case nl.IFLA_GRE_IKEY: + gre.IKey = ntohl(datum.Value[0:4]) // IKEY attribute goes into IKey (was assigned to OKey) + case nl.IFLA_GRE_LOCAL: + gre.Local = net.IP(datum.Value[0:4]) + case nl.IFLA_GRE_REMOTE: + gre.Remote = net.IP(datum.Value[0:4]) + case nl.IFLA_GRE_ENCAP_SPORT: + gre.EncapSport = ntohs(datum.Value[0:2]) + case nl.IFLA_GRE_ENCAP_DPORT: + gre.EncapDport = ntohs(datum.Value[0:2]) + case nl.IFLA_GRE_IFLAGS: + gre.IFlags = ntohs(datum.Value[0:2]) + case nl.IFLA_GRE_OFLAGS: + gre.OFlags = ntohs(datum.Value[0:2]) + case nl.IFLA_GRE_LINK: + gre.Link = native.Uint32(datum.Value[0:4]) + case nl.IFLA_GRE_TTL: + gre.Ttl = uint8(datum.Value[0]) + case nl.IFLA_GRE_TOS: + gre.Tos = uint8(datum.Value[0]) + case nl.IFLA_GRE_PMTUDISC: + 
gre.PMtuDisc = uint8(datum.Value[0]) + case nl.IFLA_GRE_ENCAP_TYPE: + gre.EncapType = native.Uint16(datum.Value[0:2]) + case nl.IFLA_GRE_ENCAP_FLAGS: + gre.EncapFlags = native.Uint16(datum.Value[0:2]) + } + } +} diff --git a/networking/libsnnet/vendor/github.com/vishvananda/netlink/link_test.go b/networking/libsnnet/vendor/github.com/vishvananda/netlink/link_test.go new file mode 100644 index 000000000..83046128c --- /dev/null +++ b/networking/libsnnet/vendor/github.com/vishvananda/netlink/link_test.go @@ -0,0 +1,769 @@ +package netlink + +import ( + "bytes" + "net" + "syscall" + "testing" + "time" + + "github.com/vishvananda/netns" +) + +const ( + testTxQLen int = 100 + defaultTxQLen int = 1000 +) + +func testLinkAddDel(t *testing.T, link Link) { + links, err := LinkList() + if err != nil { + t.Fatal(err) + } + num := len(links) + + if err := LinkAdd(link); err != nil { + t.Fatal(err) + } + + base := link.Attrs() + + result, err := LinkByName(base.Name) + if err != nil { + t.Fatal(err) + } + + rBase := result.Attrs() + + if vlan, ok := link.(*Vlan); ok { + other, ok := result.(*Vlan) + if !ok { + t.Fatal("Result of create is not a vlan") + } + if vlan.VlanId != other.VlanId { + t.Fatal("Link.VlanId id doesn't match") + } + } + + if rBase.ParentIndex == 0 && base.ParentIndex != 0 { + t.Fatal("Created link doesn't have a Parent but it should") + } else if rBase.ParentIndex != 0 && base.ParentIndex == 0 { + t.Fatal("Created link has a Parent but it shouldn't") + } else if rBase.ParentIndex != 0 && base.ParentIndex != 0 { + if rBase.ParentIndex != base.ParentIndex { + t.Fatal("Link.ParentIndex doesn't match") + } + } + + if veth, ok := result.(*Veth); ok { + if rBase.TxQLen != base.TxQLen { + t.Fatalf("qlen is %d, should be %d", rBase.TxQLen, base.TxQLen) + } + if rBase.MTU != base.MTU { + t.Fatalf("MTU is %d, should be %d", rBase.MTU, base.MTU) + } + + if veth.PeerName != "" { + var peer *Veth + other, err := LinkByName(veth.PeerName) + if err != nil { + 
t.Fatalf("Peer %s not created", veth.PeerName) + } + if peer, ok = other.(*Veth); !ok { + t.Fatalf("Peer %s is incorrect type", veth.PeerName) + } + if peer.TxQLen != testTxQLen { + t.Fatalf("TxQLen of peer is %d, should be %d", peer.TxQLen, testTxQLen) + } + } + } + + if vxlan, ok := link.(*Vxlan); ok { + other, ok := result.(*Vxlan) + if !ok { + t.Fatal("Result of create is not a vxlan") + } + compareVxlan(t, vxlan, other) + } + + if ipv, ok := link.(*IPVlan); ok { + other, ok := result.(*IPVlan) + if !ok { + t.Fatal("Result of create is not a ipvlan") + } + if ipv.Mode != other.Mode { + t.Fatalf("Got unexpected mode: %d, expected: %d", other.Mode, ipv.Mode) + } + } + + if macv, ok := link.(*Macvlan); ok { + other, ok := result.(*Macvlan) + if !ok { + t.Fatal("Result of create is not a macvlan") + } + if macv.Mode != other.Mode { + t.Fatalf("Got unexpected mode: %d, expected: %d", other.Mode, macv.Mode) + } + } + + if err = LinkDel(link); err != nil { + t.Fatal(err) + } + + links, err = LinkList() + if err != nil { + t.Fatal(err) + } + + if len(links) != num { + t.Fatal("Link not removed properly") + } +} + +func compareVxlan(t *testing.T, expected, actual *Vxlan) { + + if actual.VxlanId != expected.VxlanId { + t.Fatal("Vxlan.VxlanId doesn't match") + } + if expected.SrcAddr != nil && !actual.SrcAddr.Equal(expected.SrcAddr) { + t.Fatal("Vxlan.SrcAddr doesn't match") + } + if expected.Group != nil && !actual.Group.Equal(expected.Group) { + t.Fatal("Vxlan.Group doesn't match") + } + if expected.TTL != -1 && actual.TTL != expected.TTL { + t.Fatal("Vxlan.TTL doesn't match") + } + if expected.TOS != -1 && actual.TOS != expected.TOS { + t.Fatal("Vxlan.TOS doesn't match") + } + if actual.Learning != expected.Learning { + t.Fatal("Vxlan.Learning doesn't match") + } + if actual.Proxy != expected.Proxy { + t.Fatal("Vxlan.Proxy doesn't match") + } + if actual.RSC != expected.RSC { + t.Fatal("Vxlan.RSC doesn't match") + } + if actual.L2miss != expected.L2miss { + 
t.Fatal("Vxlan.L2miss doesn't match") + } + if actual.L3miss != expected.L3miss { + t.Fatal("Vxlan.L3miss doesn't match") + } + if actual.GBP != expected.GBP { + t.Fatal("Vxlan.GBP doesn't match") + } + if expected.NoAge { + if !actual.NoAge { + t.Fatal("Vxlan.NoAge doesn't match") + } + } else if expected.Age > 0 && actual.Age != expected.Age { + t.Fatal("Vxlan.Age doesn't match") + } + if expected.Limit > 0 && actual.Limit != expected.Limit { + t.Fatal("Vxlan.Limit doesn't match") + } + if expected.Port > 0 && actual.Port != expected.Port { + t.Fatal("Vxlan.Port doesn't match") + } + if expected.PortLow > 0 || expected.PortHigh > 0 { + if actual.PortLow != expected.PortLow { + t.Fatal("Vxlan.PortLow doesn't match") + } + if actual.PortHigh != expected.PortHigh { + t.Fatal("Vxlan.PortHigh doesn't match") + } + } +} + +func TestLinkAddDelDummy(t *testing.T) { + tearDown := setUpNetlinkTest(t) + defer tearDown() + + testLinkAddDel(t, &Dummy{LinkAttrs{Name: "foo"}}) +} + +func TestLinkAddDelIfb(t *testing.T) { + tearDown := setUpNetlinkTest(t) + defer tearDown() + + testLinkAddDel(t, &Ifb{LinkAttrs{Name: "foo"}}) +} + +func TestLinkAddDelBridge(t *testing.T) { + tearDown := setUpNetlinkTest(t) + defer tearDown() + + testLinkAddDel(t, &Bridge{LinkAttrs{Name: "foo", MTU: 1400}}) +} + +func TestLinkAddDelGretap(t *testing.T) { + tearDown := setUpNetlinkTest(t) + defer tearDown() + + testLinkAddDel(t, &Gretap{ + LinkAttrs: LinkAttrs{Name: "foo"}, + IKey: 0x101, + OKey: 0x101, + PMtuDisc: 1, + Local: net.IPv4(127, 0, 0, 1), + Remote: net.IPv4(127, 0, 0, 1)}) +} + +func TestLinkAddDelVlan(t *testing.T) { + tearDown := setUpNetlinkTest(t) + defer tearDown() + + parent := &Dummy{LinkAttrs{Name: "foo"}} + if err := LinkAdd(parent); err != nil { + t.Fatal(err) + } + + testLinkAddDel(t, &Vlan{LinkAttrs{Name: "bar", ParentIndex: parent.Attrs().Index}, 900}) + + if err := LinkDel(parent); err != nil { + t.Fatal(err) + } +} + +func TestLinkAddDelMacvlan(t *testing.T) { + tearDown 
:= setUpNetlinkTest(t) + defer tearDown() + + parent := &Dummy{LinkAttrs{Name: "foo"}} + if err := LinkAdd(parent); err != nil { + t.Fatal(err) + } + + testLinkAddDel(t, &Macvlan{ + LinkAttrs: LinkAttrs{Name: "bar", ParentIndex: parent.Attrs().Index}, + Mode: MACVLAN_MODE_PRIVATE, + }) + + if err := LinkDel(parent); err != nil { + t.Fatal(err) + } +} + +func TestLinkAddDelMacvtap(t *testing.T) { + tearDown := setUpNetlinkTest(t) + defer tearDown() + + parent := &Dummy{LinkAttrs{Name: "foo"}} + if err := LinkAdd(parent); err != nil { + t.Fatal(err) + } + + testLinkAddDel(t, &Macvtap{ + Macvlan: Macvlan{ + LinkAttrs: LinkAttrs{Name: "bar", ParentIndex: parent.Attrs().Index}, + Mode: MACVLAN_MODE_PRIVATE, + }, + }) + + if err := LinkDel(parent); err != nil { + t.Fatal(err) + } +} + +func TestLinkAddDelVeth(t *testing.T) { + tearDown := setUpNetlinkTest(t) + defer tearDown() + + testLinkAddDel(t, &Veth{LinkAttrs{Name: "foo", TxQLen: testTxQLen, MTU: 1400}, "bar"}) +} + +func TestLinkAddDelBond(t *testing.T) { + tearDown := setUpNetlinkTest(t) + defer tearDown() + + testLinkAddDel(t, NewLinkBond(LinkAttrs{Name: "foo"})) +} + +func TestLinkAddVethWithDefaultTxQLen(t *testing.T) { + tearDown := setUpNetlinkTest(t) + defer tearDown() + la := NewLinkAttrs() + la.Name = "foo" + + veth := &Veth{LinkAttrs: la, PeerName: "bar"} + if err := LinkAdd(veth); err != nil { + t.Fatal(err) + } + link, err := LinkByName("foo") + if err != nil { + t.Fatal(err) + } + if veth, ok := link.(*Veth); !ok { + t.Fatalf("unexpected link type: %T", link) + } else { + if veth.TxQLen != defaultTxQLen { + t.Fatalf("TxQLen is %d, should be %d", veth.TxQLen, defaultTxQLen) + } + } + peer, err := LinkByName("bar") + if err != nil { + t.Fatal(err) + } + if veth, ok := peer.(*Veth); !ok { + t.Fatalf("unexpected link type: %T", link) + } else { + if veth.TxQLen != defaultTxQLen { + t.Fatalf("TxQLen is %d, should be %d", veth.TxQLen, defaultTxQLen) + } + } +} + +func TestLinkAddVethWithZeroTxQLen(t 
*testing.T) { + tearDown := setUpNetlinkTest(t) + defer tearDown() + la := NewLinkAttrs() + la.Name = "foo" + la.TxQLen = 0 + + veth := &Veth{LinkAttrs: la, PeerName: "bar"} + if err := LinkAdd(veth); err != nil { + t.Fatal(err) + } + link, err := LinkByName("foo") + if err != nil { + t.Fatal(err) + } + if veth, ok := link.(*Veth); !ok { + t.Fatalf("unexpected link type: %T", link) + } else { + if veth.TxQLen != 0 { + t.Fatalf("TxQLen is %d, should be %d", veth.TxQLen, 0) + } + } + peer, err := LinkByName("bar") + if err != nil { + t.Fatal(err) + } + if veth, ok := peer.(*Veth); !ok { + t.Fatalf("unexpected link type: %T", link) + } else { + if veth.TxQLen != 0 { + t.Fatalf("TxQLen is %d, should be %d", veth.TxQLen, 0) + } + } +} + +func TestLinkAddDummyWithTxQLen(t *testing.T) { + tearDown := setUpNetlinkTest(t) + defer tearDown() + la := NewLinkAttrs() + la.Name = "foo" + la.TxQLen = 1500 + + dummy := &Dummy{LinkAttrs: la} + if err := LinkAdd(dummy); err != nil { + t.Fatal(err) + } + link, err := LinkByName("foo") + if err != nil { + t.Fatal(err) + } + if dummy, ok := link.(*Dummy); !ok { + t.Fatalf("unexpected link type: %T", link) + } else { + if dummy.TxQLen != 1500 { + t.Fatalf("TxQLen is %d, should be %d", dummy.TxQLen, 1500) + } + } +} + +func TestLinkAddDelBridgeMaster(t *testing.T) { + tearDown := setUpNetlinkTest(t) + defer tearDown() + + master := &Bridge{LinkAttrs{Name: "foo"}} + if err := LinkAdd(master); err != nil { + t.Fatal(err) + } + testLinkAddDel(t, &Dummy{LinkAttrs{Name: "bar", MasterIndex: master.Attrs().Index}}) + + if err := LinkDel(master); err != nil { + t.Fatal(err) + } +} + +func TestLinkSetUnsetResetMaster(t *testing.T) { + tearDown := setUpNetlinkTest(t) + defer tearDown() + + master := &Bridge{LinkAttrs{Name: "foo"}} + if err := LinkAdd(master); err != nil { + t.Fatal(err) + } + + newmaster := &Bridge{LinkAttrs{Name: "bar"}} + if err := LinkAdd(newmaster); err != nil { + t.Fatal(err) + } + + slave := &Dummy{LinkAttrs{Name: "baz"}} + 
if err := LinkAdd(slave); err != nil { + t.Fatal(err) + } + + nonexistsmaster := &Bridge{LinkAttrs{Name: "foobar"}} + + if err := LinkSetMaster(slave, nonexistsmaster); err == nil { + t.Fatal("error expected") + } + + if err := LinkSetMaster(slave, master); err != nil { + t.Fatal(err) + } + + link, err := LinkByName("baz") + if err != nil { + t.Fatal(err) + } + + if link.Attrs().MasterIndex != master.Attrs().Index { + t.Fatal("Master not set properly") + } + + if err := LinkSetMaster(slave, newmaster); err != nil { + t.Fatal(err) + } + + link, err = LinkByName("baz") + if err != nil { + t.Fatal(err) + } + + if link.Attrs().MasterIndex != newmaster.Attrs().Index { + t.Fatal("Master not reset properly") + } + + if err := LinkSetNoMaster(slave); err != nil { + t.Fatal(err) + } + + link, err = LinkByName("baz") + if err != nil { + t.Fatal(err) + } + + if link.Attrs().MasterIndex != 0 { + t.Fatal("Master not unset properly") + } + if err := LinkDel(slave); err != nil { + t.Fatal(err) + } + + if err := LinkDel(newmaster); err != nil { + t.Fatal(err) + } + + if err := LinkDel(master); err != nil { + t.Fatal(err) + } +} + +func TestLinkSetNs(t *testing.T) { + tearDown := setUpNetlinkTest(t) + defer tearDown() + + basens, err := netns.Get() + if err != nil { + t.Fatal("Failed to get basens") + } + defer basens.Close() + + newns, err := netns.New() + if err != nil { + t.Fatal("Failed to create newns") + } + defer newns.Close() + + link := &Veth{LinkAttrs{Name: "foo"}, "bar"} + if err := LinkAdd(link); err != nil { + t.Fatal(err) + } + + peer, err := LinkByName("bar") + if err != nil { + t.Fatal(err) + } + + err = LinkSetNsFd(peer, int(basens)) // capture the error: it was discarded, so the check below re-tested the stale nil err and could never fail + if err != nil { + t.Fatal("Failed to set newns for link") + } + + _, err = LinkByName("bar") + if err == nil { + t.Fatal("Link bar is still in newns") + } + + err = netns.Set(basens) + if err != nil { + t.Fatal("Failed to set basens") + } + + peer, err = LinkByName("bar") + if err != nil { + t.Fatal("Link is not in basens") + } + + if err 
:= LinkDel(peer); err != nil { + t.Fatal(err) + } + + err = netns.Set(newns) + if err != nil { + t.Fatal("Failed to set newns") + } + + _, err = LinkByName("foo") + if err == nil { + t.Fatal("Other half of veth pair not deleted") + } + +} + +func TestLinkAddDelVxlan(t *testing.T) { + tearDown := setUpNetlinkTest(t) + defer tearDown() + + parent := &Dummy{ + LinkAttrs{Name: "foo"}, + } + if err := LinkAdd(parent); err != nil { + t.Fatal(err) + } + + vxlan := Vxlan{ + LinkAttrs: LinkAttrs{ + Name: "bar", + }, + VxlanId: 10, + VtepDevIndex: parent.Index, + Learning: true, + L2miss: true, + L3miss: true, + } + + testLinkAddDel(t, &vxlan) + if err := LinkDel(parent); err != nil { + t.Fatal(err) + } +} + +func TestLinkAddDelIPVlanL2(t *testing.T) { + tearDown := setUpNetlinkTest(t) + defer tearDown() + parent := &Dummy{LinkAttrs{Name: "foo"}} + if err := LinkAdd(parent); err != nil { + t.Fatal(err) + } + + ipv := IPVlan{ + LinkAttrs: LinkAttrs{ + Name: "bar", + ParentIndex: parent.Index, + }, + Mode: IPVLAN_MODE_L2, + } + + testLinkAddDel(t, &ipv) +} + +func TestLinkAddDelIPVlanL3(t *testing.T) { + tearDown := setUpNetlinkTest(t) + defer tearDown() + parent := &Dummy{LinkAttrs{Name: "foo"}} + if err := LinkAdd(parent); err != nil { + t.Fatal(err) + } + + ipv := IPVlan{ + LinkAttrs: LinkAttrs{ + Name: "bar", + ParentIndex: parent.Index, + }, + Mode: IPVLAN_MODE_L3, + } + + testLinkAddDel(t, &ipv) +} + +func TestLinkAddDelIPVlanNoParent(t *testing.T) { + tearDown := setUpNetlinkTest(t) + defer tearDown() + + ipv := IPVlan{ + LinkAttrs: LinkAttrs{ + Name: "bar", + }, + Mode: IPVLAN_MODE_L3, + } + err := LinkAdd(&ipv) + if err == nil { + t.Fatal("Add should fail if ipvlan creating without ParentIndex") + } + if err.Error() != "Can't create ipvlan link without ParentIndex" { + t.Fatalf("Error should be about missing ParentIndex, got %q", err) + } +} + +func TestLinkByIndex(t *testing.T) { + tearDown := setUpNetlinkTest(t) + defer tearDown() + + dummy := &Dummy{LinkAttrs{Name: 
"dummy"}} + if err := LinkAdd(dummy); err != nil { + t.Fatal(err) + } + + found, err := LinkByIndex(dummy.Index) + if err != nil { + t.Fatal(err) + } + + if found.Attrs().Index != dummy.Attrs().Index { + t.Fatalf("Indices don't match: %v != %v", found.Attrs().Index, dummy.Attrs().Index) + } + + LinkDel(dummy) + + // test not found + _, err = LinkByIndex(dummy.Attrs().Index) + if err == nil { + t.Fatalf("LinkByIndex(%v) found deleted link", err) + } +} + +func TestLinkSet(t *testing.T) { + tearDown := setUpNetlinkTest(t) + defer tearDown() + + iface := &Dummy{LinkAttrs{Name: "foo"}} + if err := LinkAdd(iface); err != nil { + t.Fatal(err) + } + + link, err := LinkByName("foo") + if err != nil { + t.Fatal(err) + } + + err = LinkSetName(link, "bar") + if err != nil { + t.Fatalf("Could not change interface name: %v", err) + } + + link, err = LinkByName("bar") + if err != nil { + t.Fatalf("Interface name not changed: %v", err) + } + + err = LinkSetMTU(link, 1400) + if err != nil { + t.Fatalf("Could not set MTU: %v", err) + } + + link, err = LinkByName("bar") + if err != nil { + t.Fatal(err) + } + + if link.Attrs().MTU != 1400 { + t.Fatal("MTU not changed!") + } + + addr, err := net.ParseMAC("00:12:34:56:78:AB") + if err != nil { + t.Fatal(err) + } + + err = LinkSetHardwareAddr(link, addr) + if err != nil { + t.Fatal(err) + } + + link, err = LinkByName("bar") + if err != nil { + t.Fatal(err) + } + + if !bytes.Equal(link.Attrs().HardwareAddr, addr) { + t.Fatalf("hardware address not changed!") + } + + err = LinkSetAlias(link, "barAlias") + if err != nil { + t.Fatalf("Could not set alias: %v", err) + } + + link, err = LinkByName("bar") + if err != nil { + t.Fatal(err) + } + + if link.Attrs().Alias != "barAlias" { + t.Fatalf("alias not changed!") + } + + link, err = LinkByAlias("barAlias") + if err != nil { + t.Fatal(err) + } +} + +func expectLinkUpdate(ch <-chan LinkUpdate, ifaceName string, up bool) bool { + for { + timeout := time.After(time.Minute) + select { + case 
update := <-ch: + if ifaceName == update.Link.Attrs().Name && (update.IfInfomsg.Flags&syscall.IFF_UP != 0) == up { + return true + } + case <-timeout: + return false + } + } +} + +func TestLinkSubscribe(t *testing.T) { + tearDown := setUpNetlinkTest(t) + defer tearDown() + + ch := make(chan LinkUpdate) + done := make(chan struct{}) + defer close(done) + if err := LinkSubscribe(ch, done); err != nil { + t.Fatal(err) + } + + link := &Veth{LinkAttrs{Name: "foo", TxQLen: testTxQLen, MTU: 1400}, "bar"} + if err := LinkAdd(link); err != nil { + t.Fatal(err) + } + + if !expectLinkUpdate(ch, "foo", false) { + t.Fatal("Add update not received as expected") + } + + if err := LinkSetUp(link); err != nil { + t.Fatal(err) + } + + if !expectLinkUpdate(ch, "foo", true) { + t.Fatal("Link Up update not received as expected") + } + + if err := LinkDel(link); err != nil { + t.Fatal(err) + } + + if !expectLinkUpdate(ch, "foo", false) { + t.Fatal("Del update not received as expected") + } +} diff --git a/networking/libsnnet/vendor/github.com/vishvananda/netlink/link_tuntap_linux.go b/networking/libsnnet/vendor/github.com/vishvananda/netlink/link_tuntap_linux.go new file mode 100644 index 000000000..310bd33d8 --- /dev/null +++ b/networking/libsnnet/vendor/github.com/vishvananda/netlink/link_tuntap_linux.go @@ -0,0 +1,14 @@ +package netlink + +// ideally golang.org/x/sys/unix would define IfReq but it only has +// IFNAMSIZ, hence this minimalistic implementation +const ( + SizeOfIfReq = 40 + IFNAMSIZ = 16 +) + +type ifReq struct { + Name [IFNAMSIZ]byte + Flags uint16 + pad [SizeOfIfReq - IFNAMSIZ - 2]byte +} diff --git a/networking/libsnnet/vendor/github.com/vishvananda/netlink/neigh.go b/networking/libsnnet/vendor/github.com/vishvananda/netlink/neigh.go new file mode 100644 index 000000000..0e5eb90c9 --- /dev/null +++ b/networking/libsnnet/vendor/github.com/vishvananda/netlink/neigh.go @@ -0,0 +1,22 @@ +package netlink + +import ( + "fmt" + "net" +) + +// Neigh represents a link layer 
neighbor from netlink. +type Neigh struct { + LinkIndex int + Family int + State int + Type int + Flags int + IP net.IP + HardwareAddr net.HardwareAddr +} + +// String returns $ip/$hwaddr $label +func (neigh *Neigh) String() string { + return fmt.Sprintf("%s %s", neigh.IP, neigh.HardwareAddr) +} diff --git a/networking/libsnnet/vendor/github.com/vishvananda/netlink/neigh_linux.go b/networking/libsnnet/vendor/github.com/vishvananda/netlink/neigh_linux.go new file mode 100644 index 000000000..2af693bab --- /dev/null +++ b/networking/libsnnet/vendor/github.com/vishvananda/netlink/neigh_linux.go @@ -0,0 +1,190 @@ +package netlink + +import ( + "net" + "syscall" + "unsafe" + + "github.com/vishvananda/netlink/nl" +) + +const ( + NDA_UNSPEC = iota + NDA_DST + NDA_LLADDR + NDA_CACHEINFO + NDA_PROBES + NDA_VLAN + NDA_PORT + NDA_VNI + NDA_IFINDEX + NDA_MAX = NDA_IFINDEX +) + +// Neighbor Cache Entry States. +const ( + NUD_NONE = 0x00 + NUD_INCOMPLETE = 0x01 + NUD_REACHABLE = 0x02 + NUD_STALE = 0x04 + NUD_DELAY = 0x08 + NUD_PROBE = 0x10 + NUD_FAILED = 0x20 + NUD_NOARP = 0x40 + NUD_PERMANENT = 0x80 +) + +// Neighbor Flags +const ( + NTF_USE = 0x01 + NTF_SELF = 0x02 + NTF_MASTER = 0x04 + NTF_PROXY = 0x08 + NTF_ROUTER = 0x80 +) + +type Ndmsg struct { + Family uint8 + Index uint32 + State uint16 + Flags uint8 + Type uint8 +} + +func deserializeNdmsg(b []byte) *Ndmsg { + var dummy Ndmsg + return (*Ndmsg)(unsafe.Pointer(&b[0:unsafe.Sizeof(dummy)][0])) +} + +func (msg *Ndmsg) Serialize() []byte { + return (*(*[unsafe.Sizeof(*msg)]byte)(unsafe.Pointer(msg)))[:] +} + +func (msg *Ndmsg) Len() int { + return int(unsafe.Sizeof(*msg)) +} + +// NeighAdd will add an IP to MAC mapping to the ARP table +// Equivalent to: `ip neigh add ....` +func NeighAdd(neigh *Neigh) error { + return neighAdd(neigh, syscall.NLM_F_CREATE|syscall.NLM_F_EXCL) +} + +// NeighSet will add or replace an IP to MAC mapping to the ARP table +// Equivalent to: `ip neigh replace....` +func NeighSet(neigh *Neigh) error 
{ + return neighAdd(neigh, syscall.NLM_F_CREATE|syscall.NLM_F_REPLACE) +} + +// NeighAppend will append an entry to FDB +// Equivalent to: `bridge fdb append...` +func NeighAppend(neigh *Neigh) error { + return neighAdd(neigh, syscall.NLM_F_CREATE|syscall.NLM_F_APPEND) +} + +func neighAdd(neigh *Neigh, mode int) error { + req := nl.NewNetlinkRequest(syscall.RTM_NEWNEIGH, mode|syscall.NLM_F_ACK) + return neighHandle(neigh, req) +} + +// NeighDel will delete an IP address from a link device. +// Equivalent to: `ip addr del $addr dev $link` +func NeighDel(neigh *Neigh) error { + req := nl.NewNetlinkRequest(syscall.RTM_DELNEIGH, syscall.NLM_F_ACK) + return neighHandle(neigh, req) +} + +func neighHandle(neigh *Neigh, req *nl.NetlinkRequest) error { + var family int + if neigh.Family > 0 { + family = neigh.Family + } else { + family = nl.GetIPFamily(neigh.IP) + } + + msg := Ndmsg{ + Family: uint8(family), + Index: uint32(neigh.LinkIndex), + State: uint16(neigh.State), + Type: uint8(neigh.Type), + Flags: uint8(neigh.Flags), + } + req.AddData(&msg) + + ipData := neigh.IP.To4() + if ipData == nil { + ipData = neigh.IP.To16() + } + + dstData := nl.NewRtAttr(NDA_DST, ipData) + req.AddData(dstData) + + hwData := nl.NewRtAttr(NDA_LLADDR, []byte(neigh.HardwareAddr)) + req.AddData(hwData) + + _, err := req.Execute(syscall.NETLINK_ROUTE, 0) + return err +} + +// NeighList gets a list of IP-MAC mappings in the system (ARP table). +// Equivalent to: `ip neighbor show`. +// The list can be filtered by link and ip family. 
+func NeighList(linkIndex, family int) ([]Neigh, error) { + req := nl.NewNetlinkRequest(syscall.RTM_GETNEIGH, syscall.NLM_F_DUMP) + msg := Ndmsg{ + Family: uint8(family), + Index: uint32(linkIndex), + } + req.AddData(&msg) + + msgs, err := req.Execute(syscall.NETLINK_ROUTE, syscall.RTM_NEWNEIGH) + if err != nil { + return nil, err + } + + var res []Neigh + for _, m := range msgs { + ndm := deserializeNdmsg(m) + if linkIndex != 0 && int(ndm.Index) != linkIndex { + // Ignore messages from other interfaces + continue + } + + neigh, err := NeighDeserialize(m) + if err != nil { + continue + } + + res = append(res, *neigh) + } + + return res, nil +} + +func NeighDeserialize(m []byte) (*Neigh, error) { + msg := deserializeNdmsg(m) + + neigh := Neigh{ + LinkIndex: int(msg.Index), + Family: int(msg.Family), + State: int(msg.State), + Type: int(msg.Type), + Flags: int(msg.Flags), + } + + attrs, err := nl.ParseRouteAttr(m[msg.Len():]) + if err != nil { + return nil, err + } + + for _, attr := range attrs { + switch attr.Attr.Type { + case NDA_DST: + neigh.IP = net.IP(attr.Value) + case NDA_LLADDR: + neigh.HardwareAddr = net.HardwareAddr(attr.Value) + } + } + + return &neigh, nil +} diff --git a/networking/libsnnet/vendor/github.com/vishvananda/netlink/neigh_test.go b/networking/libsnnet/vendor/github.com/vishvananda/netlink/neigh_test.go new file mode 100644 index 000000000..50da59c5c --- /dev/null +++ b/networking/libsnnet/vendor/github.com/vishvananda/netlink/neigh_test.go @@ -0,0 +1,104 @@ +package netlink + +import ( + "net" + "testing" +) + +type arpEntry struct { + ip net.IP + mac net.HardwareAddr +} + +func parseMAC(s string) net.HardwareAddr { + m, err := net.ParseMAC(s) + if err != nil { + panic(err) + } + return m +} + +func dumpContains(dump []Neigh, e arpEntry) bool { + for _, n := range dump { + if n.IP.Equal(e.ip) && (n.State&NUD_INCOMPLETE) == 0 { + return true + } + } + return false +} + +func TestNeighAddDel(t *testing.T) { + tearDown := setUpNetlinkTest(t) + 
defer tearDown() + + dummy := Dummy{LinkAttrs{Name: "neigh0"}} + if err := LinkAdd(&dummy); err != nil { + t.Fatal(err) + } + + ensureIndex(dummy.Attrs()) + + arpTable := []arpEntry{ + {net.ParseIP("10.99.0.1"), parseMAC("aa:bb:cc:dd:00:01")}, + {net.ParseIP("10.99.0.2"), parseMAC("aa:bb:cc:dd:00:02")}, + {net.ParseIP("10.99.0.3"), parseMAC("aa:bb:cc:dd:00:03")}, + {net.ParseIP("10.99.0.4"), parseMAC("aa:bb:cc:dd:00:04")}, + {net.ParseIP("10.99.0.5"), parseMAC("aa:bb:cc:dd:00:05")}, + } + + // Add the arpTable + for _, entry := range arpTable { + err := NeighAdd(&Neigh{ + LinkIndex: dummy.Index, + State: NUD_REACHABLE, + IP: entry.ip, + HardwareAddr: entry.mac, + }) + + if err != nil { + t.Errorf("Failed to NeighAdd: %v", err) + } + } + + // Dump and see that all added entries are there + dump, err := NeighList(dummy.Index, 0) + if err != nil { + t.Errorf("Failed to NeighList: %v", err) + } + + for _, entry := range arpTable { + if !dumpContains(dump, entry) { + t.Errorf("Dump does not contain: %v", entry) + } + } + + // Delete the arpTable + for _, entry := range arpTable { + err := NeighDel(&Neigh{ + LinkIndex: dummy.Index, + IP: entry.ip, + HardwareAddr: entry.mac, + }) + + if err != nil { + t.Errorf("Failed to NeighDel: %v", err) + } + } + + // TODO: seems not working because of cache + //// Dump and see that none of deleted entries are there + //dump, err = NeighList(dummy.Index, 0) + //if err != nil { + //t.Errorf("Failed to NeighList: %v", err) + //} + + //for _, entry := range arpTable { + //if dumpContains(dump, entry) { + //t.Errorf("Dump contains: %v", entry) + //} + //} + + if err := LinkDel(&dummy); err != nil { + t.Fatal(err) + } +} diff --git a/networking/libsnnet/vendor/github.com/vishvananda/netlink/netlink.go b/networking/libsnnet/vendor/github.com/vishvananda/netlink/netlink.go new file mode 100644 index 000000000..687d8760d --- /dev/null +++ b/networking/libsnnet/vendor/github.com/vishvananda/netlink/netlink.go @@ -0,0 +1,42 @@ +// Package 
// ParseIPNet parses a string in ip/net format and returns a net.IPNet.
// Unlike net.ParseCIDR, the IP field of the result keeps the exact
// address that was given rather than the base address of the range.
func ParseIPNet(s string) (*net.IPNet, error) {
	addr, network, err := net.ParseCIDR(s)
	if err != nil {
		return nil, err
	}
	result := net.IPNet{IP: addr, Mask: network.Mask}
	return &result, nil
}
+func NewIPNet(ip net.IP) *net.IPNet { + if ip.To4() != nil { + return &net.IPNet{IP: ip, Mask: net.CIDRMask(32, 32)} + } + return &net.IPNet{IP: ip, Mask: net.CIDRMask(128, 128)} +} diff --git a/networking/libsnnet/vendor/github.com/vishvananda/netlink/netlink_test.go b/networking/libsnnet/vendor/github.com/vishvananda/netlink/netlink_test.go new file mode 100644 index 000000000..3292b750a --- /dev/null +++ b/networking/libsnnet/vendor/github.com/vishvananda/netlink/netlink_test.go @@ -0,0 +1,34 @@ +package netlink + +import ( + "log" + "os" + "runtime" + "testing" + + "github.com/vishvananda/netns" +) + +type tearDownNetlinkTest func() + +func setUpNetlinkTest(t *testing.T) tearDownNetlinkTest { + if os.Getuid() != 0 { + msg := "Skipped test because it requires root privileges." + log.Printf(msg) + t.Skip(msg) + } + + // new temporary namespace so we don't pollute the host + // lock thread since the namespace is thread local + runtime.LockOSThread() + var err error + ns, err := netns.New() + if err != nil { + t.Fatal("Failed to create newns", ns) + } + + return func() { + ns.Close() + runtime.UnlockOSThread() + } +} diff --git a/networking/libsnnet/vendor/github.com/vishvananda/netlink/netlink_unspecified.go b/networking/libsnnet/vendor/github.com/vishvananda/netlink/netlink_unspecified.go new file mode 100644 index 000000000..10c49c1bf --- /dev/null +++ b/networking/libsnnet/vendor/github.com/vishvananda/netlink/netlink_unspecified.go @@ -0,0 +1,143 @@ +// +build !linux + +package netlink + +import ( + "errors" +) + +var ( + ErrNotImplemented = errors.New("not implemented") +) + +func LinkSetUp(link *Link) error { + return ErrNotImplemented +} + +func LinkSetDown(link *Link) error { + return ErrNotImplemented +} + +func LinkSetMTU(link *Link, mtu int) error { + return ErrNotImplemented +} + +func LinkSetMaster(link *Link, master *Link) error { + return ErrNotImplemented +} + +func LinkSetNsPid(link *Link, nspid int) error { + return ErrNotImplemented +} + +func 
LinkSetNsFd(link *Link, fd int) error { + return ErrNotImplemented +} + +func LinkAdd(link *Link) error { + return ErrNotImplemented +} + +func LinkDel(link *Link) error { + return ErrNotImplemented +} + +func SetHairpin(link Link, mode bool) error { + return ErrNotImplemented +} + +func SetGuard(link Link, mode bool) error { + return ErrNotImplemented +} + +func SetFastLeave(link Link, mode bool) error { + return ErrNotImplemented +} + +func SetLearning(link Link, mode bool) error { + return ErrNotImplemented +} + +func SetRootBlock(link Link, mode bool) error { + return ErrNotImplemented +} + +func SetFlood(link Link, mode bool) error { + return ErrNotImplemented +} + +func LinkList() ([]Link, error) { + return nil, ErrNotImplemented +} + +func AddrAdd(link *Link, addr *Addr) error { + return ErrNotImplemented +} + +func AddrDel(link *Link, addr *Addr) error { + return ErrNotImplemented +} + +func AddrList(link *Link, family int) ([]Addr, error) { + return nil, ErrNotImplemented +} + +func RouteAdd(route *Route) error { + return ErrNotImplemented +} + +func RouteDel(route *Route) error { + return ErrNotImplemented +} + +func RouteList(link *Link, family int) ([]Route, error) { + return nil, ErrNotImplemented +} + +func XfrmPolicyAdd(policy *XfrmPolicy) error { + return ErrNotImplemented +} + +func XfrmPolicyDel(policy *XfrmPolicy) error { + return ErrNotImplemented +} + +func XfrmPolicyList(family int) ([]XfrmPolicy, error) { + return nil, ErrNotImplemented +} + +func XfrmStateAdd(policy *XfrmState) error { + return ErrNotImplemented +} + +func XfrmStateDel(policy *XfrmState) error { + return ErrNotImplemented +} + +func XfrmStateList(family int) ([]XfrmState, error) { + return nil, ErrNotImplemented +} + +func NeighAdd(neigh *Neigh) error { + return ErrNotImplemented +} + +func NeighSet(neigh *Neigh) error { + return ErrNotImplemented +} + +func NeighAppend(neigh *Neigh) error { + return ErrNotImplemented +} + +func NeighDel(neigh *Neigh) error { + return 
ErrNotImplemented +} + +func NeighList(linkIndex, family int) ([]Neigh, error) { + return nil, ErrNotImplemented +} + +func NeighDeserialize(m []byte) (*Ndmsg, *Neigh, error) { + return nil, nil, ErrNotImplemented +} diff --git a/networking/libsnnet/vendor/github.com/vishvananda/netlink/nl/addr_linux.go b/networking/libsnnet/vendor/github.com/vishvananda/netlink/nl/addr_linux.go new file mode 100644 index 000000000..17088fa0c --- /dev/null +++ b/networking/libsnnet/vendor/github.com/vishvananda/netlink/nl/addr_linux.go @@ -0,0 +1,47 @@ +package nl + +import ( + "syscall" + "unsafe" +) + +type IfAddrmsg struct { + syscall.IfAddrmsg +} + +func NewIfAddrmsg(family int) *IfAddrmsg { + return &IfAddrmsg{ + IfAddrmsg: syscall.IfAddrmsg{ + Family: uint8(family), + }, + } +} + +// struct ifaddrmsg { +// __u8 ifa_family; +// __u8 ifa_prefixlen; /* The prefix length */ +// __u8 ifa_flags; /* Flags */ +// __u8 ifa_scope; /* Address scope */ +// __u32 ifa_index; /* Link index */ +// }; + +// type IfAddrmsg struct { +// Family uint8 +// Prefixlen uint8 +// Flags uint8 +// Scope uint8 +// Index uint32 +// } +// SizeofIfAddrmsg = 0x8 + +func DeserializeIfAddrmsg(b []byte) *IfAddrmsg { + return (*IfAddrmsg)(unsafe.Pointer(&b[0:syscall.SizeofIfAddrmsg][0])) +} + +func (msg *IfAddrmsg) Serialize() []byte { + return (*(*[syscall.SizeofIfAddrmsg]byte)(unsafe.Pointer(msg)))[:] +} + +func (msg *IfAddrmsg) Len() int { + return syscall.SizeofIfAddrmsg +} diff --git a/networking/libsnnet/vendor/github.com/vishvananda/netlink/nl/addr_linux_test.go b/networking/libsnnet/vendor/github.com/vishvananda/netlink/nl/addr_linux_test.go new file mode 100644 index 000000000..98c3b211f --- /dev/null +++ b/networking/libsnnet/vendor/github.com/vishvananda/netlink/nl/addr_linux_test.go @@ -0,0 +1,39 @@ +package nl + +import ( + "bytes" + "crypto/rand" + "encoding/binary" + "syscall" + "testing" +) + +func (msg *IfAddrmsg) write(b []byte) { + native := NativeEndian() + b[0] = msg.Family + b[1] = 
msg.Prefixlen + b[2] = msg.Flags + b[3] = msg.Scope + native.PutUint32(b[4:8], msg.Index) +} + +func (msg *IfAddrmsg) serializeSafe() []byte { + len := syscall.SizeofIfAddrmsg + b := make([]byte, len) + msg.write(b) + return b +} + +func deserializeIfAddrmsgSafe(b []byte) *IfAddrmsg { + var msg = IfAddrmsg{} + binary.Read(bytes.NewReader(b[0:syscall.SizeofIfAddrmsg]), NativeEndian(), &msg) + return &msg +} + +func TestIfAddrmsgDeserializeSerialize(t *testing.T) { + var orig = make([]byte, syscall.SizeofIfAddrmsg) + rand.Read(orig) + safemsg := deserializeIfAddrmsgSafe(orig) + msg := DeserializeIfAddrmsg(orig) + testDeserializeSerialize(t, orig, safemsg, msg) +} diff --git a/networking/libsnnet/vendor/github.com/vishvananda/netlink/nl/link_linux.go b/networking/libsnnet/vendor/github.com/vishvananda/netlink/nl/link_linux.go new file mode 100644 index 000000000..8554a5d4a --- /dev/null +++ b/networking/libsnnet/vendor/github.com/vishvananda/netlink/nl/link_linux.go @@ -0,0 +1,184 @@ +package nl + +const ( + DEFAULT_CHANGE = 0xFFFFFFFF +) + +const ( + IFLA_INFO_UNSPEC = iota + IFLA_INFO_KIND + IFLA_INFO_DATA + IFLA_INFO_XSTATS + IFLA_INFO_MAX = IFLA_INFO_XSTATS +) + +const ( + IFLA_VLAN_UNSPEC = iota + IFLA_VLAN_ID + IFLA_VLAN_FLAGS + IFLA_VLAN_EGRESS_QOS + IFLA_VLAN_INGRESS_QOS + IFLA_VLAN_PROTOCOL + IFLA_VLAN_MAX = IFLA_VLAN_PROTOCOL +) + +const ( + VETH_INFO_UNSPEC = iota + VETH_INFO_PEER + VETH_INFO_MAX = VETH_INFO_PEER +) + +const ( + IFLA_VXLAN_UNSPEC = iota + IFLA_VXLAN_ID + IFLA_VXLAN_GROUP + IFLA_VXLAN_LINK + IFLA_VXLAN_LOCAL + IFLA_VXLAN_TTL + IFLA_VXLAN_TOS + IFLA_VXLAN_LEARNING + IFLA_VXLAN_AGEING + IFLA_VXLAN_LIMIT + IFLA_VXLAN_PORT_RANGE + IFLA_VXLAN_PROXY + IFLA_VXLAN_RSC + IFLA_VXLAN_L2MISS + IFLA_VXLAN_L3MISS + IFLA_VXLAN_PORT + IFLA_VXLAN_GROUP6 + IFLA_VXLAN_LOCAL6 + IFLA_VXLAN_UDP_CSUM + IFLA_VXLAN_UDP_ZERO_CSUM6_TX + IFLA_VXLAN_UDP_ZERO_CSUM6_RX + IFLA_VXLAN_REMCSUM_TX + IFLA_VXLAN_REMCSUM_RX + IFLA_VXLAN_GBP + IFLA_VXLAN_REMCSUM_NOPARTIAL + 
IFLA_VXLAN_FLOWBASED + IFLA_VXLAN_MAX = IFLA_VXLAN_FLOWBASED +) + +const ( + BRIDGE_MODE_UNSPEC = iota + BRIDGE_MODE_HAIRPIN +) + +const ( + IFLA_BRPORT_UNSPEC = iota + IFLA_BRPORT_STATE + IFLA_BRPORT_PRIORITY + IFLA_BRPORT_COST + IFLA_BRPORT_MODE + IFLA_BRPORT_GUARD + IFLA_BRPORT_PROTECT + IFLA_BRPORT_FAST_LEAVE + IFLA_BRPORT_LEARNING + IFLA_BRPORT_UNICAST_FLOOD + IFLA_BRPORT_MAX = IFLA_BRPORT_UNICAST_FLOOD +) + +const ( + IFLA_IPVLAN_UNSPEC = iota + IFLA_IPVLAN_MODE + IFLA_IPVLAN_MAX = IFLA_IPVLAN_MODE +) + +const ( + // not defined in syscall + IFLA_NET_NS_FD = 28 +) + +const ( + IFLA_MACVLAN_UNSPEC = iota + IFLA_MACVLAN_MODE + IFLA_MACVLAN_FLAGS + IFLA_MACVLAN_MAX = IFLA_MACVLAN_FLAGS +) + +const ( + MACVLAN_MODE_PRIVATE = 1 + MACVLAN_MODE_VEPA = 2 + MACVLAN_MODE_BRIDGE = 4 + MACVLAN_MODE_PASSTHRU = 8 + MACVLAN_MODE_SOURCE = 16 +) + +const ( + IFLA_BOND_UNSPEC = iota + IFLA_BOND_MODE + IFLA_BOND_ACTIVE_SLAVE + IFLA_BOND_MIIMON + IFLA_BOND_UPDELAY + IFLA_BOND_DOWNDELAY + IFLA_BOND_USE_CARRIER + IFLA_BOND_ARP_INTERVAL + IFLA_BOND_ARP_IP_TARGET + IFLA_BOND_ARP_VALIDATE + IFLA_BOND_ARP_ALL_TARGETS + IFLA_BOND_PRIMARY + IFLA_BOND_PRIMARY_RESELECT + IFLA_BOND_FAIL_OVER_MAC + IFLA_BOND_XMIT_HASH_POLICY + IFLA_BOND_RESEND_IGMP + IFLA_BOND_NUM_PEER_NOTIF + IFLA_BOND_ALL_SLAVES_ACTIVE + IFLA_BOND_MIN_LINKS + IFLA_BOND_LP_INTERVAL + IFLA_BOND_PACKETS_PER_SLAVE + IFLA_BOND_AD_LACP_RATE + IFLA_BOND_AD_SELECT + IFLA_BOND_AD_INFO +) + +const ( + IFLA_BOND_AD_INFO_UNSPEC = iota + IFLA_BOND_AD_INFO_AGGREGATOR + IFLA_BOND_AD_INFO_NUM_PORTS + IFLA_BOND_AD_INFO_ACTOR_KEY + IFLA_BOND_AD_INFO_PARTNER_KEY + IFLA_BOND_AD_INFO_PARTNER_MAC +) + +const ( + IFLA_BOND_SLAVE_UNSPEC = iota + IFLA_BOND_SLAVE_STATE + IFLA_BOND_SLAVE_MII_STATUS + IFLA_BOND_SLAVE_LINK_FAILURE_COUNT + IFLA_BOND_SLAVE_PERM_HWADDR + IFLA_BOND_SLAVE_QUEUE_ID + IFLA_BOND_SLAVE_AD_AGGREGATOR_ID +) + +const ( + IFLA_GRE_UNSPEC = iota + IFLA_GRE_LINK + IFLA_GRE_IFLAGS + IFLA_GRE_OFLAGS + IFLA_GRE_IKEY + IFLA_GRE_OKEY + 
IFLA_GRE_LOCAL + IFLA_GRE_REMOTE + IFLA_GRE_TTL + IFLA_GRE_TOS + IFLA_GRE_PMTUDISC + IFLA_GRE_ENCAP_LIMIT + IFLA_GRE_FLOWINFO + IFLA_GRE_FLAGS + IFLA_GRE_ENCAP_TYPE + IFLA_GRE_ENCAP_FLAGS + IFLA_GRE_ENCAP_SPORT + IFLA_GRE_ENCAP_DPORT + IFLA_GRE_COLLECT_METADATA + IFLA_GRE_MAX = IFLA_GRE_COLLECT_METADATA +) + +const ( + GRE_CSUM = 0x8000 + GRE_ROUTING = 0x4000 + GRE_KEY = 0x2000 + GRE_SEQ = 0x1000 + GRE_STRICT = 0x0800 + GRE_REC = 0x0700 + GRE_FLAGS = 0x00F8 + GRE_VERSION = 0x0007 +) diff --git a/networking/libsnnet/vendor/github.com/vishvananda/netlink/nl/nl_linux.go b/networking/libsnnet/vendor/github.com/vishvananda/netlink/nl/nl_linux.go new file mode 100644 index 000000000..e3afb5c23 --- /dev/null +++ b/networking/libsnnet/vendor/github.com/vishvananda/netlink/nl/nl_linux.go @@ -0,0 +1,424 @@ +// Package nl has low level primitives for making Netlink calls. +package nl + +import ( + "bytes" + "encoding/binary" + "fmt" + "net" + "sync/atomic" + "syscall" + "unsafe" +) + +const ( + // Family type definitions + FAMILY_ALL = syscall.AF_UNSPEC + FAMILY_V4 = syscall.AF_INET + FAMILY_V6 = syscall.AF_INET6 +) + +var nextSeqNr uint32 + +// GetIPFamily returns the family type of a net.IP. 
+func GetIPFamily(ip net.IP) int { + if len(ip) <= net.IPv4len { + return FAMILY_V4 + } + if ip.To4() != nil { + return FAMILY_V4 + } + return FAMILY_V6 +} + +var nativeEndian binary.ByteOrder + +// Get native endianness for the system +func NativeEndian() binary.ByteOrder { + if nativeEndian == nil { + var x uint32 = 0x01020304 + if *(*byte)(unsafe.Pointer(&x)) == 0x01 { + nativeEndian = binary.BigEndian + } else { + nativeEndian = binary.LittleEndian + } + } + return nativeEndian +} + +// Byte swap a 16 bit value if we aren't big endian +func Swap16(i uint16) uint16 { + if NativeEndian() == binary.BigEndian { + return i + } + return (i&0xff00)>>8 | (i&0xff)<<8 +} + +// Byte swap a 32 bit value if aren't big endian +func Swap32(i uint32) uint32 { + if NativeEndian() == binary.BigEndian { + return i + } + return (i&0xff000000)>>24 | (i&0xff0000)>>8 | (i&0xff00)<<8 | (i&0xff)<<24 +} + +type NetlinkRequestData interface { + Len() int + Serialize() []byte +} + +// IfInfomsg is related to links, but it is used for list requests as well +type IfInfomsg struct { + syscall.IfInfomsg +} + +// Create an IfInfomsg with family specified +func NewIfInfomsg(family int) *IfInfomsg { + return &IfInfomsg{ + IfInfomsg: syscall.IfInfomsg{ + Family: uint8(family), + }, + } +} + +func DeserializeIfInfomsg(b []byte) *IfInfomsg { + return (*IfInfomsg)(unsafe.Pointer(&b[0:syscall.SizeofIfInfomsg][0])) +} + +func (msg *IfInfomsg) Serialize() []byte { + return (*(*[syscall.SizeofIfInfomsg]byte)(unsafe.Pointer(msg)))[:] +} + +func (msg *IfInfomsg) Len() int { + return syscall.SizeofIfInfomsg +} + +func rtaAlignOf(attrlen int) int { + return (attrlen + syscall.RTA_ALIGNTO - 1) & ^(syscall.RTA_ALIGNTO - 1) +} + +func NewIfInfomsgChild(parent *RtAttr, family int) *IfInfomsg { + msg := NewIfInfomsg(family) + parent.children = append(parent.children, msg) + return msg +} + +// Extend RtAttr to handle data and children +type RtAttr struct { + syscall.RtAttr + Data []byte + children 
[]NetlinkRequestData +} + +// Create a new Extended RtAttr object +func NewRtAttr(attrType int, data []byte) *RtAttr { + return &RtAttr{ + RtAttr: syscall.RtAttr{ + Type: uint16(attrType), + }, + children: []NetlinkRequestData{}, + Data: data, + } +} + +// Create a new RtAttr obj anc add it as a child of an existing object +func NewRtAttrChild(parent *RtAttr, attrType int, data []byte) *RtAttr { + attr := NewRtAttr(attrType, data) + parent.children = append(parent.children, attr) + return attr +} + +func (a *RtAttr) Len() int { + if len(a.children) == 0 { + return (syscall.SizeofRtAttr + len(a.Data)) + } + + l := 0 + for _, child := range a.children { + l += rtaAlignOf(child.Len()) + } + l += syscall.SizeofRtAttr + return rtaAlignOf(l + len(a.Data)) +} + +// Serialize the RtAttr into a byte array +// This can't just unsafe.cast because it must iterate through children. +func (a *RtAttr) Serialize() []byte { + native := NativeEndian() + + length := a.Len() + buf := make([]byte, rtaAlignOf(length)) + + next := 4 + if a.Data != nil { + copy(buf[next:], a.Data) + next += rtaAlignOf(len(a.Data)) + } + if len(a.children) > 0 { + for _, child := range a.children { + childBuf := child.Serialize() + copy(buf[next:], childBuf) + next += rtaAlignOf(len(childBuf)) + } + } + + if l := uint16(length); l != 0 { + native.PutUint16(buf[0:2], l) + } + native.PutUint16(buf[2:4], a.Type) + return buf +} + +type NetlinkRequest struct { + syscall.NlMsghdr + Data []NetlinkRequestData +} + +// Serialize the Netlink Request into a byte array +func (req *NetlinkRequest) Serialize() []byte { + length := syscall.SizeofNlMsghdr + dataBytes := make([][]byte, len(req.Data)) + for i, data := range req.Data { + dataBytes[i] = data.Serialize() + length = length + len(dataBytes[i]) + } + req.Len = uint32(length) + b := make([]byte, length) + hdr := (*(*[syscall.SizeofNlMsghdr]byte)(unsafe.Pointer(req)))[:] + next := syscall.SizeofNlMsghdr + copy(b[0:next], hdr) + for _, data := range dataBytes { + 
for _, dataByte := range data { + b[next] = dataByte + next = next + 1 + } + } + return b +} + +func (req *NetlinkRequest) AddData(data NetlinkRequestData) { + if data != nil { + req.Data = append(req.Data, data) + } +} + +// Execute the request against a the given sockType. +// Returns a list of netlink messages in seriaized format, optionally filtered +// by resType. +func (req *NetlinkRequest) Execute(sockType int, resType uint16) ([][]byte, error) { + s, err := getNetlinkSocket(sockType) + if err != nil { + return nil, err + } + defer s.Close() + + if err := s.Send(req); err != nil { + return nil, err + } + + pid, err := s.GetPid() + if err != nil { + return nil, err + } + + var res [][]byte + +done: + for { + msgs, err := s.Receive() + if err != nil { + return nil, err + } + for _, m := range msgs { + if m.Header.Seq != req.Seq { + return nil, fmt.Errorf("Wrong Seq nr %d, expected 1", m.Header.Seq) + } + if m.Header.Pid != pid { + return nil, fmt.Errorf("Wrong pid %d, expected %d", m.Header.Pid, pid) + } + if m.Header.Type == syscall.NLMSG_DONE { + break done + } + if m.Header.Type == syscall.NLMSG_ERROR { + native := NativeEndian() + error := int32(native.Uint32(m.Data[0:4])) + if error == 0 { + break done + } + return nil, syscall.Errno(-error) + } + if resType != 0 && m.Header.Type != resType { + continue + } + res = append(res, m.Data) + if m.Header.Flags&syscall.NLM_F_MULTI == 0 { + break done + } + } + } + return res, nil +} + +// Create a new netlink request from proto and flags +// Note the Len value will be inaccurate once data is added until +// the message is serialized +func NewNetlinkRequest(proto, flags int) *NetlinkRequest { + return &NetlinkRequest{ + NlMsghdr: syscall.NlMsghdr{ + Len: uint32(syscall.SizeofNlMsghdr), + Type: uint16(proto), + Flags: syscall.NLM_F_REQUEST | uint16(flags), + Seq: atomic.AddUint32(&nextSeqNr, 1), + }, + } +} + +type NetlinkSocket struct { + fd int + lsa syscall.SockaddrNetlink +} + +func getNetlinkSocket(protocol 
int) (*NetlinkSocket, error) { + fd, err := syscall.Socket(syscall.AF_NETLINK, syscall.SOCK_RAW, protocol) + if err != nil { + return nil, err + } + s := &NetlinkSocket{ + fd: fd, + } + s.lsa.Family = syscall.AF_NETLINK + if err := syscall.Bind(fd, &s.lsa); err != nil { + syscall.Close(fd) + return nil, err + } + + return s, nil +} + +// Create a netlink socket with a given protocol (e.g. NETLINK_ROUTE) +// and subscribe it to multicast groups passed in variable argument list. +// Returns the netlink socket on which Receive() method can be called +// to retrieve the messages from the kernel. +func Subscribe(protocol int, groups ...uint) (*NetlinkSocket, error) { + fd, err := syscall.Socket(syscall.AF_NETLINK, syscall.SOCK_RAW, protocol) + if err != nil { + return nil, err + } + s := &NetlinkSocket{ + fd: fd, + } + s.lsa.Family = syscall.AF_NETLINK + + for _, g := range groups { + s.lsa.Groups |= (1 << (g - 1)) + } + + if err := syscall.Bind(fd, &s.lsa); err != nil { + syscall.Close(fd) + return nil, err + } + + return s, nil +} + +func (s *NetlinkSocket) Close() { + syscall.Close(s.fd) +} + +func (s *NetlinkSocket) GetFd() int { + return s.fd +} + +func (s *NetlinkSocket) Send(request *NetlinkRequest) error { + if err := syscall.Sendto(s.fd, request.Serialize(), 0, &s.lsa); err != nil { + return err + } + return nil +} + +func (s *NetlinkSocket) Receive() ([]syscall.NetlinkMessage, error) { + rb := make([]byte, syscall.Getpagesize()) + nr, _, err := syscall.Recvfrom(s.fd, rb, 0) + if err != nil { + return nil, err + } + if nr < syscall.NLMSG_HDRLEN { + return nil, fmt.Errorf("Got short response from netlink") + } + rb = rb[:nr] + return syscall.ParseNetlinkMessage(rb) +} + +func (s *NetlinkSocket) GetPid() (uint32, error) { + lsa, err := syscall.Getsockname(s.fd) + if err != nil { + return 0, err + } + switch v := lsa.(type) { + case *syscall.SockaddrNetlink: + return v.Pid, nil + } + return 0, fmt.Errorf("Wrong socket type") +} + +func ZeroTerminated(s string) 
// BytesToString converts a NUL-terminated byte buffer (as returned by
// the kernel) to a Go string, dropping the first NUL byte and anything
// after it. If the buffer contains no NUL byte, the whole buffer is
// returned: previously bytes.Index returned -1 in that case and the
// b[:n] slice expression panicked with a bounds error.
func BytesToString(b []byte) string {
	if n := bytes.IndexByte(b, 0); n >= 0 {
		return string(b[:n])
	}
	return string(b)
}
*testing.T, orig []byte, safemsg testSerializer, msg testSerializer) { + if !reflect.DeepEqual(safemsg, msg) { + t.Fatal("Deserialization failed.\n", safemsg, "\n", msg) + } + safe := msg.serializeSafe() + if !bytes.Equal(safe, orig) { + t.Fatal("Safe serialization failed.\n", safe, "\n", orig) + } + b := msg.Serialize() + if !bytes.Equal(b, safe) { + t.Fatal("Serialization failed.\n", b, "\n", safe) + } +} + +func (msg *IfInfomsg) write(b []byte) { + native := NativeEndian() + b[0] = msg.Family + b[1] = msg.X__ifi_pad + native.PutUint16(b[2:4], msg.Type) + native.PutUint32(b[4:8], uint32(msg.Index)) + native.PutUint32(b[8:12], msg.Flags) + native.PutUint32(b[12:16], msg.Change) +} + +func (msg *IfInfomsg) serializeSafe() []byte { + length := syscall.SizeofIfInfomsg + b := make([]byte, length) + msg.write(b) + return b +} + +func deserializeIfInfomsgSafe(b []byte) *IfInfomsg { + var msg = IfInfomsg{} + binary.Read(bytes.NewReader(b[0:syscall.SizeofIfInfomsg]), NativeEndian(), &msg) + return &msg +} + +func TestIfInfomsgDeserializeSerialize(t *testing.T) { + var orig = make([]byte, syscall.SizeofIfInfomsg) + rand.Read(orig) + safemsg := deserializeIfInfomsgSafe(orig) + msg := DeserializeIfInfomsg(orig) + testDeserializeSerialize(t, orig, safemsg, msg) +} diff --git a/networking/libsnnet/vendor/github.com/vishvananda/netlink/nl/route_linux.go b/networking/libsnnet/vendor/github.com/vishvananda/netlink/nl/route_linux.go new file mode 100644 index 000000000..447e83e5a --- /dev/null +++ b/networking/libsnnet/vendor/github.com/vishvananda/netlink/nl/route_linux.go @@ -0,0 +1,42 @@ +package nl + +import ( + "syscall" + "unsafe" +) + +type RtMsg struct { + syscall.RtMsg +} + +func NewRtMsg() *RtMsg { + return &RtMsg{ + RtMsg: syscall.RtMsg{ + Table: syscall.RT_TABLE_MAIN, + Scope: syscall.RT_SCOPE_UNIVERSE, + Protocol: syscall.RTPROT_BOOT, + Type: syscall.RTN_UNICAST, + }, + } +} + +func NewRtDelMsg() *RtMsg { + return &RtMsg{ + RtMsg: syscall.RtMsg{ + Table: 
syscall.RT_TABLE_MAIN, + Scope: syscall.RT_SCOPE_NOWHERE, + }, + } +} + +func (msg *RtMsg) Len() int { + return syscall.SizeofRtMsg +} + +func DeserializeRtMsg(b []byte) *RtMsg { + return (*RtMsg)(unsafe.Pointer(&b[0:syscall.SizeofRtMsg][0])) +} + +func (msg *RtMsg) Serialize() []byte { + return (*(*[syscall.SizeofRtMsg]byte)(unsafe.Pointer(msg)))[:] +} diff --git a/networking/libsnnet/vendor/github.com/vishvananda/netlink/nl/route_linux_test.go b/networking/libsnnet/vendor/github.com/vishvananda/netlink/nl/route_linux_test.go new file mode 100644 index 000000000..ba9c410ee --- /dev/null +++ b/networking/libsnnet/vendor/github.com/vishvananda/netlink/nl/route_linux_test.go @@ -0,0 +1,43 @@ +package nl + +import ( + "bytes" + "crypto/rand" + "encoding/binary" + "syscall" + "testing" +) + +func (msg *RtMsg) write(b []byte) { + native := NativeEndian() + b[0] = msg.Family + b[1] = msg.Dst_len + b[2] = msg.Src_len + b[3] = msg.Tos + b[4] = msg.Table + b[5] = msg.Protocol + b[6] = msg.Scope + b[7] = msg.Type + native.PutUint32(b[8:12], msg.Flags) +} + +func (msg *RtMsg) serializeSafe() []byte { + len := syscall.SizeofRtMsg + b := make([]byte, len) + msg.write(b) + return b +} + +func deserializeRtMsgSafe(b []byte) *RtMsg { + var msg = RtMsg{} + binary.Read(bytes.NewReader(b[0:syscall.SizeofRtMsg]), NativeEndian(), &msg) + return &msg +} + +func TestRtMsgDeserializeSerialize(t *testing.T) { + var orig = make([]byte, syscall.SizeofRtMsg) + rand.Read(orig) + safemsg := deserializeRtMsgSafe(orig) + msg := DeserializeRtMsg(orig) + testDeserializeSerialize(t, orig, safemsg, msg) +} diff --git a/networking/libsnnet/vendor/github.com/vishvananda/netlink/nl/syscall.go b/networking/libsnnet/vendor/github.com/vishvananda/netlink/nl/syscall.go new file mode 100644 index 000000000..47aa6322d --- /dev/null +++ b/networking/libsnnet/vendor/github.com/vishvananda/netlink/nl/syscall.go @@ -0,0 +1,37 @@ +package nl + +// syscall package lack of rule atributes type. 
+// Thus there are defined below +const ( + FRA_UNSPEC = iota + FRA_DST /* destination address */ + FRA_SRC /* source address */ + FRA_IIFNAME /* interface name */ + FRA_GOTO /* target to jump to (FR_ACT_GOTO) */ + FRA_UNUSED2 + FRA_PRIORITY /* priority/preference */ + FRA_UNUSED3 + FRA_UNUSED4 + FRA_UNUSED5 + FRA_FWMARK /* mark */ + FRA_FLOW /* flow/class id */ + FRA_TUN_ID + FRA_SUPPRESS_IFGROUP + FRA_SUPPRESS_PREFIXLEN + FRA_TABLE /* Extended table id */ + FRA_FWMASK /* mask for netfilter mark */ + FRA_OIFNAME +) + +// ip rule netlink request types +const ( + FR_ACT_UNSPEC = iota + FR_ACT_TO_TBL /* Pass to fixed table */ + FR_ACT_GOTO /* Jump to another rule */ + FR_ACT_NOP /* No operation */ + FR_ACT_RES3 + FR_ACT_RES4 + FR_ACT_BLACKHOLE /* Drop without notification */ + FR_ACT_UNREACHABLE /* Drop with ENETUNREACH */ + FR_ACT_PROHIBIT /* Drop with EACCES */ +) diff --git a/networking/libsnnet/vendor/github.com/vishvananda/netlink/nl/tc_linux.go b/networking/libsnnet/vendor/github.com/vishvananda/netlink/nl/tc_linux.go new file mode 100644 index 000000000..aa5900577 --- /dev/null +++ b/networking/libsnnet/vendor/github.com/vishvananda/netlink/nl/tc_linux.go @@ -0,0 +1,627 @@ +package nl + +import ( + "unsafe" +) + +// LinkLayer +const ( + LINKLAYER_UNSPEC = iota + LINKLAYER_ETHERNET + LINKLAYER_ATM +) + +// ATM +const ( + ATM_CELL_PAYLOAD = 48 + ATM_CELL_SIZE = 53 +) + +const TC_LINKLAYER_MASK = 0x0F + +// Police +const ( + TCA_POLICE_UNSPEC = iota + TCA_POLICE_TBF + TCA_POLICE_RATE + TCA_POLICE_PEAKRATE + TCA_POLICE_AVRATE + TCA_POLICE_RESULT + TCA_POLICE_MAX = TCA_POLICE_RESULT +) + +// Message types +const ( + TCA_UNSPEC = iota + TCA_KIND + TCA_OPTIONS + TCA_STATS + TCA_XSTATS + TCA_RATE + TCA_FCNT + TCA_STATS2 + TCA_STAB + TCA_MAX = TCA_STAB +) + +const ( + TCA_ACT_TAB = 1 + TCAA_MAX = 1 +) + +const ( + TCA_PRIO_UNSPEC = iota + TCA_PRIO_MQ + TCA_PRIO_MAX = TCA_PRIO_MQ +) + +const ( + SizeofTcMsg = 0x14 + SizeofTcActionMsg = 0x04 + SizeofTcPrioMap = 0x14 + 
SizeofTcRateSpec = 0x0c + SizeofTcNetemQopt = 0x18 + SizeofTcNetemCorr = 0x0c + SizeofTcNetemReorder = 0x08 + SizeofTcNetemCorrupt = 0x08 + SizeofTcTbfQopt = 2*SizeofTcRateSpec + 0x0c + SizeofTcHtbCopt = 2*SizeofTcRateSpec + 0x14 + SizeofTcHtbGlob = 0x14 + SizeofTcU32Key = 0x10 + SizeofTcU32Sel = 0x10 // without keys + SizeofTcMirred = 0x1c + SizeofTcPolice = 2*SizeofTcRateSpec + 0x20 +) + +// struct tcmsg { +// unsigned char tcm_family; +// unsigned char tcm__pad1; +// unsigned short tcm__pad2; +// int tcm_ifindex; +// __u32 tcm_handle; +// __u32 tcm_parent; +// __u32 tcm_info; +// }; + +type TcMsg struct { + Family uint8 + Pad [3]byte + Ifindex int32 + Handle uint32 + Parent uint32 + Info uint32 +} + +func (msg *TcMsg) Len() int { + return SizeofTcMsg +} + +func DeserializeTcMsg(b []byte) *TcMsg { + return (*TcMsg)(unsafe.Pointer(&b[0:SizeofTcMsg][0])) +} + +func (x *TcMsg) Serialize() []byte { + return (*(*[SizeofTcMsg]byte)(unsafe.Pointer(x)))[:] +} + +// struct tcamsg { +// unsigned char tca_family; +// unsigned char tca__pad1; +// unsigned short tca__pad2; +// }; + +type TcActionMsg struct { + Family uint8 + Pad [3]byte +} + +func (msg *TcActionMsg) Len() int { + return SizeofTcActionMsg +} + +func DeserializeTcActionMsg(b []byte) *TcActionMsg { + return (*TcActionMsg)(unsafe.Pointer(&b[0:SizeofTcActionMsg][0])) +} + +func (x *TcActionMsg) Serialize() []byte { + return (*(*[SizeofTcActionMsg]byte)(unsafe.Pointer(x)))[:] +} + +const ( + TC_PRIO_MAX = 15 +) + +// struct tc_prio_qopt { +// int bands; /* Number of bands */ +// __u8 priomap[TC_PRIO_MAX+1]; /* Map: logical priority -> PRIO band */ +// }; + +type TcPrioMap struct { + Bands int32 + Priomap [TC_PRIO_MAX + 1]uint8 +} + +func (msg *TcPrioMap) Len() int { + return SizeofTcPrioMap +} + +func DeserializeTcPrioMap(b []byte) *TcPrioMap { + return (*TcPrioMap)(unsafe.Pointer(&b[0:SizeofTcPrioMap][0])) +} + +func (x *TcPrioMap) Serialize() []byte { + return (*(*[SizeofTcPrioMap]byte)(unsafe.Pointer(x)))[:] +} 
+ +const ( + TCA_TBF_UNSPEC = iota + TCA_TBF_PARMS + TCA_TBF_RTAB + TCA_TBF_PTAB + TCA_TBF_RATE64 + TCA_TBF_PRATE64 + TCA_TBF_BURST + TCA_TBF_PBURST + TCA_TBF_MAX = TCA_TBF_PBURST +) + +// struct tc_ratespec { +// unsigned char cell_log; +// __u8 linklayer; /* lower 4 bits */ +// unsigned short overhead; +// short cell_align; +// unsigned short mpu; +// __u32 rate; +// }; + +type TcRateSpec struct { + CellLog uint8 + Linklayer uint8 + Overhead uint16 + CellAlign int16 + Mpu uint16 + Rate uint32 +} + +func (msg *TcRateSpec) Len() int { + return SizeofTcRateSpec +} + +func DeserializeTcRateSpec(b []byte) *TcRateSpec { + return (*TcRateSpec)(unsafe.Pointer(&b[0:SizeofTcRateSpec][0])) +} + +func (x *TcRateSpec) Serialize() []byte { + return (*(*[SizeofTcRateSpec]byte)(unsafe.Pointer(x)))[:] +} + +/** +* NETEM + */ + +const ( + TCA_NETEM_UNSPEC = iota + TCA_NETEM_CORR + TCA_NETEM_DELAY_DIST + TCA_NETEM_REORDER + TCA_NETEM_CORRUPT + TCA_NETEM_LOSS + TCA_NETEM_RATE + TCA_NETEM_ECN + TCA_NETEM_RATE64 + TCA_NETEM_MAX = TCA_NETEM_RATE64 +) + +// struct tc_netem_qopt { +// __u32 latency; /* added delay (us) */ +// __u32 limit; /* fifo limit (packets) */ +// __u32 loss; /* random packet loss (0=none ~0=100%) */ +// __u32 gap; /* re-ordering gap (0 for none) */ +// __u32 duplicate; /* random packet dup (0=none ~0=100%) */ +// __u32 jitter; /* random jitter in latency (us) */ +// }; + +type TcNetemQopt struct { + Latency uint32 + Limit uint32 + Loss uint32 + Gap uint32 + Duplicate uint32 + Jitter uint32 +} + +func (msg *TcNetemQopt) Len() int { + return SizeofTcNetemQopt +} + +func DeserializeTcNetemQopt(b []byte) *TcNetemQopt { + return (*TcNetemQopt)(unsafe.Pointer(&b[0:SizeofTcNetemQopt][0])) +} + +func (x *TcNetemQopt) Serialize() []byte { + return (*(*[SizeofTcNetemQopt]byte)(unsafe.Pointer(x)))[:] +} + +// struct tc_netem_corr { +// __u32 delay_corr; /* delay correlation */ +// __u32 loss_corr; /* packet loss correlation */ +// __u32 dup_corr; /* duplicate correlation */ 
+// }; + +type TcNetemCorr struct { + DelayCorr uint32 + LossCorr uint32 + DupCorr uint32 +} + +func (msg *TcNetemCorr) Len() int { + return SizeofTcNetemCorr +} + +func DeserializeTcNetemCorr(b []byte) *TcNetemCorr { + return (*TcNetemCorr)(unsafe.Pointer(&b[0:SizeofTcNetemCorr][0])) +} + +func (x *TcNetemCorr) Serialize() []byte { + return (*(*[SizeofTcNetemCorr]byte)(unsafe.Pointer(x)))[:] +} + +// struct tc_netem_reorder { +// __u32 probability; +// __u32 correlation; +// }; + +type TcNetemReorder struct { + Probability uint32 + Correlation uint32 +} + +func (msg *TcNetemReorder) Len() int { + return SizeofTcNetemReorder +} + +func DeserializeTcNetemReorder(b []byte) *TcNetemReorder { + return (*TcNetemReorder)(unsafe.Pointer(&b[0:SizeofTcNetemReorder][0])) +} + +func (x *TcNetemReorder) Serialize() []byte { + return (*(*[SizeofTcNetemReorder]byte)(unsafe.Pointer(x)))[:] +} + +// struct tc_netem_corrupt { +// __u32 probability; +// __u32 correlation; +// }; + +type TcNetemCorrupt struct { + Probability uint32 + Correlation uint32 +} + +func (msg *TcNetemCorrupt) Len() int { + return SizeofTcNetemCorrupt +} + +func DeserializeTcNetemCorrupt(b []byte) *TcNetemCorrupt { + return (*TcNetemCorrupt)(unsafe.Pointer(&b[0:SizeofTcNetemCorrupt][0])) +} + +func (x *TcNetemCorrupt) Serialize() []byte { + return (*(*[SizeofTcNetemCorrupt]byte)(unsafe.Pointer(x)))[:] +} + +// struct tc_tbf_qopt { +// struct tc_ratespec rate; +// struct tc_ratespec peakrate; +// __u32 limit; +// __u32 buffer; +// __u32 mtu; +// }; + +type TcTbfQopt struct { + Rate TcRateSpec + Peakrate TcRateSpec + Limit uint32 + Buffer uint32 + Mtu uint32 +} + +func (msg *TcTbfQopt) Len() int { + return SizeofTcTbfQopt +} + +func DeserializeTcTbfQopt(b []byte) *TcTbfQopt { + return (*TcTbfQopt)(unsafe.Pointer(&b[0:SizeofTcTbfQopt][0])) +} + +func (x *TcTbfQopt) Serialize() []byte { + return (*(*[SizeofTcTbfQopt]byte)(unsafe.Pointer(x)))[:] +} + +const ( + TCA_HTB_UNSPEC = iota + TCA_HTB_PARMS + TCA_HTB_INIT 
+ TCA_HTB_CTAB + TCA_HTB_RTAB + TCA_HTB_DIRECT_QLEN + TCA_HTB_RATE64 + TCA_HTB_CEIL64 + TCA_HTB_MAX = TCA_HTB_CEIL64 +) + +//struct tc_htb_opt { +// struct tc_ratespec rate; +// struct tc_ratespec ceil; +// __u32 buffer; +// __u32 cbuffer; +// __u32 quantum; +// __u32 level; /* out only */ +// __u32 prio; +//}; + +type TcHtbCopt struct { + Rate TcRateSpec + Ceil TcRateSpec + Buffer uint32 + Cbuffer uint32 + Quantum uint32 + Level uint32 + Prio uint32 +} + +func (msg *TcHtbCopt) Len() int { + return SizeofTcHtbCopt +} + +func DeserializeTcHtbCopt(b []byte) *TcHtbCopt { + return (*TcHtbCopt)(unsafe.Pointer(&b[0:SizeofTcHtbCopt][0])) +} + +func (x *TcHtbCopt) Serialize() []byte { + return (*(*[SizeofTcHtbCopt]byte)(unsafe.Pointer(x)))[:] +} + +type TcHtbGlob struct { + Version uint32 + Rate2Quantum uint32 + Defcls uint32 + Debug uint32 + DirectPkts uint32 +} + +func (msg *TcHtbGlob) Len() int { + return SizeofTcHtbGlob +} + +func DeserializeTcHtbGlob(b []byte) *TcHtbGlob { + return (*TcHtbGlob)(unsafe.Pointer(&b[0:SizeofTcHtbGlob][0])) +} + +func (x *TcHtbGlob) Serialize() []byte { + return (*(*[SizeofTcHtbGlob]byte)(unsafe.Pointer(x)))[:] +} + +const ( + TCA_U32_UNSPEC = iota + TCA_U32_CLASSID + TCA_U32_HASH + TCA_U32_LINK + TCA_U32_DIVISOR + TCA_U32_SEL + TCA_U32_POLICE + TCA_U32_ACT + TCA_U32_INDEV + TCA_U32_PCNT + TCA_U32_MARK + TCA_U32_MAX = TCA_U32_MARK +) + +// struct tc_u32_key { +// __be32 mask; +// __be32 val; +// int off; +// int offmask; +// }; + +type TcU32Key struct { + Mask uint32 // big endian + Val uint32 // big endian + Off int32 + OffMask int32 +} + +func (msg *TcU32Key) Len() int { + return SizeofTcU32Key +} + +func DeserializeTcU32Key(b []byte) *TcU32Key { + return (*TcU32Key)(unsafe.Pointer(&b[0:SizeofTcU32Key][0])) +} + +func (x *TcU32Key) Serialize() []byte { + return (*(*[SizeofTcU32Key]byte)(unsafe.Pointer(x)))[:] +} + +// struct tc_u32_sel { +// unsigned char flags; +// unsigned char offshift; +// unsigned char nkeys; +// +// __be16 offmask; 
+// __u16 off; +// short offoff; +// +// short hoff; +// __be32 hmask; +// struct tc_u32_key keys[0]; +// }; + +const ( + TC_U32_TERMINAL = 1 << iota + TC_U32_OFFSET = 1 << iota + TC_U32_VAROFFSET = 1 << iota + TC_U32_EAT = 1 << iota +) + +type TcU32Sel struct { + Flags uint8 + Offshift uint8 + Nkeys uint8 + Pad uint8 + Offmask uint16 // big endian + Off uint16 + Offoff int16 + Hoff int16 + Hmask uint32 // big endian + Keys []TcU32Key +} + +func (msg *TcU32Sel) Len() int { + return SizeofTcU32Sel + int(msg.Nkeys)*SizeofTcU32Key +} + +func DeserializeTcU32Sel(b []byte) *TcU32Sel { + x := &TcU32Sel{} + copy((*(*[SizeofTcU32Sel]byte)(unsafe.Pointer(x)))[:], b) + next := SizeofTcU32Sel + var i uint8 + for i = 0; i < x.Nkeys; i++ { + x.Keys = append(x.Keys, *DeserializeTcU32Key(b[next:])) + next += SizeofTcU32Key + } + return x +} + +func (x *TcU32Sel) Serialize() []byte { + // This can't just unsafe.cast because it must iterate through keys. + buf := make([]byte, x.Len()) + copy(buf, (*(*[SizeofTcU32Sel]byte)(unsafe.Pointer(x)))[:]) + next := SizeofTcU32Sel + for _, key := range x.Keys { + keyBuf := key.Serialize() + copy(buf[next:], keyBuf) + next += SizeofTcU32Key + } + return buf +} + +const ( + TCA_ACT_MIRRED = 8 +) + +const ( + TCA_MIRRED_UNSPEC = iota + TCA_MIRRED_TM + TCA_MIRRED_PARMS + TCA_MIRRED_MAX = TCA_MIRRED_PARMS +) + +const ( + TCA_EGRESS_REDIR = 1 /* packet redirect to EGRESS*/ + TCA_EGRESS_MIRROR = 2 /* mirror packet to EGRESS */ + TCA_INGRESS_REDIR = 3 /* packet redirect to INGRESS*/ + TCA_INGRESS_MIRROR = 4 /* mirror packet to INGRESS */ +) + +const ( + TC_ACT_UNSPEC = int32(-1) + TC_ACT_OK = 0 + TC_ACT_RECLASSIFY = 1 + TC_ACT_SHOT = 2 + TC_ACT_PIPE = 3 + TC_ACT_STOLEN = 4 + TC_ACT_QUEUED = 5 + TC_ACT_REPEAT = 6 + TC_ACT_JUMP = 0x10000000 +) + +// #define tc_gen \ +// __u32 index; \ +// __u32 capab; \ +// int action; \ +// int refcnt; \ +// int bindcnt +// struct tc_mirred { +// tc_gen; +// int eaction; /* one of IN/EGRESS_MIRROR/REDIR */ +// __u32 
ifindex; /* ifindex of egress port */ +// }; + +type TcMirred struct { + Index uint32 + Capab uint32 + Action int32 + Refcnt int32 + Bindcnt int32 + Eaction int32 + Ifindex uint32 +} + +func (msg *TcMirred) Len() int { + return SizeofTcMirred +} + +func DeserializeTcMirred(b []byte) *TcMirred { + return (*TcMirred)(unsafe.Pointer(&b[0:SizeofTcMirred][0])) +} + +func (x *TcMirred) Serialize() []byte { + return (*(*[SizeofTcMirred]byte)(unsafe.Pointer(x)))[:] +} + +const ( + TC_POLICE_UNSPEC = TC_ACT_UNSPEC + TC_POLICE_OK = TC_ACT_OK + TC_POLICE_RECLASSIFY = TC_ACT_RECLASSIFY + TC_POLICE_SHOT = TC_ACT_SHOT + TC_POLICE_PIPE = TC_ACT_PIPE +) + +// struct tc_police { +// __u32 index; +// int action; +// __u32 limit; +// __u32 burst; +// __u32 mtu; +// struct tc_ratespec rate; +// struct tc_ratespec peakrate; +// int refcnt; +// int bindcnt; +// __u32 capab; +// }; + +type TcPolice struct { + Index uint32 + Action int32 + Limit uint32 + Burst uint32 + Mtu uint32 + Rate TcRateSpec + PeakRate TcRateSpec + Refcnt int32 + Bindcnt int32 + Capab uint32 +} + +func (msg *TcPolice) Len() int { + return SizeofTcPolice +} + +func DeserializeTcPolice(b []byte) *TcPolice { + return (*TcPolice)(unsafe.Pointer(&b[0:SizeofTcPolice][0])) +} + +func (x *TcPolice) Serialize() []byte { + return (*(*[SizeofTcPolice]byte)(unsafe.Pointer(x)))[:] +} + +const ( + TCA_FW_UNSPEC = iota + TCA_FW_CLASSID + TCA_FW_POLICE + TCA_FW_INDEV + TCA_FW_ACT + TCA_FW_MASK + TCA_FW_MAX = TCA_FW_MASK +) diff --git a/networking/libsnnet/vendor/github.com/vishvananda/netlink/nl/tc_linux_test.go b/networking/libsnnet/vendor/github.com/vishvananda/netlink/nl/tc_linux_test.go new file mode 100644 index 000000000..148b2b02c --- /dev/null +++ b/networking/libsnnet/vendor/github.com/vishvananda/netlink/nl/tc_linux_test.go @@ -0,0 +1,173 @@ +package nl + +import ( + "bytes" + "crypto/rand" + "encoding/binary" + "testing" +) + +/* TcMsg */ +func (msg *TcMsg) write(b []byte) { + native := NativeEndian() + b[0] = msg.Family 
+ copy(b[1:4], msg.Pad[:]) + native.PutUint32(b[4:8], uint32(msg.Ifindex)) + native.PutUint32(b[8:12], msg.Handle) + native.PutUint32(b[12:16], msg.Parent) + native.PutUint32(b[16:20], msg.Info) +} + +func (msg *TcMsg) serializeSafe() []byte { + length := SizeofTcMsg + b := make([]byte, length) + msg.write(b) + return b +} + +func deserializeTcMsgSafe(b []byte) *TcMsg { + var msg = TcMsg{} + binary.Read(bytes.NewReader(b[0:SizeofTcMsg]), NativeEndian(), &msg) + return &msg +} + +func TestTcMsgDeserializeSerialize(t *testing.T) { + var orig = make([]byte, SizeofTcMsg) + rand.Read(orig) + safemsg := deserializeTcMsgSafe(orig) + msg := DeserializeTcMsg(orig) + testDeserializeSerialize(t, orig, safemsg, msg) +} + +/* TcActionMsg */ +func (msg *TcActionMsg) write(b []byte) { + b[0] = msg.Family + copy(b[1:4], msg.Pad[:]) +} + +func (msg *TcActionMsg) serializeSafe() []byte { + length := SizeofTcActionMsg + b := make([]byte, length) + msg.write(b) + return b +} + +func deserializeTcActionMsgSafe(b []byte) *TcActionMsg { + var msg = TcActionMsg{} + binary.Read(bytes.NewReader(b[0:SizeofTcActionMsg]), NativeEndian(), &msg) + return &msg +} + +func TestTcActionMsgDeserializeSerialize(t *testing.T) { + var orig = make([]byte, SizeofTcActionMsg) + rand.Read(orig) + safemsg := deserializeTcActionMsgSafe(orig) + msg := DeserializeTcActionMsg(orig) + testDeserializeSerialize(t, orig, safemsg, msg) +} + +/* TcRateSpec */ +func (msg *TcRateSpec) write(b []byte) { + native := NativeEndian() + b[0] = msg.CellLog + b[1] = msg.Linklayer + native.PutUint16(b[2:4], msg.Overhead) + native.PutUint16(b[4:6], uint16(msg.CellAlign)) + native.PutUint16(b[6:8], msg.Mpu) + native.PutUint32(b[8:12], msg.Rate) +} + +func (msg *TcRateSpec) serializeSafe() []byte { + length := SizeofTcRateSpec + b := make([]byte, length) + msg.write(b) + return b +} + +func deserializeTcRateSpecSafe(b []byte) *TcRateSpec { + var msg = TcRateSpec{} + binary.Read(bytes.NewReader(b[0:SizeofTcRateSpec]), 
NativeEndian(), &msg) + return &msg +} + +func TestTcRateSpecDeserializeSerialize(t *testing.T) { + var orig = make([]byte, SizeofTcRateSpec) + rand.Read(orig) + safemsg := deserializeTcRateSpecSafe(orig) + msg := DeserializeTcRateSpec(orig) + testDeserializeSerialize(t, orig, safemsg, msg) +} + +/* TcTbfQopt */ +func (msg *TcTbfQopt) write(b []byte) { + native := NativeEndian() + msg.Rate.write(b[0:SizeofTcRateSpec]) + start := SizeofTcRateSpec + msg.Peakrate.write(b[start : start+SizeofTcRateSpec]) + start += SizeofTcRateSpec + native.PutUint32(b[start:start+4], msg.Limit) + start += 4 + native.PutUint32(b[start:start+4], msg.Buffer) + start += 4 + native.PutUint32(b[start:start+4], msg.Mtu) +} + +func (msg *TcTbfQopt) serializeSafe() []byte { + length := SizeofTcTbfQopt + b := make([]byte, length) + msg.write(b) + return b +} + +func deserializeTcTbfQoptSafe(b []byte) *TcTbfQopt { + var msg = TcTbfQopt{} + binary.Read(bytes.NewReader(b[0:SizeofTcTbfQopt]), NativeEndian(), &msg) + return &msg +} + +func TestTcTbfQoptDeserializeSerialize(t *testing.T) { + var orig = make([]byte, SizeofTcTbfQopt) + rand.Read(orig) + safemsg := deserializeTcTbfQoptSafe(orig) + msg := DeserializeTcTbfQopt(orig) + testDeserializeSerialize(t, orig, safemsg, msg) +} + +/* TcHtbCopt */ +func (msg *TcHtbCopt) write(b []byte) { + native := NativeEndian() + msg.Rate.write(b[0:SizeofTcRateSpec]) + start := SizeofTcRateSpec + msg.Ceil.write(b[start : start+SizeofTcRateSpec]) + start += SizeofTcRateSpec + native.PutUint32(b[start:start+4], msg.Buffer) + start += 4 + native.PutUint32(b[start:start+4], msg.Cbuffer) + start += 4 + native.PutUint32(b[start:start+4], msg.Quantum) + start += 4 + native.PutUint32(b[start:start+4], msg.Level) + start += 4 + native.PutUint32(b[start:start+4], msg.Prio) +} + +func (msg *TcHtbCopt) serializeSafe() []byte { + length := SizeofTcHtbCopt + b := make([]byte, length) + msg.write(b) + return b +} + +func deserializeTcHtbCoptSafe(b []byte) *TcHtbCopt { + var msg 
= TcHtbCopt{} + binary.Read(bytes.NewReader(b[0:SizeofTcHtbCopt]), NativeEndian(), &msg) + return &msg +} + +func TestTcHtbCoptDeserializeSerialize(t *testing.T) { + var orig = make([]byte, SizeofTcHtbCopt) + rand.Read(orig) + safemsg := deserializeTcHtbCoptSafe(orig) + msg := DeserializeTcHtbCopt(orig) + testDeserializeSerialize(t, orig, safemsg, msg) +} diff --git a/networking/libsnnet/vendor/github.com/vishvananda/netlink/nl/xfrm_linux.go b/networking/libsnnet/vendor/github.com/vishvananda/netlink/nl/xfrm_linux.go new file mode 100644 index 000000000..d24637d27 --- /dev/null +++ b/networking/libsnnet/vendor/github.com/vishvananda/netlink/nl/xfrm_linux.go @@ -0,0 +1,258 @@ +package nl + +import ( + "bytes" + "net" + "unsafe" +) + +// Infinity for packet and byte counts +const ( + XFRM_INF = ^uint64(0) +) + +// Message Types +const ( + XFRM_MSG_BASE = 0x10 + XFRM_MSG_NEWSA = 0x10 + XFRM_MSG_DELSA = 0x11 + XFRM_MSG_GETSA = 0x12 + XFRM_MSG_NEWPOLICY = 0x13 + XFRM_MSG_DELPOLICY = 0x14 + XFRM_MSG_GETPOLICY = 0x15 + XFRM_MSG_ALLOCSPI = 0x16 + XFRM_MSG_ACQUIRE = 0x17 + XFRM_MSG_EXPIRE = 0x18 + XFRM_MSG_UPDPOLICY = 0x19 + XFRM_MSG_UPDSA = 0x1a + XFRM_MSG_POLEXPIRE = 0x1b + XFRM_MSG_FLUSHSA = 0x1c + XFRM_MSG_FLUSHPOLICY = 0x1d + XFRM_MSG_NEWAE = 0x1e + XFRM_MSG_GETAE = 0x1f + XFRM_MSG_REPORT = 0x20 + XFRM_MSG_MIGRATE = 0x21 + XFRM_MSG_NEWSADINFO = 0x22 + XFRM_MSG_GETSADINFO = 0x23 + XFRM_MSG_NEWSPDINFO = 0x24 + XFRM_MSG_GETSPDINFO = 0x25 + XFRM_MSG_MAPPING = 0x26 + XFRM_MSG_MAX = 0x26 + XFRM_NR_MSGTYPES = 0x17 +) + +// Attribute types +const ( + /* Netlink message attributes. 
*/ + XFRMA_UNSPEC = 0x00 + XFRMA_ALG_AUTH = 0x01 /* struct xfrm_algo */ + XFRMA_ALG_CRYPT = 0x02 /* struct xfrm_algo */ + XFRMA_ALG_COMP = 0x03 /* struct xfrm_algo */ + XFRMA_ENCAP = 0x04 /* struct xfrm_algo + struct xfrm_encap_tmpl */ + XFRMA_TMPL = 0x05 /* 1 or more struct xfrm_user_tmpl */ + XFRMA_SA = 0x06 /* struct xfrm_usersa_info */ + XFRMA_POLICY = 0x07 /* struct xfrm_userpolicy_info */ + XFRMA_SEC_CTX = 0x08 /* struct xfrm_sec_ctx */ + XFRMA_LTIME_VAL = 0x09 + XFRMA_REPLAY_VAL = 0x0a + XFRMA_REPLAY_THRESH = 0x0b + XFRMA_ETIMER_THRESH = 0x0c + XFRMA_SRCADDR = 0x0d /* xfrm_address_t */ + XFRMA_COADDR = 0x0e /* xfrm_address_t */ + XFRMA_LASTUSED = 0x0f /* unsigned long */ + XFRMA_POLICY_TYPE = 0x10 /* struct xfrm_userpolicy_type */ + XFRMA_MIGRATE = 0x11 + XFRMA_ALG_AEAD = 0x12 /* struct xfrm_algo_aead */ + XFRMA_KMADDRESS = 0x13 /* struct xfrm_user_kmaddress */ + XFRMA_ALG_AUTH_TRUNC = 0x14 /* struct xfrm_algo_auth */ + XFRMA_MARK = 0x15 /* struct xfrm_mark */ + XFRMA_TFCPAD = 0x16 /* __u32 */ + XFRMA_REPLAY_ESN_VAL = 0x17 /* struct xfrm_replay_esn */ + XFRMA_SA_EXTRA_FLAGS = 0x18 /* __u32 */ + XFRMA_MAX = 0x18 +) + +const ( + SizeofXfrmAddress = 0x10 + SizeofXfrmSelector = 0x38 + SizeofXfrmLifetimeCfg = 0x40 + SizeofXfrmLifetimeCur = 0x20 + SizeofXfrmId = 0x18 +) + +// typedef union { +// __be32 a4; +// __be32 a6[4]; +// } xfrm_address_t; + +type XfrmAddress [SizeofXfrmAddress]byte + +func (x *XfrmAddress) ToIP() net.IP { + var empty = [12]byte{} + ip := make(net.IP, net.IPv6len) + if bytes.Equal(x[4:16], empty[:]) { + ip[10] = 0xff + ip[11] = 0xff + copy(ip[12:16], x[0:4]) + } else { + copy(ip[:], x[:]) + } + return ip +} + +func (x *XfrmAddress) ToIPNet(prefixlen uint8) *net.IPNet { + ip := x.ToIP() + if GetIPFamily(ip) == FAMILY_V4 { + return &net.IPNet{IP: ip, Mask: net.CIDRMask(int(prefixlen), 32)} + } + return &net.IPNet{IP: ip, Mask: net.CIDRMask(int(prefixlen), 128)} +} + +func (x *XfrmAddress) FromIP(ip net.IP) { + var empty = [16]byte{} + if 
len(ip) < net.IPv4len { + copy(x[4:16], empty[:]) + } else if GetIPFamily(ip) == FAMILY_V4 { + copy(x[0:4], ip.To4()[0:4]) + copy(x[4:16], empty[:12]) + } else { + copy(x[0:16], ip.To16()[0:16]) + } +} + +func DeserializeXfrmAddress(b []byte) *XfrmAddress { + return (*XfrmAddress)(unsafe.Pointer(&b[0:SizeofXfrmAddress][0])) +} + +func (x *XfrmAddress) Serialize() []byte { + return (*(*[SizeofXfrmAddress]byte)(unsafe.Pointer(x)))[:] +} + +// struct xfrm_selector { +// xfrm_address_t daddr; +// xfrm_address_t saddr; +// __be16 dport; +// __be16 dport_mask; +// __be16 sport; +// __be16 sport_mask; +// __u16 family; +// __u8 prefixlen_d; +// __u8 prefixlen_s; +// __u8 proto; +// int ifindex; +// __kernel_uid32_t user; +// }; + +type XfrmSelector struct { + Daddr XfrmAddress + Saddr XfrmAddress + Dport uint16 // big endian + DportMask uint16 // big endian + Sport uint16 // big endian + SportMask uint16 // big endian + Family uint16 + PrefixlenD uint8 + PrefixlenS uint8 + Proto uint8 + Pad [3]byte + Ifindex int32 + User uint32 +} + +func (msg *XfrmSelector) Len() int { + return SizeofXfrmSelector +} + +func DeserializeXfrmSelector(b []byte) *XfrmSelector { + return (*XfrmSelector)(unsafe.Pointer(&b[0:SizeofXfrmSelector][0])) +} + +func (msg *XfrmSelector) Serialize() []byte { + return (*(*[SizeofXfrmSelector]byte)(unsafe.Pointer(msg)))[:] +} + +// struct xfrm_lifetime_cfg { +// __u64 soft_byte_limit; +// __u64 hard_byte_limit; +// __u64 soft_packet_limit; +// __u64 hard_packet_limit; +// __u64 soft_add_expires_seconds; +// __u64 hard_add_expires_seconds; +// __u64 soft_use_expires_seconds; +// __u64 hard_use_expires_seconds; +// }; +// + +type XfrmLifetimeCfg struct { + SoftByteLimit uint64 + HardByteLimit uint64 + SoftPacketLimit uint64 + HardPacketLimit uint64 + SoftAddExpiresSeconds uint64 + HardAddExpiresSeconds uint64 + SoftUseExpiresSeconds uint64 + HardUseExpiresSeconds uint64 +} + +func (msg *XfrmLifetimeCfg) Len() int { + return SizeofXfrmLifetimeCfg +} + +func 
DeserializeXfrmLifetimeCfg(b []byte) *XfrmLifetimeCfg { + return (*XfrmLifetimeCfg)(unsafe.Pointer(&b[0:SizeofXfrmLifetimeCfg][0])) +} + +func (msg *XfrmLifetimeCfg) Serialize() []byte { + return (*(*[SizeofXfrmLifetimeCfg]byte)(unsafe.Pointer(msg)))[:] +} + +// struct xfrm_lifetime_cur { +// __u64 bytes; +// __u64 packets; +// __u64 add_time; +// __u64 use_time; +// }; + +type XfrmLifetimeCur struct { + Bytes uint64 + Packets uint64 + AddTime uint64 + UseTime uint64 +} + +func (msg *XfrmLifetimeCur) Len() int { + return SizeofXfrmLifetimeCur +} + +func DeserializeXfrmLifetimeCur(b []byte) *XfrmLifetimeCur { + return (*XfrmLifetimeCur)(unsafe.Pointer(&b[0:SizeofXfrmLifetimeCur][0])) +} + +func (msg *XfrmLifetimeCur) Serialize() []byte { + return (*(*[SizeofXfrmLifetimeCur]byte)(unsafe.Pointer(msg)))[:] +} + +// struct xfrm_id { +// xfrm_address_t daddr; +// __be32 spi; +// __u8 proto; +// }; + +type XfrmId struct { + Daddr XfrmAddress + Spi uint32 // big endian + Proto uint8 + Pad [3]byte +} + +func (msg *XfrmId) Len() int { + return SizeofXfrmId +} + +func DeserializeXfrmId(b []byte) *XfrmId { + return (*XfrmId)(unsafe.Pointer(&b[0:SizeofXfrmId][0])) +} + +func (msg *XfrmId) Serialize() []byte { + return (*(*[SizeofXfrmId]byte)(unsafe.Pointer(msg)))[:] +} diff --git a/networking/libsnnet/vendor/github.com/vishvananda/netlink/nl/xfrm_linux_test.go b/networking/libsnnet/vendor/github.com/vishvananda/netlink/nl/xfrm_linux_test.go new file mode 100644 index 000000000..04404d751 --- /dev/null +++ b/networking/libsnnet/vendor/github.com/vishvananda/netlink/nl/xfrm_linux_test.go @@ -0,0 +1,161 @@ +package nl + +import ( + "bytes" + "crypto/rand" + "encoding/binary" + "testing" +) + +func (msg *XfrmAddress) write(b []byte) { + copy(b[0:SizeofXfrmAddress], msg[:]) +} + +func (msg *XfrmAddress) serializeSafe() []byte { + b := make([]byte, SizeofXfrmAddress) + msg.write(b) + return b +} + +func deserializeXfrmAddressSafe(b []byte) *XfrmAddress { + var msg = XfrmAddress{} + 
binary.Read(bytes.NewReader(b[0:SizeofXfrmAddress]), NativeEndian(), &msg) + return &msg +} + +func TestXfrmAddressDeserializeSerialize(t *testing.T) { + var orig = make([]byte, SizeofXfrmAddress) + rand.Read(orig) + safemsg := deserializeXfrmAddressSafe(orig) + msg := DeserializeXfrmAddress(orig) + testDeserializeSerialize(t, orig, safemsg, msg) +} + +func (msg *XfrmSelector) write(b []byte) { + const AddrEnd = SizeofXfrmAddress * 2 + native := NativeEndian() + msg.Daddr.write(b[0:SizeofXfrmAddress]) + msg.Saddr.write(b[SizeofXfrmAddress:AddrEnd]) + native.PutUint16(b[AddrEnd:AddrEnd+2], msg.Dport) + native.PutUint16(b[AddrEnd+2:AddrEnd+4], msg.DportMask) + native.PutUint16(b[AddrEnd+4:AddrEnd+6], msg.Sport) + native.PutUint16(b[AddrEnd+6:AddrEnd+8], msg.SportMask) + native.PutUint16(b[AddrEnd+8:AddrEnd+10], msg.Family) + b[AddrEnd+10] = msg.PrefixlenD + b[AddrEnd+11] = msg.PrefixlenS + b[AddrEnd+12] = msg.Proto + copy(b[AddrEnd+13:AddrEnd+16], msg.Pad[:]) + native.PutUint32(b[AddrEnd+16:AddrEnd+20], uint32(msg.Ifindex)) + native.PutUint32(b[AddrEnd+20:AddrEnd+24], msg.User) +} + +func (msg *XfrmSelector) serializeSafe() []byte { + length := SizeofXfrmSelector + b := make([]byte, length) + msg.write(b) + return b +} + +func deserializeXfrmSelectorSafe(b []byte) *XfrmSelector { + var msg = XfrmSelector{} + binary.Read(bytes.NewReader(b[0:SizeofXfrmSelector]), NativeEndian(), &msg) + return &msg +} + +func TestXfrmSelectorDeserializeSerialize(t *testing.T) { + var orig = make([]byte, SizeofXfrmSelector) + rand.Read(orig) + safemsg := deserializeXfrmSelectorSafe(orig) + msg := DeserializeXfrmSelector(orig) + testDeserializeSerialize(t, orig, safemsg, msg) +} + +func (msg *XfrmLifetimeCfg) write(b []byte) { + native := NativeEndian() + native.PutUint64(b[0:8], msg.SoftByteLimit) + native.PutUint64(b[8:16], msg.HardByteLimit) + native.PutUint64(b[16:24], msg.SoftPacketLimit) + native.PutUint64(b[24:32], msg.HardPacketLimit) + native.PutUint64(b[32:40], 
msg.SoftAddExpiresSeconds) + native.PutUint64(b[40:48], msg.HardAddExpiresSeconds) + native.PutUint64(b[48:56], msg.SoftUseExpiresSeconds) + native.PutUint64(b[56:64], msg.HardUseExpiresSeconds) +} + +func (msg *XfrmLifetimeCfg) serializeSafe() []byte { + length := SizeofXfrmLifetimeCfg + b := make([]byte, length) + msg.write(b) + return b +} + +func deserializeXfrmLifetimeCfgSafe(b []byte) *XfrmLifetimeCfg { + var msg = XfrmLifetimeCfg{} + binary.Read(bytes.NewReader(b[0:SizeofXfrmLifetimeCfg]), NativeEndian(), &msg) + return &msg +} + +func TestXfrmLifetimeCfgDeserializeSerialize(t *testing.T) { + var orig = make([]byte, SizeofXfrmLifetimeCfg) + rand.Read(orig) + safemsg := deserializeXfrmLifetimeCfgSafe(orig) + msg := DeserializeXfrmLifetimeCfg(orig) + testDeserializeSerialize(t, orig, safemsg, msg) +} + +func (msg *XfrmLifetimeCur) write(b []byte) { + native := NativeEndian() + native.PutUint64(b[0:8], msg.Bytes) + native.PutUint64(b[8:16], msg.Packets) + native.PutUint64(b[16:24], msg.AddTime) + native.PutUint64(b[24:32], msg.UseTime) +} + +func (msg *XfrmLifetimeCur) serializeSafe() []byte { + length := SizeofXfrmLifetimeCur + b := make([]byte, length) + msg.write(b) + return b +} + +func deserializeXfrmLifetimeCurSafe(b []byte) *XfrmLifetimeCur { + var msg = XfrmLifetimeCur{} + binary.Read(bytes.NewReader(b[0:SizeofXfrmLifetimeCur]), NativeEndian(), &msg) + return &msg +} + +func TestXfrmLifetimeCurDeserializeSerialize(t *testing.T) { + var orig = make([]byte, SizeofXfrmLifetimeCur) + rand.Read(orig) + safemsg := deserializeXfrmLifetimeCurSafe(orig) + msg := DeserializeXfrmLifetimeCur(orig) + testDeserializeSerialize(t, orig, safemsg, msg) +} + +func (msg *XfrmId) write(b []byte) { + native := NativeEndian() + msg.Daddr.write(b[0:SizeofXfrmAddress]) + native.PutUint32(b[SizeofXfrmAddress:SizeofXfrmAddress+4], msg.Spi) + b[SizeofXfrmAddress+4] = msg.Proto + copy(b[SizeofXfrmAddress+5:SizeofXfrmAddress+8], msg.Pad[:]) +} + +func (msg *XfrmId) serializeSafe() 
[]byte { + b := make([]byte, SizeofXfrmId) + msg.write(b) + return b +} + +func deserializeXfrmIdSafe(b []byte) *XfrmId { + var msg = XfrmId{} + binary.Read(bytes.NewReader(b[0:SizeofXfrmId]), NativeEndian(), &msg) + return &msg +} + +func TestXfrmIdDeserializeSerialize(t *testing.T) { + var orig = make([]byte, SizeofXfrmId) + rand.Read(orig) + safemsg := deserializeXfrmIdSafe(orig) + msg := DeserializeXfrmId(orig) + testDeserializeSerialize(t, orig, safemsg, msg) +} diff --git a/networking/libsnnet/vendor/github.com/vishvananda/netlink/nl/xfrm_policy_linux.go b/networking/libsnnet/vendor/github.com/vishvananda/netlink/nl/xfrm_policy_linux.go new file mode 100644 index 000000000..66f7e03d2 --- /dev/null +++ b/networking/libsnnet/vendor/github.com/vishvananda/netlink/nl/xfrm_policy_linux.go @@ -0,0 +1,119 @@ +package nl + +import ( + "unsafe" +) + +const ( + SizeofXfrmUserpolicyId = 0x40 + SizeofXfrmUserpolicyInfo = 0xa8 + SizeofXfrmUserTmpl = 0x40 +) + +// struct xfrm_userpolicy_id { +// struct xfrm_selector sel; +// __u32 index; +// __u8 dir; +// }; +// + +type XfrmUserpolicyId struct { + Sel XfrmSelector + Index uint32 + Dir uint8 + Pad [3]byte +} + +func (msg *XfrmUserpolicyId) Len() int { + return SizeofXfrmUserpolicyId +} + +func DeserializeXfrmUserpolicyId(b []byte) *XfrmUserpolicyId { + return (*XfrmUserpolicyId)(unsafe.Pointer(&b[0:SizeofXfrmUserpolicyId][0])) +} + +func (msg *XfrmUserpolicyId) Serialize() []byte { + return (*(*[SizeofXfrmUserpolicyId]byte)(unsafe.Pointer(msg)))[:] +} + +// struct xfrm_userpolicy_info { +// struct xfrm_selector sel; +// struct xfrm_lifetime_cfg lft; +// struct xfrm_lifetime_cur curlft; +// __u32 priority; +// __u32 index; +// __u8 dir; +// __u8 action; +// #define XFRM_POLICY_ALLOW 0 +// #define XFRM_POLICY_BLOCK 1 +// __u8 flags; +// #define XFRM_POLICY_LOCALOK 1 /* Allow user to override global policy */ +// /* Automatically expand selector to include matching ICMP payloads. 
*/ +// #define XFRM_POLICY_ICMP 2 +// __u8 share; +// }; + +type XfrmUserpolicyInfo struct { + Sel XfrmSelector + Lft XfrmLifetimeCfg + Curlft XfrmLifetimeCur + Priority uint32 + Index uint32 + Dir uint8 + Action uint8 + Flags uint8 + Share uint8 + Pad [4]byte +} + +func (msg *XfrmUserpolicyInfo) Len() int { + return SizeofXfrmUserpolicyInfo +} + +func DeserializeXfrmUserpolicyInfo(b []byte) *XfrmUserpolicyInfo { + return (*XfrmUserpolicyInfo)(unsafe.Pointer(&b[0:SizeofXfrmUserpolicyInfo][0])) +} + +func (msg *XfrmUserpolicyInfo) Serialize() []byte { + return (*(*[SizeofXfrmUserpolicyInfo]byte)(unsafe.Pointer(msg)))[:] +} + +// struct xfrm_user_tmpl { +// struct xfrm_id id; +// __u16 family; +// xfrm_address_t saddr; +// __u32 reqid; +// __u8 mode; +// __u8 share; +// __u8 optional; +// __u32 aalgos; +// __u32 ealgos; +// __u32 calgos; +// } + +type XfrmUserTmpl struct { + XfrmId XfrmId + Family uint16 + Pad1 [2]byte + Saddr XfrmAddress + Reqid uint32 + Mode uint8 + Share uint8 + Optional uint8 + Pad2 byte + Aalgos uint32 + Ealgos uint32 + Calgos uint32 +} + +func (msg *XfrmUserTmpl) Len() int { + return SizeofXfrmUserTmpl +} + +func DeserializeXfrmUserTmpl(b []byte) *XfrmUserTmpl { + return (*XfrmUserTmpl)(unsafe.Pointer(&b[0:SizeofXfrmUserTmpl][0])) +} + +func (msg *XfrmUserTmpl) Serialize() []byte { + return (*(*[SizeofXfrmUserTmpl]byte)(unsafe.Pointer(msg)))[:] +} diff --git a/networking/libsnnet/vendor/github.com/vishvananda/netlink/nl/xfrm_policy_linux_test.go b/networking/libsnnet/vendor/github.com/vishvananda/netlink/nl/xfrm_policy_linux_test.go new file mode 100644 index 000000000..08a604b9c --- /dev/null +++ b/networking/libsnnet/vendor/github.com/vishvananda/netlink/nl/xfrm_policy_linux_test.go @@ -0,0 +1,109 @@ +package nl + +import ( + "bytes" + "crypto/rand" + "encoding/binary" + "testing" +) + +func (msg *XfrmUserpolicyId) write(b []byte) { + native := NativeEndian() + msg.Sel.write(b[0:SizeofXfrmSelector]) + 
native.PutUint32(b[SizeofXfrmSelector:SizeofXfrmSelector+4], msg.Index) + b[SizeofXfrmSelector+4] = msg.Dir + copy(b[SizeofXfrmSelector+5:SizeofXfrmSelector+8], msg.Pad[:]) +} + +func (msg *XfrmUserpolicyId) serializeSafe() []byte { + b := make([]byte, SizeofXfrmUserpolicyId) + msg.write(b) + return b +} + +func deserializeXfrmUserpolicyIdSafe(b []byte) *XfrmUserpolicyId { + var msg = XfrmUserpolicyId{} + binary.Read(bytes.NewReader(b[0:SizeofXfrmUserpolicyId]), NativeEndian(), &msg) + return &msg +} + +func TestXfrmUserpolicyIdDeserializeSerialize(t *testing.T) { + var orig = make([]byte, SizeofXfrmUserpolicyId) + rand.Read(orig) + safemsg := deserializeXfrmUserpolicyIdSafe(orig) + msg := DeserializeXfrmUserpolicyId(orig) + testDeserializeSerialize(t, orig, safemsg, msg) +} + +func (msg *XfrmUserpolicyInfo) write(b []byte) { + const CfgEnd = SizeofXfrmSelector + SizeofXfrmLifetimeCfg + const CurEnd = CfgEnd + SizeofXfrmLifetimeCur + native := NativeEndian() + msg.Sel.write(b[0:SizeofXfrmSelector]) + msg.Lft.write(b[SizeofXfrmSelector:CfgEnd]) + msg.Curlft.write(b[CfgEnd:CurEnd]) + native.PutUint32(b[CurEnd:CurEnd+4], msg.Priority) + native.PutUint32(b[CurEnd+4:CurEnd+8], msg.Index) + b[CurEnd+8] = msg.Dir + b[CurEnd+9] = msg.Action + b[CurEnd+10] = msg.Flags + b[CurEnd+11] = msg.Share + copy(b[CurEnd+12:CurEnd+16], msg.Pad[:]) +} + +func (msg *XfrmUserpolicyInfo) serializeSafe() []byte { + b := make([]byte, SizeofXfrmUserpolicyInfo) + msg.write(b) + return b +} + +func deserializeXfrmUserpolicyInfoSafe(b []byte) *XfrmUserpolicyInfo { + var msg = XfrmUserpolicyInfo{} + binary.Read(bytes.NewReader(b[0:SizeofXfrmUserpolicyInfo]), NativeEndian(), &msg) + return &msg +} + +func TestXfrmUserpolicyInfoDeserializeSerialize(t *testing.T) { + var orig = make([]byte, SizeofXfrmUserpolicyInfo) + rand.Read(orig) + safemsg := deserializeXfrmUserpolicyInfoSafe(orig) + msg := DeserializeXfrmUserpolicyInfo(orig) + testDeserializeSerialize(t, orig, safemsg, msg) +} + +func (msg 
*XfrmUserTmpl) write(b []byte) { + const AddrEnd = SizeofXfrmId + 4 + SizeofXfrmAddress + native := NativeEndian() + msg.XfrmId.write(b[0:SizeofXfrmId]) + native.PutUint16(b[SizeofXfrmId:SizeofXfrmId+2], msg.Family) + copy(b[SizeofXfrmId+2:SizeofXfrmId+4], msg.Pad1[:]) + msg.Saddr.write(b[SizeofXfrmId+4 : AddrEnd]) + native.PutUint32(b[AddrEnd:AddrEnd+4], msg.Reqid) + b[AddrEnd+4] = msg.Mode + b[AddrEnd+5] = msg.Share + b[AddrEnd+6] = msg.Optional + b[AddrEnd+7] = msg.Pad2 + native.PutUint32(b[AddrEnd+8:AddrEnd+12], msg.Aalgos) + native.PutUint32(b[AddrEnd+12:AddrEnd+16], msg.Ealgos) + native.PutUint32(b[AddrEnd+16:AddrEnd+20], msg.Calgos) +} + +func (msg *XfrmUserTmpl) serializeSafe() []byte { + b := make([]byte, SizeofXfrmUserTmpl) + msg.write(b) + return b +} + +func deserializeXfrmUserTmplSafe(b []byte) *XfrmUserTmpl { + var msg = XfrmUserTmpl{} + binary.Read(bytes.NewReader(b[0:SizeofXfrmUserTmpl]), NativeEndian(), &msg) + return &msg +} + +func TestXfrmUserTmplDeserializeSerialize(t *testing.T) { + var orig = make([]byte, SizeofXfrmUserTmpl) + rand.Read(orig) + safemsg := deserializeXfrmUserTmplSafe(orig) + msg := DeserializeXfrmUserTmpl(orig) + testDeserializeSerialize(t, orig, safemsg, msg) +} diff --git a/networking/libsnnet/vendor/github.com/vishvananda/netlink/nl/xfrm_state_linux.go b/networking/libsnnet/vendor/github.com/vishvananda/netlink/nl/xfrm_state_linux.go new file mode 100644 index 000000000..4876ce458 --- /dev/null +++ b/networking/libsnnet/vendor/github.com/vishvananda/netlink/nl/xfrm_state_linux.go @@ -0,0 +1,221 @@ +package nl + +import ( + "unsafe" +) + +const ( + SizeofXfrmUsersaId = 0x18 + SizeofXfrmStats = 0x0c + SizeofXfrmUsersaInfo = 0xe0 + SizeofXfrmAlgo = 0x44 + SizeofXfrmAlgoAuth = 0x48 + SizeofXfrmEncapTmpl = 0x18 +) + +// struct xfrm_usersa_id { +// xfrm_address_t daddr; +// __be32 spi; +// __u16 family; +// __u8 proto; +// }; + +type XfrmUsersaId struct { + Daddr XfrmAddress + Spi uint32 // big endian + Family uint16 + Proto 
uint8 + Pad byte +} + +func (msg *XfrmUsersaId) Len() int { + return SizeofXfrmUsersaId +} + +func DeserializeXfrmUsersaId(b []byte) *XfrmUsersaId { + return (*XfrmUsersaId)(unsafe.Pointer(&b[0:SizeofXfrmUsersaId][0])) +} + +func (msg *XfrmUsersaId) Serialize() []byte { + return (*(*[SizeofXfrmUsersaId]byte)(unsafe.Pointer(msg)))[:] +} + +// struct xfrm_stats { +// __u32 replay_window; +// __u32 replay; +// __u32 integrity_failed; +// }; + +type XfrmStats struct { + ReplayWindow uint32 + Replay uint32 + IntegrityFailed uint32 +} + +func (msg *XfrmStats) Len() int { + return SizeofXfrmStats +} + +func DeserializeXfrmStats(b []byte) *XfrmStats { + return (*XfrmStats)(unsafe.Pointer(&b[0:SizeofXfrmStats][0])) +} + +func (msg *XfrmStats) Serialize() []byte { + return (*(*[SizeofXfrmStats]byte)(unsafe.Pointer(msg)))[:] +} + +// struct xfrm_usersa_info { +// struct xfrm_selector sel; +// struct xfrm_id id; +// xfrm_address_t saddr; +// struct xfrm_lifetime_cfg lft; +// struct xfrm_lifetime_cur curlft; +// struct xfrm_stats stats; +// __u32 seq; +// __u32 reqid; +// __u16 family; +// __u8 mode; /* XFRM_MODE_xxx */ +// __u8 replay_window; +// __u8 flags; +// #define XFRM_STATE_NOECN 1 +// #define XFRM_STATE_DECAP_DSCP 2 +// #define XFRM_STATE_NOPMTUDISC 4 +// #define XFRM_STATE_WILDRECV 8 +// #define XFRM_STATE_ICMP 16 +// #define XFRM_STATE_AF_UNSPEC 32 +// #define XFRM_STATE_ALIGN4 64 +// #define XFRM_STATE_ESN 128 +// }; +// +// #define XFRM_SA_XFLAG_DONT_ENCAP_DSCP 1 +// + +type XfrmUsersaInfo struct { + Sel XfrmSelector + Id XfrmId + Saddr XfrmAddress + Lft XfrmLifetimeCfg + Curlft XfrmLifetimeCur + Stats XfrmStats + Seq uint32 + Reqid uint32 + Family uint16 + Mode uint8 + ReplayWindow uint8 + Flags uint8 + Pad [7]byte +} + +func (msg *XfrmUsersaInfo) Len() int { + return SizeofXfrmUsersaInfo +} + +func DeserializeXfrmUsersaInfo(b []byte) *XfrmUsersaInfo { + return (*XfrmUsersaInfo)(unsafe.Pointer(&b[0:SizeofXfrmUsersaInfo][0])) +} + +func (msg *XfrmUsersaInfo) 
Serialize() []byte { + return (*(*[SizeofXfrmUsersaInfo]byte)(unsafe.Pointer(msg)))[:] +} + +// struct xfrm_algo { +// char alg_name[64]; +// unsigned int alg_key_len; /* in bits */ +// char alg_key[0]; +// }; + +type XfrmAlgo struct { + AlgName [64]byte + AlgKeyLen uint32 + AlgKey []byte +} + +func (msg *XfrmAlgo) Len() int { + return SizeofXfrmAlgo + int(msg.AlgKeyLen/8) +} + +func DeserializeXfrmAlgo(b []byte) *XfrmAlgo { + ret := XfrmAlgo{} + copy(ret.AlgName[:], b[0:64]) + ret.AlgKeyLen = *(*uint32)(unsafe.Pointer(&b[64])) + ret.AlgKey = b[68:ret.Len()] + return &ret +} + +func (msg *XfrmAlgo) Serialize() []byte { + b := make([]byte, msg.Len()) + copy(b[0:64], msg.AlgName[:]) + copy(b[64:68], (*(*[4]byte)(unsafe.Pointer(&msg.AlgKeyLen)))[:]) + copy(b[68:msg.Len()], msg.AlgKey[:]) + return b +} + +// struct xfrm_algo_auth { +// char alg_name[64]; +// unsigned int alg_key_len; /* in bits */ +// unsigned int alg_trunc_len; /* in bits */ +// char alg_key[0]; +// }; + +type XfrmAlgoAuth struct { + AlgName [64]byte + AlgKeyLen uint32 + AlgTruncLen uint32 + AlgKey []byte +} + +func (msg *XfrmAlgoAuth) Len() int { + return SizeofXfrmAlgoAuth + int(msg.AlgKeyLen/8) +} + +func DeserializeXfrmAlgoAuth(b []byte) *XfrmAlgoAuth { + ret := XfrmAlgoAuth{} + copy(ret.AlgName[:], b[0:64]) + ret.AlgKeyLen = *(*uint32)(unsafe.Pointer(&b[64])) + ret.AlgTruncLen = *(*uint32)(unsafe.Pointer(&b[68])) + ret.AlgKey = b[72:ret.Len()] + return &ret +} + +func (msg *XfrmAlgoAuth) Serialize() []byte { + b := make([]byte, msg.Len()) + copy(b[0:64], msg.AlgName[:]) + copy(b[64:68], (*(*[4]byte)(unsafe.Pointer(&msg.AlgKeyLen)))[:]) + copy(b[68:72], (*(*[4]byte)(unsafe.Pointer(&msg.AlgTruncLen)))[:]) + copy(b[72:msg.Len()], msg.AlgKey[:]) + return b +} + +// struct xfrm_algo_aead { +// char alg_name[64]; +// unsigned int alg_key_len; /* in bits */ +// unsigned int alg_icv_len; /* in bits */ +// char alg_key[0]; +// } + +// struct xfrm_encap_tmpl { +// __u16 encap_type; +// __be16 encap_sport; 
+// __be16 encap_dport; +// xfrm_address_t encap_oa; +// }; + +type XfrmEncapTmpl struct { + EncapType uint16 + EncapSport uint16 // big endian + EncapDport uint16 // big endian + Pad [2]byte + EncapOa XfrmAddress +} + +func (msg *XfrmEncapTmpl) Len() int { + return SizeofXfrmEncapTmpl +} + +func DeserializeXfrmEncapTmpl(b []byte) *XfrmEncapTmpl { + return (*XfrmEncapTmpl)(unsafe.Pointer(&b[0:SizeofXfrmEncapTmpl][0])) +} + +func (msg *XfrmEncapTmpl) Serialize() []byte { + return (*(*[SizeofXfrmEncapTmpl]byte)(unsafe.Pointer(msg)))[:] +} diff --git a/networking/libsnnet/vendor/github.com/vishvananda/netlink/nl/xfrm_state_linux_test.go b/networking/libsnnet/vendor/github.com/vishvananda/netlink/nl/xfrm_state_linux_test.go new file mode 100644 index 000000000..d5281e9a6 --- /dev/null +++ b/networking/libsnnet/vendor/github.com/vishvananda/netlink/nl/xfrm_state_linux_test.go @@ -0,0 +1,207 @@ +package nl + +import ( + "bytes" + "crypto/rand" + "encoding/binary" + "testing" +) + +func (msg *XfrmUsersaId) write(b []byte) { + native := NativeEndian() + msg.Daddr.write(b[0:SizeofXfrmAddress]) + native.PutUint32(b[SizeofXfrmAddress:SizeofXfrmAddress+4], msg.Spi) + native.PutUint16(b[SizeofXfrmAddress+4:SizeofXfrmAddress+6], msg.Family) + b[SizeofXfrmAddress+6] = msg.Proto + b[SizeofXfrmAddress+7] = msg.Pad +} + +func (msg *XfrmUsersaId) serializeSafe() []byte { + b := make([]byte, SizeofXfrmUsersaId) + msg.write(b) + return b +} + +func deserializeXfrmUsersaIdSafe(b []byte) *XfrmUsersaId { + var msg = XfrmUsersaId{} + binary.Read(bytes.NewReader(b[0:SizeofXfrmUsersaId]), NativeEndian(), &msg) + return &msg +} + +func TestXfrmUsersaIdDeserializeSerialize(t *testing.T) { + var orig = make([]byte, SizeofXfrmUsersaId) + rand.Read(orig) + safemsg := deserializeXfrmUsersaIdSafe(orig) + msg := DeserializeXfrmUsersaId(orig) + testDeserializeSerialize(t, orig, safemsg, msg) +} + +func (msg *XfrmStats) write(b []byte) { + native := NativeEndian() + native.PutUint32(b[0:4], 
msg.ReplayWindow) + native.PutUint32(b[4:8], msg.Replay) + native.PutUint32(b[8:12], msg.IntegrityFailed) +} + +func (msg *XfrmStats) serializeSafe() []byte { + b := make([]byte, SizeofXfrmStats) + msg.write(b) + return b +} + +func deserializeXfrmStatsSafe(b []byte) *XfrmStats { + var msg = XfrmStats{} + binary.Read(bytes.NewReader(b[0:SizeofXfrmStats]), NativeEndian(), &msg) + return &msg +} + +func TestXfrmStatsDeserializeSerialize(t *testing.T) { + var orig = make([]byte, SizeofXfrmStats) + rand.Read(orig) + safemsg := deserializeXfrmStatsSafe(orig) + msg := DeserializeXfrmStats(orig) + testDeserializeSerialize(t, orig, safemsg, msg) +} + +func (msg *XfrmUsersaInfo) write(b []byte) { + const IdEnd = SizeofXfrmSelector + SizeofXfrmId + const AddressEnd = IdEnd + SizeofXfrmAddress + const CfgEnd = AddressEnd + SizeofXfrmLifetimeCfg + const CurEnd = CfgEnd + SizeofXfrmLifetimeCur + const StatsEnd = CurEnd + SizeofXfrmStats + native := NativeEndian() + msg.Sel.write(b[0:SizeofXfrmSelector]) + msg.Id.write(b[SizeofXfrmSelector:IdEnd]) + msg.Saddr.write(b[IdEnd:AddressEnd]) + msg.Lft.write(b[AddressEnd:CfgEnd]) + msg.Curlft.write(b[CfgEnd:CurEnd]) + msg.Stats.write(b[CurEnd:StatsEnd]) + native.PutUint32(b[StatsEnd:StatsEnd+4], msg.Seq) + native.PutUint32(b[StatsEnd+4:StatsEnd+8], msg.Reqid) + native.PutUint16(b[StatsEnd+8:StatsEnd+10], msg.Family) + b[StatsEnd+10] = msg.Mode + b[StatsEnd+11] = msg.ReplayWindow + b[StatsEnd+12] = msg.Flags + copy(b[StatsEnd+13:StatsEnd+20], msg.Pad[:]) +} + +func (msg *XfrmUsersaInfo) serializeSafe() []byte { + b := make([]byte, SizeofXfrmUsersaInfo) + msg.write(b) + return b +} + +func deserializeXfrmUsersaInfoSafe(b []byte) *XfrmUsersaInfo { + var msg = XfrmUsersaInfo{} + binary.Read(bytes.NewReader(b[0:SizeofXfrmUsersaInfo]), NativeEndian(), &msg) + return &msg +} + +func TestXfrmUsersaInfoDeserializeSerialize(t *testing.T) { + var orig = make([]byte, SizeofXfrmUsersaInfo) + rand.Read(orig) + safemsg := 
deserializeXfrmUsersaInfoSafe(orig) + msg := DeserializeXfrmUsersaInfo(orig) + testDeserializeSerialize(t, orig, safemsg, msg) +} + +func (msg *XfrmAlgo) write(b []byte) { + native := NativeEndian() + copy(b[0:64], msg.AlgName[:]) + native.PutUint32(b[64:68], msg.AlgKeyLen) + copy(b[68:msg.Len()], msg.AlgKey[:]) +} + +func (msg *XfrmAlgo) serializeSafe() []byte { + b := make([]byte, msg.Len()) + msg.write(b) + return b +} + +func deserializeXfrmAlgoSafe(b []byte) *XfrmAlgo { + var msg = XfrmAlgo{} + copy(msg.AlgName[:], b[0:64]) + binary.Read(bytes.NewReader(b[64:68]), NativeEndian(), &msg.AlgKeyLen) + msg.AlgKey = b[68:msg.Len()] + return &msg +} + +func TestXfrmAlgoDeserializeSerialize(t *testing.T) { + // use a 32 byte key len + var orig = make([]byte, SizeofXfrmAlgo+32) + rand.Read(orig) + // set the key len to 256 bits + orig[64] = 0 + orig[65] = 1 + orig[66] = 0 + orig[67] = 0 + safemsg := deserializeXfrmAlgoSafe(orig) + msg := DeserializeXfrmAlgo(orig) + testDeserializeSerialize(t, orig, safemsg, msg) +} + +func (msg *XfrmAlgoAuth) write(b []byte) { + native := NativeEndian() + copy(b[0:64], msg.AlgName[:]) + native.PutUint32(b[64:68], msg.AlgKeyLen) + native.PutUint32(b[68:72], msg.AlgTruncLen) + copy(b[72:msg.Len()], msg.AlgKey[:]) +} + +func (msg *XfrmAlgoAuth) serializeSafe() []byte { + b := make([]byte, msg.Len()) + msg.write(b) + return b +} + +func deserializeXfrmAlgoAuthSafe(b []byte) *XfrmAlgoAuth { + var msg = XfrmAlgoAuth{} + copy(msg.AlgName[:], b[0:64]) + binary.Read(bytes.NewReader(b[64:68]), NativeEndian(), &msg.AlgKeyLen) + binary.Read(bytes.NewReader(b[68:72]), NativeEndian(), &msg.AlgTruncLen) + msg.AlgKey = b[72:msg.Len()] + return &msg +} + +func TestXfrmAlgoAuthDeserializeSerialize(t *testing.T) { + // use a 32 byte key len + var orig = make([]byte, SizeofXfrmAlgoAuth+32) + rand.Read(orig) + // set the key len to 256 bits + orig[64] = 0 + orig[65] = 1 + orig[66] = 0 + orig[67] = 0 + safemsg := deserializeXfrmAlgoAuthSafe(orig) + msg := 
DeserializeXfrmAlgoAuth(orig) + testDeserializeSerialize(t, orig, safemsg, msg) +} + +func (msg *XfrmEncapTmpl) write(b []byte) { + native := NativeEndian() + native.PutUint16(b[0:2], msg.EncapType) + native.PutUint16(b[2:4], msg.EncapSport) + native.PutUint16(b[4:6], msg.EncapDport) + copy(b[6:8], msg.Pad[:]) + msg.EncapOa.write(b[8:SizeofXfrmAddress]) +} + +func (msg *XfrmEncapTmpl) serializeSafe() []byte { + b := make([]byte, SizeofXfrmEncapTmpl) + msg.write(b) + return b +} + +func deserializeXfrmEncapTmplSafe(b []byte) *XfrmEncapTmpl { + var msg = XfrmEncapTmpl{} + binary.Read(bytes.NewReader(b[0:SizeofXfrmEncapTmpl]), NativeEndian(), &msg) + return &msg +} + +func TestXfrmEncapTmplDeserializeSerialize(t *testing.T) { + var orig = make([]byte, SizeofXfrmEncapTmpl) + rand.Read(orig) + safemsg := deserializeXfrmEncapTmplSafe(orig) + msg := DeserializeXfrmEncapTmpl(orig) + testDeserializeSerialize(t, orig, safemsg, msg) +} diff --git a/networking/libsnnet/vendor/github.com/vishvananda/netlink/protinfo.go b/networking/libsnnet/vendor/github.com/vishvananda/netlink/protinfo.go new file mode 100644 index 000000000..f39ab8f4e --- /dev/null +++ b/networking/libsnnet/vendor/github.com/vishvananda/netlink/protinfo.go @@ -0,0 +1,53 @@ +package netlink + +import ( + "strings" +) + +// Protinfo represents bridge flags from netlink. 
+type Protinfo struct { + Hairpin bool + Guard bool + FastLeave bool + RootBlock bool + Learning bool + Flood bool +} + +// String returns a list of enabled flags +func (prot *Protinfo) String() string { + var boolStrings []string + if prot.Hairpin { + boolStrings = append(boolStrings, "Hairpin") + } + if prot.Guard { + boolStrings = append(boolStrings, "Guard") + } + if prot.FastLeave { + boolStrings = append(boolStrings, "FastLeave") + } + if prot.RootBlock { + boolStrings = append(boolStrings, "RootBlock") + } + if prot.Learning { + boolStrings = append(boolStrings, "Learning") + } + if prot.Flood { + boolStrings = append(boolStrings, "Flood") + } + return strings.Join(boolStrings, " ") +} + +func boolToByte(x bool) []byte { + if x { + return []byte{1} + } + return []byte{0} +} + +func byteToBool(x byte) bool { + if uint8(x) != 0 { + return true + } + return false +} diff --git a/networking/libsnnet/vendor/github.com/vishvananda/netlink/protinfo_linux.go b/networking/libsnnet/vendor/github.com/vishvananda/netlink/protinfo_linux.go new file mode 100644 index 000000000..7181eba10 --- /dev/null +++ b/networking/libsnnet/vendor/github.com/vishvananda/netlink/protinfo_linux.go @@ -0,0 +1,60 @@ +package netlink + +import ( + "fmt" + "syscall" + + "github.com/vishvananda/netlink/nl" +) + +func LinkGetProtinfo(link Link) (Protinfo, error) { + base := link.Attrs() + ensureIndex(base) + var pi Protinfo + req := nl.NewNetlinkRequest(syscall.RTM_GETLINK, syscall.NLM_F_DUMP) + msg := nl.NewIfInfomsg(syscall.AF_BRIDGE) + req.AddData(msg) + msgs, err := req.Execute(syscall.NETLINK_ROUTE, 0) + if err != nil { + return pi, err + } + + for _, m := range msgs { + ans := nl.DeserializeIfInfomsg(m) + if int(ans.Index) != base.Index { + continue + } + attrs, err := nl.ParseRouteAttr(m[ans.Len():]) + if err != nil { + return pi, err + } + for _, attr := range attrs { + if attr.Attr.Type != syscall.IFLA_PROTINFO|syscall.NLA_F_NESTED { + continue + } + infos, err := 
nl.ParseRouteAttr(attr.Value) + if err != nil { + return pi, err + } + var pi Protinfo + for _, info := range infos { + switch info.Attr.Type { + case nl.IFLA_BRPORT_MODE: + pi.Hairpin = byteToBool(info.Value[0]) + case nl.IFLA_BRPORT_GUARD: + pi.Guard = byteToBool(info.Value[0]) + case nl.IFLA_BRPORT_FAST_LEAVE: + pi.FastLeave = byteToBool(info.Value[0]) + case nl.IFLA_BRPORT_PROTECT: + pi.RootBlock = byteToBool(info.Value[0]) + case nl.IFLA_BRPORT_LEARNING: + pi.Learning = byteToBool(info.Value[0]) + case nl.IFLA_BRPORT_UNICAST_FLOOD: + pi.Flood = byteToBool(info.Value[0]) + } + } + return pi, nil + } + } + return pi, fmt.Errorf("Device with index %d not found", base.Index) +} diff --git a/networking/libsnnet/vendor/github.com/vishvananda/netlink/protinfo_test.go b/networking/libsnnet/vendor/github.com/vishvananda/netlink/protinfo_test.go new file mode 100644 index 000000000..f94c42b1c --- /dev/null +++ b/networking/libsnnet/vendor/github.com/vishvananda/netlink/protinfo_test.go @@ -0,0 +1,98 @@ +package netlink + +import "testing" + +func TestProtinfo(t *testing.T) { + tearDown := setUpNetlinkTest(t) + defer tearDown() + master := &Bridge{LinkAttrs{Name: "foo"}} + if err := LinkAdd(master); err != nil { + t.Fatal(err) + } + iface1 := &Dummy{LinkAttrs{Name: "bar1", MasterIndex: master.Index}} + iface2 := &Dummy{LinkAttrs{Name: "bar2", MasterIndex: master.Index}} + iface3 := &Dummy{LinkAttrs{Name: "bar3"}} + + if err := LinkAdd(iface1); err != nil { + t.Fatal(err) + } + if err := LinkAdd(iface2); err != nil { + t.Fatal(err) + } + if err := LinkAdd(iface3); err != nil { + t.Fatal(err) + } + + oldpi1, err := LinkGetProtinfo(iface1) + if err != nil { + t.Fatal(err) + } + oldpi2, err := LinkGetProtinfo(iface2) + if err != nil { + t.Fatal(err) + } + + if err := LinkSetHairpin(iface1, true); err != nil { + t.Fatal(err) + } + + if err := LinkSetRootBlock(iface1, true); err != nil { + t.Fatal(err) + } + + pi1, err := LinkGetProtinfo(iface1) + if err != nil { + 
t.Fatal(err) + } + if !pi1.Hairpin { + t.Fatalf("Hairpin mode is not enabled for %s, but should", iface1.Name) + } + if !pi1.RootBlock { + t.Fatalf("RootBlock is not enabled for %s, but should", iface1.Name) + } + if pi1.Guard != oldpi1.Guard { + t.Fatalf("Guard field was changed for %s but shouldn't", iface1.Name) + } + if pi1.FastLeave != oldpi1.FastLeave { + t.Fatalf("FastLeave field was changed for %s but shouldn't", iface1.Name) + } + if pi1.Learning != oldpi1.Learning { + t.Fatalf("Learning field was changed for %s but shouldn't", iface1.Name) + } + if pi1.Flood != oldpi1.Flood { + t.Fatalf("Flood field was changed for %s but shouldn't", iface1.Name) + } + + if err := LinkSetGuard(iface2, true); err != nil { + t.Fatal(err) + } + if err := LinkSetLearning(iface2, false); err != nil { + t.Fatal(err) + } + pi2, err := LinkGetProtinfo(iface2) + if err != nil { + t.Fatal(err) + } + if pi2.Hairpin { + t.Fatalf("Hairpin mode is enabled for %s, but shouldn't", iface2.Name) + } + if !pi2.Guard { + t.Fatalf("Guard is not enabled for %s, but should", iface2.Name) + } + if pi2.Learning { + t.Fatalf("Learning is enabled for %s, but shouldn't", iface2.Name) + } + if pi2.RootBlock != oldpi2.RootBlock { + t.Fatalf("RootBlock field was changed for %s but shouldn't", iface2.Name) + } + if pi2.FastLeave != oldpi2.FastLeave { + t.Fatalf("FastLeave field was changed for %s but shouldn't", iface2.Name) + } + if pi2.Flood != oldpi2.Flood { + t.Fatalf("Flood field was changed for %s but shouldn't", iface2.Name) + } + + if err := LinkSetHairpin(iface3, true); err == nil || err.Error() != "operation not supported" { + t.Fatalf("Set protinfo attrs for link without master is not supported, but err: %s", err) + } +} diff --git a/networking/libsnnet/vendor/github.com/vishvananda/netlink/qdisc.go b/networking/libsnnet/vendor/github.com/vishvananda/netlink/qdisc.go new file mode 100644 index 000000000..48fe7c798 --- /dev/null +++ 
b/networking/libsnnet/vendor/github.com/vishvananda/netlink/qdisc.go @@ -0,0 +1,290 @@ +package netlink + +import ( + "fmt" + "math" +) + +const ( + HANDLE_NONE = 0 + HANDLE_INGRESS = 0xFFFFFFF1 + HANDLE_ROOT = 0xFFFFFFFF + PRIORITY_MAP_LEN = 16 +) + +type Qdisc interface { + Attrs() *QdiscAttrs + Type() string +} + +// Qdisc represents a netlink qdisc. A qdisc is associated with a link, +// has a handle, a parent and a refcnt. The root qdisc of a device should +// have parent == HANDLE_ROOT. +type QdiscAttrs struct { + LinkIndex int + Handle uint32 + Parent uint32 + Refcnt uint32 // read only +} + +func (q QdiscAttrs) String() string { + return fmt.Sprintf("{LinkIndex: %d, Handle: %s, Parent: %s, Refcnt: %s}", q.LinkIndex, HandleStr(q.Handle), HandleStr(q.Parent), q.Refcnt) +} + +func MakeHandle(major, minor uint16) uint32 { + return (uint32(major) << 16) | uint32(minor) +} + +func MajorMinor(handle uint32) (uint16, uint16) { + return uint16((handle & 0xFFFF0000) >> 16), uint16(handle & 0x0000FFFFF) +} + +func HandleStr(handle uint32) string { + switch handle { + case HANDLE_NONE: + return "none" + case HANDLE_INGRESS: + return "ingress" + case HANDLE_ROOT: + return "root" + default: + major, minor := MajorMinor(handle) + return fmt.Sprintf("%x:%x", major, minor) + } +} + +func Percentage2u32(percentage float32) uint32 { + // FIXME this is most likely not the best way to convert from % to uint32 + if percentage == 100 { + return math.MaxUint32 + } + return uint32(math.MaxUint32 * (percentage / 100)) +} + +// PfifoFast is the default qdisc created by the kernel if one has not +// been defined for the interface +type PfifoFast struct { + QdiscAttrs + Bands uint8 + PriorityMap [PRIORITY_MAP_LEN]uint8 +} + +func (qdisc *PfifoFast) Attrs() *QdiscAttrs { + return &qdisc.QdiscAttrs +} + +func (qdisc *PfifoFast) Type() string { + return "pfifo_fast" +} + +// Prio is a basic qdisc that works just like PfifoFast +type Prio struct { + QdiscAttrs + Bands uint8 + PriorityMap 
[PRIORITY_MAP_LEN]uint8 +} + +func NewPrio(attrs QdiscAttrs) *Prio { + return &Prio{ + QdiscAttrs: attrs, + Bands: 3, + PriorityMap: [PRIORITY_MAP_LEN]uint8{1, 2, 2, 2, 1, 2, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1}, + } +} + +func (qdisc *Prio) Attrs() *QdiscAttrs { + return &qdisc.QdiscAttrs +} + +func (qdisc *Prio) Type() string { + return "prio" +} + +// Htb is a classful qdisc that rate limits based on tokens +type Htb struct { + QdiscAttrs + Version uint32 + Rate2Quantum uint32 + Defcls uint32 + Debug uint32 + DirectPkts uint32 +} + +func NewHtb(attrs QdiscAttrs) *Htb { + return &Htb{ + QdiscAttrs: attrs, + Version: 3, + Defcls: 0, + Rate2Quantum: 10, + Debug: 0, + DirectPkts: 0, + } +} + +func (qdisc *Htb) Attrs() *QdiscAttrs { + return &qdisc.QdiscAttrs +} + +func (qdisc *Htb) Type() string { + return "htb" +} + +// Netem is a classless qdisc that rate limits based on tokens + +type NetemQdiscAttrs struct { + Latency uint32 // in us + DelayCorr float32 // in % + Limit uint32 + Loss float32 // in % + LossCorr float32 // in % + Gap uint32 + Duplicate float32 // in % + DuplicateCorr float32 // in % + Jitter uint32 // in us + ReorderProb float32 // in % + ReorderCorr float32 // in % + CorruptProb float32 // in % + CorruptCorr float32 // in % +} + +func (q NetemQdiscAttrs) String() string { + return fmt.Sprintf( + "{Latency: %d, Limit: %d, Loss: %d, Gap: %d, Duplicate: %d, Jitter: %d}", + q.Latency, q.Limit, q.Loss, q.Gap, q.Duplicate, q.Jitter, + ) +} + +type Netem struct { + QdiscAttrs + Latency uint32 + DelayCorr uint32 + Limit uint32 + Loss uint32 + LossCorr uint32 + Gap uint32 + Duplicate uint32 + DuplicateCorr uint32 + Jitter uint32 + ReorderProb uint32 + ReorderCorr uint32 + CorruptProb uint32 + CorruptCorr uint32 +} + +func NewNetem(attrs QdiscAttrs, nattrs NetemQdiscAttrs) *Netem { + var limit uint32 = 1000 + var loss_corr, delay_corr, duplicate_corr uint32 + var reorder_prob, reorder_corr uint32 + var corrupt_prob, corrupt_corr uint32 + + latency := 
nattrs.Latency + loss := Percentage2u32(nattrs.Loss) + gap := nattrs.Gap + duplicate := Percentage2u32(nattrs.Duplicate) + jitter := nattrs.Jitter + + // Correlation + if latency > 0 && jitter > 0 { + delay_corr = Percentage2u32(nattrs.DelayCorr) + } + if loss > 0 { + loss_corr = Percentage2u32(nattrs.LossCorr) + } + if duplicate > 0 { + duplicate_corr = Percentage2u32(nattrs.DuplicateCorr) + } + // FIXME should validate values(like loss/duplicate are percentages...) + latency = time2Tick(latency) + + if nattrs.Limit != 0 { + limit = nattrs.Limit + } + // Jitter is only value if latency is > 0 + if latency > 0 { + jitter = time2Tick(jitter) + } + + reorder_prob = Percentage2u32(nattrs.ReorderProb) + reorder_corr = Percentage2u32(nattrs.ReorderCorr) + + if reorder_prob > 0 { + // ERROR if lantency == 0 + if gap == 0 { + gap = 1 + } + } + + corrupt_prob = Percentage2u32(nattrs.CorruptProb) + corrupt_corr = Percentage2u32(nattrs.CorruptCorr) + + return &Netem{ + QdiscAttrs: attrs, + Latency: latency, + DelayCorr: delay_corr, + Limit: limit, + Loss: loss, + LossCorr: loss_corr, + Gap: gap, + Duplicate: duplicate, + DuplicateCorr: duplicate_corr, + Jitter: jitter, + ReorderProb: reorder_prob, + ReorderCorr: reorder_corr, + CorruptProb: corrupt_prob, + CorruptCorr: corrupt_corr, + } +} + +func (qdisc *Netem) Attrs() *QdiscAttrs { + return &qdisc.QdiscAttrs +} + +func (qdisc *Netem) Type() string { + return "netem" +} + +// Tbf is a classless qdisc that rate limits based on tokens +type Tbf struct { + QdiscAttrs + // TODO: handle 64bit rate properly + Rate uint64 + Limit uint32 + Buffer uint32 + // TODO: handle other settings +} + +func (qdisc *Tbf) Attrs() *QdiscAttrs { + return &qdisc.QdiscAttrs +} + +func (qdisc *Tbf) Type() string { + return "tbf" +} + +// Ingress is a qdisc for adding ingress filters +type Ingress struct { + QdiscAttrs +} + +func (qdisc *Ingress) Attrs() *QdiscAttrs { + return &qdisc.QdiscAttrs +} + +func (qdisc *Ingress) Type() string { + return 
"ingress" +} + +// GenericQdisc qdiscs represent types that are not currently understood +// by this netlink library. +type GenericQdisc struct { + QdiscAttrs + QdiscType string +} + +func (qdisc *GenericQdisc) Attrs() *QdiscAttrs { + return &qdisc.QdiscAttrs +} + +func (qdisc *GenericQdisc) Type() string { + return qdisc.QdiscType +} diff --git a/networking/libsnnet/vendor/github.com/vishvananda/netlink/qdisc_linux.go b/networking/libsnnet/vendor/github.com/vishvananda/netlink/qdisc_linux.go new file mode 100644 index 000000000..d9a8b170f --- /dev/null +++ b/networking/libsnnet/vendor/github.com/vishvananda/netlink/qdisc_linux.go @@ -0,0 +1,415 @@ +package netlink + +import ( + "fmt" + "io/ioutil" + "strconv" + "strings" + "syscall" + + "github.com/vishvananda/netlink/nl" +) + +// QdiscDel will delete a qdisc from the system. +// Equivalent to: `tc qdisc del $qdisc` +func QdiscDel(qdisc Qdisc) error { + return qdiscModify(syscall.RTM_DELQDISC, 0, qdisc) +} + +// QdiscChange will change a qdisc in place +// Equivalent to: `tc qdisc change $qdisc` +// The parent and handle MUST NOT be changed. +func QdiscChange(qdisc Qdisc) error { + return qdiscModify(syscall.RTM_NEWQDISC, 0, qdisc) +} + +// QdiscReplace will replace a qdisc to the system. +// Equivalent to: `tc qdisc replace $qdisc` +// The handle MUST change. +func QdiscReplace(qdisc Qdisc) error { + return qdiscModify( + syscall.RTM_NEWQDISC, + syscall.NLM_F_CREATE|syscall.NLM_F_REPLACE, + qdisc) +} + +// QdiscAdd will add a qdisc to the system. 
+// Equivalent to: `tc qdisc add $qdisc` +func QdiscAdd(qdisc Qdisc) error { + return qdiscModify( + syscall.RTM_NEWQDISC, + syscall.NLM_F_CREATE|syscall.NLM_F_EXCL, + qdisc) +} + +func qdiscModify(cmd, flags int, qdisc Qdisc) error { + req := nl.NewNetlinkRequest(cmd, flags|syscall.NLM_F_ACK) + base := qdisc.Attrs() + msg := &nl.TcMsg{ + Family: nl.FAMILY_ALL, + Ifindex: int32(base.LinkIndex), + Handle: base.Handle, + Parent: base.Parent, + } + req.AddData(msg) + + // When deleting don't bother building the rest of the netlink payload + if cmd != syscall.RTM_DELQDISC { + if err := qdiscPayload(req, qdisc); err != nil { + return err + } + } + + _, err := req.Execute(syscall.NETLINK_ROUTE, 0) + return err +} + +func qdiscPayload(req *nl.NetlinkRequest, qdisc Qdisc) error { + + req.AddData(nl.NewRtAttr(nl.TCA_KIND, nl.ZeroTerminated(qdisc.Type()))) + + options := nl.NewRtAttr(nl.TCA_OPTIONS, nil) + if prio, ok := qdisc.(*Prio); ok { + tcmap := nl.TcPrioMap{ + Bands: int32(prio.Bands), + Priomap: prio.PriorityMap, + } + options = nl.NewRtAttr(nl.TCA_OPTIONS, tcmap.Serialize()) + } else if tbf, ok := qdisc.(*Tbf); ok { + opt := nl.TcTbfQopt{} + // TODO: handle rate > uint32 + opt.Rate.Rate = uint32(tbf.Rate) + opt.Limit = tbf.Limit + opt.Buffer = tbf.Buffer + nl.NewRtAttrChild(options, nl.TCA_TBF_PARMS, opt.Serialize()) + } else if htb, ok := qdisc.(*Htb); ok { + opt := nl.TcHtbGlob{} + opt.Version = htb.Version + opt.Rate2Quantum = htb.Rate2Quantum + opt.Defcls = htb.Defcls + // TODO: Handle Debug properly. 
For now default to 0 + opt.Debug = htb.Debug + opt.DirectPkts = htb.DirectPkts + nl.NewRtAttrChild(options, nl.TCA_HTB_INIT, opt.Serialize()) + // nl.NewRtAttrChild(options, nl.TCA_HTB_DIRECT_QLEN, opt.Serialize()) + } else if netem, ok := qdisc.(*Netem); ok { + opt := nl.TcNetemQopt{} + opt.Latency = netem.Latency + opt.Limit = netem.Limit + opt.Loss = netem.Loss + opt.Gap = netem.Gap + opt.Duplicate = netem.Duplicate + opt.Jitter = netem.Jitter + options = nl.NewRtAttr(nl.TCA_OPTIONS, opt.Serialize()) + // Correlation + corr := nl.TcNetemCorr{} + corr.DelayCorr = netem.DelayCorr + corr.LossCorr = netem.LossCorr + corr.DupCorr = netem.DuplicateCorr + + if corr.DelayCorr > 0 || corr.LossCorr > 0 || corr.DupCorr > 0 { + nl.NewRtAttrChild(options, nl.TCA_NETEM_CORR, corr.Serialize()) + } + // Corruption + corruption := nl.TcNetemCorrupt{} + corruption.Probability = netem.CorruptProb + corruption.Correlation = netem.CorruptCorr + if corruption.Probability > 0 { + nl.NewRtAttrChild(options, nl.TCA_NETEM_CORRUPT, corruption.Serialize()) + } + // Reorder + reorder := nl.TcNetemReorder{} + reorder.Probability = netem.ReorderProb + reorder.Correlation = netem.ReorderCorr + if reorder.Probability > 0 { + nl.NewRtAttrChild(options, nl.TCA_NETEM_REORDER, reorder.Serialize()) + } + } else if _, ok := qdisc.(*Ingress); ok { + // ingress filters must use the proper handle + if qdisc.Attrs().Parent != HANDLE_INGRESS { + return fmt.Errorf("Ingress filters must set Parent to HANDLE_INGRESS") + } + } + + req.AddData(options) + return nil +} + +// QdiscList gets a list of qdiscs in the system. +// Equivalent to: `tc qdisc show`. +// The list can be filtered by link. 
+func QdiscList(link Link) ([]Qdisc, error) { + req := nl.NewNetlinkRequest(syscall.RTM_GETQDISC, syscall.NLM_F_DUMP) + index := int32(0) + if link != nil { + base := link.Attrs() + ensureIndex(base) + index = int32(base.Index) + } + msg := &nl.TcMsg{ + Family: nl.FAMILY_ALL, + Ifindex: index, + } + req.AddData(msg) + + msgs, err := req.Execute(syscall.NETLINK_ROUTE, syscall.RTM_NEWQDISC) + if err != nil { + return nil, err + } + + var res []Qdisc + for _, m := range msgs { + msg := nl.DeserializeTcMsg(m) + + attrs, err := nl.ParseRouteAttr(m[msg.Len():]) + if err != nil { + return nil, err + } + + // skip qdiscs from other interfaces + if link != nil && msg.Ifindex != index { + continue + } + + base := QdiscAttrs{ + LinkIndex: int(msg.Ifindex), + Handle: msg.Handle, + Parent: msg.Parent, + Refcnt: msg.Info, + } + var qdisc Qdisc + qdiscType := "" + for _, attr := range attrs { + switch attr.Attr.Type { + case nl.TCA_KIND: + qdiscType = string(attr.Value[:len(attr.Value)-1]) + switch qdiscType { + case "pfifo_fast": + qdisc = &PfifoFast{} + case "prio": + qdisc = &Prio{} + case "tbf": + qdisc = &Tbf{} + case "ingress": + qdisc = &Ingress{} + case "htb": + qdisc = &Htb{} + case "netem": + qdisc = &Netem{} + default: + qdisc = &GenericQdisc{QdiscType: qdiscType} + } + case nl.TCA_OPTIONS: + switch qdiscType { + case "pfifo_fast": + // pfifo returns TcPrioMap directly without wrapping it in rtattr + if err := parsePfifoFastData(qdisc, attr.Value); err != nil { + return nil, err + } + case "prio": + // prio returns TcPrioMap directly without wrapping it in rtattr + if err := parsePrioData(qdisc, attr.Value); err != nil { + return nil, err + } + case "tbf": + data, err := nl.ParseRouteAttr(attr.Value) + if err != nil { + return nil, err + } + if err := parseTbfData(qdisc, data); err != nil { + return nil, err + } + case "htb": + data, err := nl.ParseRouteAttr(attr.Value) + if err != nil { + return nil, err + } + if err := parseHtbData(qdisc, data); err != nil { + return 
nil, err + } + case "netem": + if err := parseNetemData(qdisc, attr.Value); err != nil { + return nil, err + } + + // no options for ingress + } + } + } + *qdisc.Attrs() = base + res = append(res, qdisc) + } + + return res, nil +} + +func parsePfifoFastData(qdisc Qdisc, value []byte) error { + pfifo := qdisc.(*PfifoFast) + tcmap := nl.DeserializeTcPrioMap(value) + pfifo.PriorityMap = tcmap.Priomap + pfifo.Bands = uint8(tcmap.Bands) + return nil +} + +func parsePrioData(qdisc Qdisc, value []byte) error { + prio := qdisc.(*Prio) + tcmap := nl.DeserializeTcPrioMap(value) + prio.PriorityMap = tcmap.Priomap + prio.Bands = uint8(tcmap.Bands) + return nil +} + +func parseHtbData(qdisc Qdisc, data []syscall.NetlinkRouteAttr) error { + native = nl.NativeEndian() + htb := qdisc.(*Htb) + for _, datum := range data { + switch datum.Attr.Type { + case nl.TCA_HTB_INIT: + opt := nl.DeserializeTcHtbGlob(datum.Value) + htb.Version = opt.Version + htb.Rate2Quantum = opt.Rate2Quantum + htb.Defcls = opt.Defcls + htb.Debug = opt.Debug + htb.DirectPkts = opt.DirectPkts + case nl.TCA_HTB_DIRECT_QLEN: + // TODO + //htb.DirectQlen = native.uint32(datum.Value) + } + } + return nil +} + +func parseNetemData(qdisc Qdisc, value []byte) error { + netem := qdisc.(*Netem) + opt := nl.DeserializeTcNetemQopt(value) + netem.Latency = opt.Latency + netem.Limit = opt.Limit + netem.Loss = opt.Loss + netem.Gap = opt.Gap + netem.Duplicate = opt.Duplicate + netem.Jitter = opt.Jitter + data, err := nl.ParseRouteAttr(value[nl.SizeofTcNetemQopt:]) + if err != nil { + return err + } + for _, datum := range data { + switch datum.Attr.Type { + case nl.TCA_NETEM_CORR: + opt := nl.DeserializeTcNetemCorr(datum.Value) + netem.DelayCorr = opt.DelayCorr + netem.LossCorr = opt.LossCorr + netem.DuplicateCorr = opt.DupCorr + case nl.TCA_NETEM_CORRUPT: + opt := nl.DeserializeTcNetemCorrupt(datum.Value) + netem.CorruptProb = opt.Probability + netem.CorruptCorr = opt.Correlation + case nl.TCA_NETEM_REORDER: + opt := 
nl.DeserializeTcNetemReorder(datum.Value) + netem.ReorderProb = opt.Probability + netem.ReorderCorr = opt.Correlation + } + } + return nil +} + +func parseTbfData(qdisc Qdisc, data []syscall.NetlinkRouteAttr) error { + native = nl.NativeEndian() + tbf := qdisc.(*Tbf) + for _, datum := range data { + switch datum.Attr.Type { + case nl.TCA_TBF_PARMS: + opt := nl.DeserializeTcTbfQopt(datum.Value) + tbf.Rate = uint64(opt.Rate.Rate) + tbf.Limit = opt.Limit + tbf.Buffer = opt.Buffer + case nl.TCA_TBF_RATE64: + tbf.Rate = native.Uint64(datum.Value[0:4]) + } + } + return nil +} + +const ( + TIME_UNITS_PER_SEC = 1000000 +) + +var ( + tickInUsec float64 = 0.0 + clockFactor float64 = 0.0 + hz float64 = 0.0 +) + +func initClock() { + data, err := ioutil.ReadFile("/proc/net/psched") + if err != nil { + return + } + parts := strings.Split(strings.TrimSpace(string(data)), " ") + if len(parts) < 3 { + return + } + var vals [3]uint64 + for i := range vals { + val, err := strconv.ParseUint(parts[i], 16, 32) + if err != nil { + return + } + vals[i] = val + } + // compatibility + if vals[2] == 1000000000 { + vals[0] = vals[1] + } + clockFactor = float64(vals[2]) / TIME_UNITS_PER_SEC + tickInUsec = float64(vals[0]) / float64(vals[1]) * clockFactor + hz = float64(vals[0]) +} + +func TickInUsec() float64 { + if tickInUsec == 0.0 { + initClock() + } + return tickInUsec +} + +func ClockFactor() float64 { + if clockFactor == 0.0 { + initClock() + } + return clockFactor +} + +func Hz() float64 { + if hz == 0.0 { + initClock() + } + return hz +} + +func time2Tick(time uint32) uint32 { + return uint32(float64(time) * TickInUsec()) +} + +func tick2Time(tick uint32) uint32 { + return uint32(float64(tick) / TickInUsec()) +} + +func time2Ktime(time uint32) uint32 { + return uint32(float64(time) * ClockFactor()) +} + +func ktime2Time(ktime uint32) uint32 { + return uint32(float64(ktime) / ClockFactor()) +} + +func burst(rate uint64, buffer uint32) uint32 { + return uint32(float64(rate) * 
float64(tick2Time(buffer)) / TIME_UNITS_PER_SEC) +} + +func latency(rate uint64, limit, buffer uint32) float64 { + return TIME_UNITS_PER_SEC*(float64(limit)/float64(rate)) - float64(tick2Time(buffer)) +} + +func Xmittime(rate uint64, size uint32) float64 { + return TickInUsec() * TIME_UNITS_PER_SEC * (float64(size) / float64(rate)) +} diff --git a/networking/libsnnet/vendor/github.com/vishvananda/netlink/qdisc_test.go b/networking/libsnnet/vendor/github.com/vishvananda/netlink/qdisc_test.go new file mode 100644 index 000000000..6e1772fe3 --- /dev/null +++ b/networking/libsnnet/vendor/github.com/vishvananda/netlink/qdisc_test.go @@ -0,0 +1,345 @@ +package netlink + +import ( + "testing" +) + +func TestTbfAddDel(t *testing.T) { + tearDown := setUpNetlinkTest(t) + defer tearDown() + if err := LinkAdd(&Ifb{LinkAttrs{Name: "foo"}}); err != nil { + t.Fatal(err) + } + link, err := LinkByName("foo") + if err != nil { + t.Fatal(err) + } + if err := LinkSetUp(link); err != nil { + t.Fatal(err) + } + qdisc := &Tbf{ + QdiscAttrs: QdiscAttrs{ + LinkIndex: link.Attrs().Index, + Handle: MakeHandle(1, 0), + Parent: HANDLE_ROOT, + }, + Rate: 131072, + Limit: 1220703, + Buffer: 16793, + } + if err := QdiscAdd(qdisc); err != nil { + t.Fatal(err) + } + qdiscs, err := QdiscList(link) + if err != nil { + t.Fatal(err) + } + if len(qdiscs) != 1 { + t.Fatal("Failed to add qdisc") + } + tbf, ok := qdiscs[0].(*Tbf) + if !ok { + t.Fatal("Qdisc is the wrong type") + } + if tbf.Rate != qdisc.Rate { + t.Fatal("Rate doesn't match") + } + if tbf.Limit != qdisc.Limit { + t.Fatal("Limit doesn't match") + } + if tbf.Buffer != qdisc.Buffer { + t.Fatal("Buffer doesn't match") + } + if err := QdiscDel(qdisc); err != nil { + t.Fatal(err) + } + qdiscs, err = QdiscList(link) + if err != nil { + t.Fatal(err) + } + if len(qdiscs) != 0 { + t.Fatal("Failed to remove qdisc") + } +} + +func TestHtbAddDel(t *testing.T) { + tearDown := setUpNetlinkTest(t) + defer tearDown() + if err := LinkAdd(&Ifb{LinkAttrs{Name: 
"foo"}}); err != nil { + t.Fatal(err) + } + link, err := LinkByName("foo") + if err != nil { + t.Fatal(err) + } + if err := LinkSetUp(link); err != nil { + t.Fatal(err) + } + + attrs := QdiscAttrs{ + LinkIndex: link.Attrs().Index, + Handle: MakeHandle(1, 0), + Parent: HANDLE_ROOT, + } + + qdisc := NewHtb(attrs) + qdisc.Rate2Quantum = 5 + if err := QdiscAdd(qdisc); err != nil { + t.Fatal(err) + } + + qdiscs, err := QdiscList(link) + if err != nil { + t.Fatal(err) + } + if len(qdiscs) != 1 { + t.Fatal("Failed to add qdisc") + } + htb, ok := qdiscs[0].(*Htb) + if !ok { + t.Fatal("Qdisc is the wrong type") + } + if htb.Defcls != qdisc.Defcls { + t.Fatal("Defcls doesn't match") + } + if htb.Rate2Quantum != qdisc.Rate2Quantum { + t.Fatal("Rate2Quantum doesn't match") + } + if htb.Debug != qdisc.Debug { + t.Fatal("Debug doesn't match") + } + if err := QdiscDel(qdisc); err != nil { + t.Fatal(err) + } + qdiscs, err = QdiscList(link) + if err != nil { + t.Fatal(err) + } + if len(qdiscs) != 0 { + t.Fatal("Failed to remove qdisc") + } +} + +func TestPrioAddDel(t *testing.T) { + tearDown := setUpNetlinkTest(t) + defer tearDown() + if err := LinkAdd(&Ifb{LinkAttrs{Name: "foo"}}); err != nil { + t.Fatal(err) + } + link, err := LinkByName("foo") + if err != nil { + t.Fatal(err) + } + if err := LinkSetUp(link); err != nil { + t.Fatal(err) + } + qdisc := NewPrio(QdiscAttrs{ + LinkIndex: link.Attrs().Index, + Handle: MakeHandle(1, 0), + Parent: HANDLE_ROOT, + }) + if err := QdiscAdd(qdisc); err != nil { + t.Fatal(err) + } + qdiscs, err := QdiscList(link) + if err != nil { + t.Fatal(err) + } + if len(qdiscs) != 1 { + t.Fatal("Failed to add qdisc") + } + _, ok := qdiscs[0].(*Prio) + if !ok { + t.Fatal("Qdisc is the wrong type") + } + if err := QdiscDel(qdisc); err != nil { + t.Fatal(err) + } + qdiscs, err = QdiscList(link) + if err != nil { + t.Fatal(err) + } + if len(qdiscs) != 0 { + t.Fatal("Failed to remove qdisc") + } +} + +func TestTbfAddHtbReplaceDel(t *testing.T) { + tearDown := 
setUpNetlinkTest(t) + defer tearDown() + if err := LinkAdd(&Ifb{LinkAttrs{Name: "foo"}}); err != nil { + t.Fatal(err) + } + link, err := LinkByName("foo") + if err != nil { + t.Fatal(err) + } + if err := LinkSetUp(link); err != nil { + t.Fatal(err) + } + + // Add + attrs := QdiscAttrs{ + LinkIndex: link.Attrs().Index, + Handle: MakeHandle(1, 0), + Parent: HANDLE_ROOT, + } + qdisc := &Tbf{ + QdiscAttrs: attrs, + Rate: 131072, + Limit: 1220703, + Buffer: 16793, + } + if err := QdiscAdd(qdisc); err != nil { + t.Fatal(err) + } + qdiscs, err := QdiscList(link) + if err != nil { + t.Fatal(err) + } + if len(qdiscs) != 1 { + t.Fatal("Failed to add qdisc") + } + tbf, ok := qdiscs[0].(*Tbf) + if !ok { + t.Fatal("Qdisc is the wrong type") + } + if tbf.Rate != qdisc.Rate { + t.Fatal("Rate doesn't match") + } + if tbf.Limit != qdisc.Limit { + t.Fatal("Limit doesn't match") + } + if tbf.Buffer != qdisc.Buffer { + t.Fatal("Buffer doesn't match") + } + // Replace + // For replace to work, the handle MUST be different that the running one + attrs.Handle = MakeHandle(2, 0) + qdisc2 := NewHtb(attrs) + qdisc2.Rate2Quantum = 5 + if err := QdiscReplace(qdisc2); err != nil { + t.Fatal(err) + } + + qdiscs, err = QdiscList(link) + if err != nil { + t.Fatal(err) + } + if len(qdiscs) != 1 { + t.Fatal("Failed to add qdisc") + } + htb, ok := qdiscs[0].(*Htb) + if !ok { + t.Fatal("Qdisc is the wrong type") + } + if htb.Defcls != qdisc2.Defcls { + t.Fatal("Defcls doesn't match") + } + if htb.Rate2Quantum != qdisc2.Rate2Quantum { + t.Fatal("Rate2Quantum doesn't match") + } + if htb.Debug != qdisc2.Debug { + t.Fatal("Debug doesn't match") + } + + if err := QdiscDel(qdisc2); err != nil { + t.Fatal(err) + } + qdiscs, err = QdiscList(link) + if err != nil { + t.Fatal(err) + } + if len(qdiscs) != 0 { + t.Fatal("Failed to remove qdisc") + } +} + +func TestTbfAddTbfChangeDel(t *testing.T) { + tearDown := setUpNetlinkTest(t) + defer tearDown() + if err := LinkAdd(&Ifb{LinkAttrs{Name: "foo"}}); err != nil 
{ + t.Fatal(err) + } + link, err := LinkByName("foo") + if err != nil { + t.Fatal(err) + } + if err := LinkSetUp(link); err != nil { + t.Fatal(err) + } + + // Add + attrs := QdiscAttrs{ + LinkIndex: link.Attrs().Index, + Handle: MakeHandle(1, 0), + Parent: HANDLE_ROOT, + } + qdisc := &Tbf{ + QdiscAttrs: attrs, + Rate: 131072, + Limit: 1220703, + Buffer: 16793, + } + if err := QdiscAdd(qdisc); err != nil { + t.Fatal(err) + } + qdiscs, err := QdiscList(link) + if err != nil { + t.Fatal(err) + } + if len(qdiscs) != 1 { + t.Fatal("Failed to add qdisc") + } + tbf, ok := qdiscs[0].(*Tbf) + if !ok { + t.Fatal("Qdisc is the wrong type") + } + if tbf.Rate != qdisc.Rate { + t.Fatal("Rate doesn't match") + } + if tbf.Limit != qdisc.Limit { + t.Fatal("Limit doesn't match") + } + if tbf.Buffer != qdisc.Buffer { + t.Fatal("Buffer doesn't match") + } + // Change + // For change to work, the handle MUST not change + qdisc.Rate = 23456 + if err := QdiscChange(qdisc); err != nil { + t.Fatal(err) + } + + qdiscs, err = QdiscList(link) + if err != nil { + t.Fatal(err) + } + if len(qdiscs) != 1 { + t.Fatal("Failed to add qdisc") + } + tbf, ok = qdiscs[0].(*Tbf) + if !ok { + t.Fatal("Qdisc is the wrong type") + } + if tbf.Rate != qdisc.Rate { + t.Fatal("Rate doesn't match") + } + if tbf.Limit != qdisc.Limit { + t.Fatal("Limit doesn't match") + } + if tbf.Buffer != qdisc.Buffer { + t.Fatal("Buffer doesn't match") + } + + if err := QdiscDel(qdisc); err != nil { + t.Fatal(err) + } + qdiscs, err = QdiscList(link) + if err != nil { + t.Fatal(err) + } + if len(qdiscs) != 0 { + t.Fatal("Failed to remove qdisc") + } +} diff --git a/networking/libsnnet/vendor/github.com/vishvananda/netlink/route.go b/networking/libsnnet/vendor/github.com/vishvananda/netlink/route.go new file mode 100644 index 000000000..a7303d4c2 --- /dev/null +++ b/networking/libsnnet/vendor/github.com/vishvananda/netlink/route.go @@ -0,0 +1,80 @@ +package netlink + +import ( + "fmt" + "net" + "syscall" +) + +// Scope is an enum 
representing a route scope. +type Scope uint8 + +const ( + SCOPE_UNIVERSE Scope = syscall.RT_SCOPE_UNIVERSE + SCOPE_SITE Scope = syscall.RT_SCOPE_SITE + SCOPE_LINK Scope = syscall.RT_SCOPE_LINK + SCOPE_HOST Scope = syscall.RT_SCOPE_HOST + SCOPE_NOWHERE Scope = syscall.RT_SCOPE_NOWHERE +) + +type NextHopFlag int + +const ( + FLAG_ONLINK NextHopFlag = syscall.RTNH_F_ONLINK + FLAG_PERVASIVE NextHopFlag = syscall.RTNH_F_PERVASIVE +) + +// Route represents a netlink route. +type Route struct { + LinkIndex int + ILinkIndex int + Scope Scope + Dst *net.IPNet + Src net.IP + Gw net.IP + Protocol int + Priority int + Table int + Type int + Tos int + Flags int +} + +func (r Route) String() string { + return fmt.Sprintf("{Ifindex: %d Dst: %s Src: %s Gw: %s Flags: %s}", r.LinkIndex, r.Dst, + r.Src, r.Gw, r.ListFlags()) +} + +func (r *Route) SetFlag(flag NextHopFlag) { + r.Flags |= int(flag) +} + +func (r *Route) ClearFlag(flag NextHopFlag) { + r.Flags &^= int(flag) +} + +type flagString struct { + f NextHopFlag + s string +} + +var testFlags = []flagString{ + flagString{f: FLAG_ONLINK, s: "onlink"}, + flagString{f: FLAG_PERVASIVE, s: "pervasive"}, +} + +func (r *Route) ListFlags() []string { + var flags []string + for _, tf := range testFlags { + if r.Flags&int(tf.f) != 0 { + flags = append(flags, tf.s) + } + } + return flags +} + +// RouteUpdate is sent when a route changes - type is RTM_NEWROUTE or RTM_DELROUTE +type RouteUpdate struct { + Type uint16 + Route +} diff --git a/networking/libsnnet/vendor/github.com/vishvananda/netlink/route_linux.go b/networking/libsnnet/vendor/github.com/vishvananda/netlink/route_linux.go new file mode 100644 index 000000000..d8026a7d1 --- /dev/null +++ b/networking/libsnnet/vendor/github.com/vishvananda/netlink/route_linux.go @@ -0,0 +1,327 @@ +package netlink + +import ( + "fmt" + "net" + "syscall" + + "github.com/vishvananda/netlink/nl" +) + +// RtAttr is shared so it is in netlink_linux.go + +const ( + RT_FILTER_PROTOCOL uint64 = 1 << (1 + 
iota) + RT_FILTER_SCOPE + RT_FILTER_TYPE + RT_FILTER_TOS + RT_FILTER_IIF + RT_FILTER_OIF + RT_FILTER_DST + RT_FILTER_SRC + RT_FILTER_GW + RT_FILTER_TABLE +) + +// RouteAdd will add a route to the system. +// Equivalent to: `ip route add $route` +func RouteAdd(route *Route) error { + req := nl.NewNetlinkRequest(syscall.RTM_NEWROUTE, syscall.NLM_F_CREATE|syscall.NLM_F_EXCL|syscall.NLM_F_ACK) + return routeHandle(route, req, nl.NewRtMsg()) +} + +// RouteDel will delete a route from the system. +// Equivalent to: `ip route del $route` +func RouteDel(route *Route) error { + req := nl.NewNetlinkRequest(syscall.RTM_DELROUTE, syscall.NLM_F_ACK) + return routeHandle(route, req, nl.NewRtDelMsg()) +} + +func routeHandle(route *Route, req *nl.NetlinkRequest, msg *nl.RtMsg) error { + if (route.Dst == nil || route.Dst.IP == nil) && route.Src == nil && route.Gw == nil { + return fmt.Errorf("one of Dst.IP, Src, or Gw must not be nil") + } + + family := -1 + var rtAttrs []*nl.RtAttr + + if route.Dst != nil && route.Dst.IP != nil { + dstLen, _ := route.Dst.Mask.Size() + msg.Dst_len = uint8(dstLen) + dstFamily := nl.GetIPFamily(route.Dst.IP) + family = dstFamily + var dstData []byte + if dstFamily == FAMILY_V4 { + dstData = route.Dst.IP.To4() + } else { + dstData = route.Dst.IP.To16() + } + rtAttrs = append(rtAttrs, nl.NewRtAttr(syscall.RTA_DST, dstData)) + } + + if route.Src != nil { + srcFamily := nl.GetIPFamily(route.Src) + if family != -1 && family != srcFamily { + return fmt.Errorf("source and destination ip are not the same IP family") + } + family = srcFamily + var srcData []byte + if srcFamily == FAMILY_V4 { + srcData = route.Src.To4() + } else { + srcData = route.Src.To16() + } + // The commonly used src ip for routes is actually PREFSRC + rtAttrs = append(rtAttrs, nl.NewRtAttr(syscall.RTA_PREFSRC, srcData)) + } + + if route.Gw != nil { + gwFamily := nl.GetIPFamily(route.Gw) + if family != -1 && family != gwFamily { + return fmt.Errorf("gateway, source, and destination ip 
are not the same IP family") + } + family = gwFamily + var gwData []byte + if gwFamily == FAMILY_V4 { + gwData = route.Gw.To4() + } else { + gwData = route.Gw.To16() + } + rtAttrs = append(rtAttrs, nl.NewRtAttr(syscall.RTA_GATEWAY, gwData)) + } + + if route.Table > 0 { + if route.Table >= 256 { + msg.Table = syscall.RT_TABLE_UNSPEC + b := make([]byte, 4) + native.PutUint32(b, uint32(route.Table)) + rtAttrs = append(rtAttrs, nl.NewRtAttr(syscall.RTA_TABLE, b)) + } else { + msg.Table = uint8(route.Table) + } + } + + if route.Priority > 0 { + b := make([]byte, 4) + native.PutUint32(b, uint32(route.Priority)) + rtAttrs = append(rtAttrs, nl.NewRtAttr(syscall.RTA_PRIORITY, b)) + } + if route.Tos > 0 { + msg.Tos = uint8(route.Tos) + } + if route.Protocol > 0 { + msg.Protocol = uint8(route.Protocol) + } + if route.Type > 0 { + msg.Type = uint8(route.Type) + } + + msg.Scope = uint8(route.Scope) + msg.Family = uint8(family) + req.AddData(msg) + for _, attr := range rtAttrs { + req.AddData(attr) + } + + var ( + b = make([]byte, 4) + native = nl.NativeEndian() + ) + native.PutUint32(b, uint32(route.LinkIndex)) + + req.AddData(nl.NewRtAttr(syscall.RTA_OIF, b)) + + _, err := req.Execute(syscall.NETLINK_ROUTE, 0) + return err +} + +// RouteList gets a list of routes in the system. +// Equivalent to: `ip route show`. +// The list can be filtered by link and ip family. +func RouteList(link Link, family int) ([]Route, error) { + var routeFilter *Route + if link != nil { + routeFilter = &Route{ + LinkIndex: link.Attrs().Index, + } + } + return RouteListFiltered(family, routeFilter, RT_FILTER_OIF) +} + +// RouteListFiltered gets a list of routes in the system filtered with specified rules. 
+// All rules must be defined in RouteFilter struct +func RouteListFiltered(family int, filter *Route, filterMask uint64) ([]Route, error) { + req := nl.NewNetlinkRequest(syscall.RTM_GETROUTE, syscall.NLM_F_DUMP) + infmsg := nl.NewIfInfomsg(family) + req.AddData(infmsg) + + msgs, err := req.Execute(syscall.NETLINK_ROUTE, syscall.RTM_NEWROUTE) + if err != nil { + return nil, err + } + + var res []Route + for _, m := range msgs { + msg := nl.DeserializeRtMsg(m) + if msg.Flags&syscall.RTM_F_CLONED != 0 { + // Ignore cloned routes + continue + } + if msg.Table != syscall.RT_TABLE_MAIN { + if filter == nil || filter != nil && filterMask&RT_FILTER_TABLE == 0 { + // Ignore non-main tables + continue + } + } + route, err := deserializeRoute(m) + if err != nil { + return nil, err + } + if filter != nil { + switch { + case filterMask&RT_FILTER_TABLE != 0 && route.Table != filter.Table: + continue + case filterMask&RT_FILTER_PROTOCOL != 0 && route.Protocol != filter.Protocol: + continue + case filterMask&RT_FILTER_SCOPE != 0 && route.Scope != filter.Scope: + continue + case filterMask&RT_FILTER_TYPE != 0 && route.Type != filter.Type: + continue + case filterMask&RT_FILTER_TOS != 0 && route.Tos != filter.Tos: + continue + case filterMask&RT_FILTER_OIF != 0 && route.LinkIndex != filter.LinkIndex: + continue + case filterMask&RT_FILTER_IIF != 0 && route.ILinkIndex != filter.ILinkIndex: + continue + case filterMask&RT_FILTER_GW != 0 && !route.Gw.Equal(filter.Gw): + continue + case filterMask&RT_FILTER_SRC != 0 && !route.Src.Equal(filter.Src): + continue + case filterMask&RT_FILTER_DST != 0 && filter.Dst != nil: + if route.Dst == nil { + continue + } + aMaskLen, aMaskBits := route.Dst.Mask.Size() + bMaskLen, bMaskBits := filter.Dst.Mask.Size() + if !(route.Dst.IP.Equal(filter.Dst.IP) && aMaskLen == bMaskLen && aMaskBits == bMaskBits) { + continue + } + } + } + res = append(res, route) + } + return res, nil +} + +// deserializeRoute decodes a binary netlink message into a Route 
struct +func deserializeRoute(m []byte) (Route, error) { + msg := nl.DeserializeRtMsg(m) + attrs, err := nl.ParseRouteAttr(m[msg.Len():]) + if err != nil { + return Route{}, err + } + route := Route{ + Scope: Scope(msg.Scope), + Protocol: int(msg.Protocol), + Table: int(msg.Table), + Type: int(msg.Type), + Tos: int(msg.Tos), + Flags: int(msg.Flags), + } + + native := nl.NativeEndian() + for _, attr := range attrs { + switch attr.Attr.Type { + case syscall.RTA_GATEWAY: + route.Gw = net.IP(attr.Value) + case syscall.RTA_PREFSRC: + route.Src = net.IP(attr.Value) + case syscall.RTA_DST: + route.Dst = &net.IPNet{ + IP: attr.Value, + Mask: net.CIDRMask(int(msg.Dst_len), 8*len(attr.Value)), + } + case syscall.RTA_OIF: + route.LinkIndex = int(native.Uint32(attr.Value[0:4])) + case syscall.RTA_IIF: + route.ILinkIndex = int(native.Uint32(attr.Value[0:4])) + case syscall.RTA_PRIORITY: + route.Priority = int(native.Uint32(attr.Value[0:4])) + case syscall.RTA_TABLE: + route.Table = int(native.Uint32(attr.Value[0:4])) + } + } + return route, nil +} + +// RouteGet gets a route to a specific destination from the host system. +// Equivalent to: 'ip route get'. 
+func RouteGet(destination net.IP) ([]Route, error) { + req := nl.NewNetlinkRequest(syscall.RTM_GETROUTE, syscall.NLM_F_REQUEST) + family := nl.GetIPFamily(destination) + var destinationData []byte + var bitlen uint8 + if family == FAMILY_V4 { + destinationData = destination.To4() + bitlen = 32 + } else { + destinationData = destination.To16() + bitlen = 128 + } + msg := &nl.RtMsg{} + msg.Family = uint8(family) + msg.Dst_len = bitlen + req.AddData(msg) + + rtaDst := nl.NewRtAttr(syscall.RTA_DST, destinationData) + req.AddData(rtaDst) + + msgs, err := req.Execute(syscall.NETLINK_ROUTE, syscall.RTM_NEWROUTE) + if err != nil { + return nil, err + } + + var res []Route + for _, m := range msgs { + route, err := deserializeRoute(m) + if err != nil { + return nil, err + } + res = append(res, route) + } + return res, nil + +} + +// RouteSubscribe takes a chan down which notifications will be sent +// when routes are added or deleted. Close the 'done' chan to stop subscription. +func RouteSubscribe(ch chan<- RouteUpdate, done <-chan struct{}) error { + s, err := nl.Subscribe(syscall.NETLINK_ROUTE, syscall.RTNLGRP_IPV4_ROUTE, syscall.RTNLGRP_IPV6_ROUTE) + if err != nil { + return err + } + if done != nil { + go func() { + <-done + s.Close() + }() + } + go func() { + defer close(ch) + for { + msgs, err := s.Receive() + if err != nil { + return + } + for _, m := range msgs { + route, err := deserializeRoute(m.Data) + if err != nil { + return + } + ch <- RouteUpdate{Type: m.Header.Type, Route: route} + } + } + }() + + return nil +} diff --git a/networking/libsnnet/vendor/github.com/vishvananda/netlink/route_test.go b/networking/libsnnet/vendor/github.com/vishvananda/netlink/route_test.go new file mode 100644 index 000000000..fa0b579f7 --- /dev/null +++ b/networking/libsnnet/vendor/github.com/vishvananda/netlink/route_test.go @@ -0,0 +1,210 @@ +package netlink + +import ( + "net" + "syscall" + "testing" + "time" +) + +func TestRouteAddDel(t *testing.T) { + tearDown := 
setUpNetlinkTest(t) + defer tearDown() + + // get loopback interface + link, err := LinkByName("lo") + if err != nil { + t.Fatal(err) + } + + // bring the interface up + if err := LinkSetUp(link); err != nil { + t.Fatal(err) + } + + // add a gateway route + dst := &net.IPNet{ + IP: net.IPv4(192, 168, 0, 0), + Mask: net.CIDRMask(24, 32), + } + + ip := net.IPv4(127, 1, 1, 1) + route := Route{LinkIndex: link.Attrs().Index, Dst: dst, Src: ip} + if err := RouteAdd(&route); err != nil { + t.Fatal(err) + } + routes, err := RouteList(link, FAMILY_V4) + if err != nil { + t.Fatal(err) + } + if len(routes) != 1 { + t.Fatal("Route not added properly") + } + + dstIP := net.IPv4(192, 168, 0, 42) + routeToDstIP, err := RouteGet(dstIP) + if err != nil { + t.Fatal(err) + } + + if len(routeToDstIP) == 0 { + t.Fatal("Default route not present") + } + if err := RouteDel(&route); err != nil { + t.Fatal(err) + } + routes, err = RouteList(link, FAMILY_V4) + if err != nil { + t.Fatal(err) + } + if len(routes) != 0 { + t.Fatal("Route not removed properly") + } + +} + +func TestRouteAddIncomplete(t *testing.T) { + tearDown := setUpNetlinkTest(t) + defer tearDown() + + // get loopback interface + link, err := LinkByName("lo") + if err != nil { + t.Fatal(err) + } + + // bring the interface up + if err = LinkSetUp(link); err != nil { + t.Fatal(err) + } + + route := Route{LinkIndex: link.Attrs().Index} + if err := RouteAdd(&route); err == nil { + t.Fatal("Adding incomplete route should fail") + } +} + +func expectRouteUpdate(ch <-chan RouteUpdate, t uint16, dst net.IP) bool { + for { + timeout := time.After(time.Minute) + select { + case update := <-ch: + if update.Type == t && update.Route.Dst.IP.Equal(dst) { + return true + } + case <-timeout: + return false + } + } +} + +func TestRouteSubscribe(t *testing.T) { + tearDown := setUpNetlinkTest(t) + defer tearDown() + + ch := make(chan RouteUpdate) + done := make(chan struct{}) + defer close(done) + if err := RouteSubscribe(ch, done); err != nil 
{ + t.Fatal(err) + } + + // get loopback interface + link, err := LinkByName("lo") + if err != nil { + t.Fatal(err) + } + + // bring the interface up + if err = LinkSetUp(link); err != nil { + t.Fatal(err) + } + + // add a gateway route + dst := &net.IPNet{ + IP: net.IPv4(192, 168, 0, 0), + Mask: net.CIDRMask(24, 32), + } + + ip := net.IPv4(127, 1, 1, 1) + route := Route{LinkIndex: link.Attrs().Index, Dst: dst, Src: ip} + if err := RouteAdd(&route); err != nil { + t.Fatal(err) + } + + if !expectRouteUpdate(ch, syscall.RTM_NEWROUTE, dst.IP) { + t.Fatal("Add update not received as expected") + } + if err := RouteDel(&route); err != nil { + t.Fatal(err) + } + if !expectRouteUpdate(ch, syscall.RTM_DELROUTE, dst.IP) { + t.Fatal("Del update not received as expected") + } +} + +func TestRouteExtraFields(t *testing.T) { + tearDown := setUpNetlinkTest(t) + defer tearDown() + + // get loopback interface + link, err := LinkByName("lo") + if err != nil { + t.Fatal(err) + } + // bring the interface up + if err = LinkSetUp(link); err != nil { + t.Fatal(err) + } + + // add a gateway route + dst := &net.IPNet{ + IP: net.IPv4(1, 1, 1, 1), + Mask: net.CIDRMask(32, 32), + } + + src := net.IPv4(127, 3, 3, 3) + route := Route{ + LinkIndex: link.Attrs().Index, + Dst: dst, + Src: src, + Scope: syscall.RT_SCOPE_LINK, + Priority: 13, + Table: syscall.RT_TABLE_MAIN, + Type: syscall.RTN_UNICAST, + Tos: 14, + } + if err := RouteAdd(&route); err != nil { + t.Fatal(err) + } + routes, err := RouteListFiltered(FAMILY_V4, &Route{ + Dst: dst, + Src: src, + Scope: syscall.RT_SCOPE_LINK, + Table: syscall.RT_TABLE_MAIN, + Type: syscall.RTN_UNICAST, + Tos: 14, + }, RT_FILTER_DST|RT_FILTER_SRC|RT_FILTER_SCOPE|RT_FILTER_TABLE|RT_FILTER_TYPE|RT_FILTER_TOS) + if err != nil { + t.Fatal(err) + } + if len(routes) != 1 { + t.Fatal("Route not added properly") + } + + if routes[0].Scope != syscall.RT_SCOPE_LINK { + t.Fatal("Invalid Scope. 
Route not added properly") + } + if routes[0].Priority != 13 { + t.Fatal("Invalid Priority. Route not added properly") + } + if routes[0].Table != syscall.RT_TABLE_MAIN { + t.Fatal("Invalid Scope. Route not added properly") + } + if routes[0].Type != syscall.RTN_UNICAST { + t.Fatal("Invalid Type. Route not added properly") + } + if routes[0].Tos != 14 { + t.Fatal("Invalid Tos. Route not added properly") + } +} diff --git a/networking/libsnnet/vendor/github.com/vishvananda/netlink/rule.go b/networking/libsnnet/vendor/github.com/vishvananda/netlink/rule.go new file mode 100644 index 000000000..bd699a7e1 --- /dev/null +++ b/networking/libsnnet/vendor/github.com/vishvananda/netlink/rule.go @@ -0,0 +1,43 @@ +package netlink + +import ( + "fmt" + "net" + + "github.com/vishvananda/netlink/nl" +) + +// Rule represents a netlink rule. +type Rule struct { + *nl.RtMsg + Priority int + Table int + Mark int + Mask int + TunID uint + Goto int + Src *net.IPNet + Dst *net.IPNet + Flow int + IifName string + OifName string + SuppressIfgroup int + SuppressPrefixlen int +} + +func (r Rule) String() string { + return fmt.Sprintf("ip rule %d: from %s table %d", r.Priority, r.Src, r.Table) +} + +// NewRule return empty rules. +func NewRule() *Rule { + return &Rule{ + SuppressIfgroup: -1, + SuppressPrefixlen: -1, + Priority: -1, + Mark: -1, + Mask: -1, + Goto: -1, + Flow: -1, + } +} diff --git a/networking/libsnnet/vendor/github.com/vishvananda/netlink/rule_linux.go b/networking/libsnnet/vendor/github.com/vishvananda/netlink/rule_linux.go new file mode 100644 index 000000000..ba84be00e --- /dev/null +++ b/networking/libsnnet/vendor/github.com/vishvananda/netlink/rule_linux.go @@ -0,0 +1,198 @@ +package netlink + +import ( + "fmt" + "net" + "syscall" + + "github.com/vishvananda/netlink/nl" +) + +// RuleAdd adds a rule to the system. 
+// Equivalent to: ip rule add +func RuleAdd(rule *Rule) error { + req := nl.NewNetlinkRequest(syscall.RTM_NEWRULE, syscall.NLM_F_CREATE|syscall.NLM_F_EXCL|syscall.NLM_F_ACK) + return ruleHandle(rule, req) +} + +// RuleDel deletes a rule from the system. +// Equivalent to: ip rule del +func RuleDel(rule *Rule) error { + req := nl.NewNetlinkRequest(syscall.RTM_DELRULE, syscall.NLM_F_CREATE|syscall.NLM_F_EXCL|syscall.NLM_F_ACK) + return ruleHandle(rule, req) +} + +func ruleHandle(rule *Rule, req *nl.NetlinkRequest) error { + msg := nl.NewRtMsg() + msg.Family = syscall.AF_INET + var dstFamily uint8 + + var rtAttrs []*nl.RtAttr + if rule.Dst != nil && rule.Dst.IP != nil { + dstLen, _ := rule.Dst.Mask.Size() + msg.Dst_len = uint8(dstLen) + msg.Family = uint8(nl.GetIPFamily(rule.Dst.IP)) + dstFamily = msg.Family + var dstData []byte + if msg.Family == syscall.AF_INET { + dstData = rule.Dst.IP.To4() + } else { + dstData = rule.Dst.IP.To16() + } + rtAttrs = append(rtAttrs, nl.NewRtAttr(syscall.RTA_DST, dstData)) + } + + if rule.Src != nil && rule.Src.IP != nil { + msg.Family = uint8(nl.GetIPFamily(rule.Src.IP)) + if dstFamily != 0 && dstFamily != msg.Family { + return fmt.Errorf("source and destination ip are not the same IP family") + } + srcLen, _ := rule.Src.Mask.Size() + msg.Src_len = uint8(srcLen) + var srcData []byte + if msg.Family == syscall.AF_INET { + srcData = rule.Src.IP.To4() + } else { + srcData = rule.Src.IP.To16() + } + rtAttrs = append(rtAttrs, nl.NewRtAttr(syscall.RTA_SRC, srcData)) + } + + if rule.Table >= 0 { + msg.Table = uint8(rule.Table) + if rule.Table >= 256 { + msg.Table = syscall.RT_TABLE_UNSPEC + } + } + + req.AddData(msg) + for i := range rtAttrs { + req.AddData(rtAttrs[i]) + } + + var ( + b = make([]byte, 4) + native = nl.NativeEndian() + ) + + if rule.Priority >= 0 { + native.PutUint32(b, uint32(rule.Priority)) + req.AddData(nl.NewRtAttr(nl.FRA_PRIORITY, b)) + } + if rule.Mark >= 0 { + native.PutUint32(b, uint32(rule.Mark)) + 
req.AddData(nl.NewRtAttr(nl.FRA_FWMARK, b)) + } + if rule.Mask >= 0 { + native.PutUint32(b, uint32(rule.Mask)) + req.AddData(nl.NewRtAttr(nl.FRA_FWMASK, b)) + } + if rule.Flow >= 0 { + native.PutUint32(b, uint32(rule.Flow)) + req.AddData(nl.NewRtAttr(nl.FRA_FLOW, b)) + } + if rule.TunID > 0 { + native.PutUint32(b, uint32(rule.TunID)) + req.AddData(nl.NewRtAttr(nl.FRA_TUN_ID, b)) + } + if rule.Table >= 256 { + native.PutUint32(b, uint32(rule.Table)) + req.AddData(nl.NewRtAttr(nl.FRA_TABLE, b)) + } + if msg.Table > 0 { + if rule.SuppressPrefixlen >= 0 { + native.PutUint32(b, uint32(rule.SuppressPrefixlen)) + req.AddData(nl.NewRtAttr(nl.FRA_SUPPRESS_PREFIXLEN, b)) + } + if rule.SuppressIfgroup >= 0 { + native.PutUint32(b, uint32(rule.SuppressIfgroup)) + req.AddData(nl.NewRtAttr(nl.FRA_SUPPRESS_IFGROUP, b)) + } + } + if rule.IifName != "" { + req.AddData(nl.NewRtAttr(nl.FRA_IIFNAME, []byte(rule.IifName))) + } + if rule.OifName != "" { + req.AddData(nl.NewRtAttr(nl.FRA_OIFNAME, []byte(rule.OifName))) + } + if rule.Goto >= 0 { + msg.Type = nl.FR_ACT_NOP + native.PutUint32(b, uint32(rule.Goto)) + req.AddData(nl.NewRtAttr(nl.FRA_GOTO, b)) + } + + _, err := req.Execute(syscall.NETLINK_ROUTE, 0) + return err +} + +// RuleList lists rules in the system. 
+// Equivalent to: ip rule list +func RuleList(family int) ([]Rule, error) { + req := nl.NewNetlinkRequest(syscall.RTM_GETRULE, syscall.NLM_F_DUMP|syscall.NLM_F_REQUEST) + msg := nl.NewIfInfomsg(family) + req.AddData(msg) + + msgs, err := req.Execute(syscall.NETLINK_ROUTE, syscall.RTM_NEWRULE) + if err != nil { + return nil, err + } + + native := nl.NativeEndian() + var res = make([]Rule, 0) + for i := range msgs { + msg := nl.DeserializeRtMsg(msgs[i]) + attrs, err := nl.ParseRouteAttr(msgs[i][msg.Len():]) + if err != nil { + return nil, err + } + + rule := NewRule() + rule.RtMsg = msg + + for j := range attrs { + switch attrs[j].Attr.Type { + case syscall.RTA_TABLE: + rule.Table = int(native.Uint32(attrs[j].Value[0:4])) + case nl.FRA_SRC: + rule.Src = &net.IPNet{ + IP: attrs[j].Value, + Mask: net.CIDRMask(int(msg.Src_len), 8*len(attrs[j].Value)), + } + case nl.FRA_DST: + rule.Dst = &net.IPNet{ + IP: attrs[j].Value, + Mask: net.CIDRMask(int(msg.Dst_len), 8*len(attrs[j].Value)), + } + case nl.FRA_FWMARK: + rule.Mark = int(native.Uint32(attrs[j].Value[0:4])) + case nl.FRA_FWMASK: + rule.Mask = int(native.Uint32(attrs[j].Value[0:4])) + case nl.FRA_TUN_ID: + rule.TunID = uint(native.Uint64(attrs[j].Value[0:4])) + case nl.FRA_IIFNAME: + rule.IifName = string(attrs[j].Value[:len(attrs[j].Value)-1]) + case nl.FRA_OIFNAME: + rule.OifName = string(attrs[j].Value[:len(attrs[j].Value)-1]) + case nl.FRA_SUPPRESS_PREFIXLEN: + i := native.Uint32(attrs[j].Value[0:4]) + if i != 0xffffffff { + rule.SuppressPrefixlen = int(i) + } + case nl.FRA_SUPPRESS_IFGROUP: + i := native.Uint32(attrs[j].Value[0:4]) + if i != 0xffffffff { + rule.SuppressIfgroup = int(i) + } + case nl.FRA_FLOW: + rule.Flow = int(native.Uint32(attrs[j].Value[0:4])) + case nl.FRA_GOTO: + rule.Goto = int(native.Uint32(attrs[j].Value[0:4])) + case nl.FRA_PRIORITY: + rule.Priority = int(native.Uint32(attrs[j].Value[0:4])) + } + } + res = append(res, *rule) + } + + return res, nil +} diff --git 
a/networking/libsnnet/vendor/github.com/vishvananda/netlink/rule_test.go b/networking/libsnnet/vendor/github.com/vishvananda/netlink/rule_test.go new file mode 100644 index 000000000..63f995c9c --- /dev/null +++ b/networking/libsnnet/vendor/github.com/vishvananda/netlink/rule_test.go @@ -0,0 +1,66 @@ +package netlink + +import ( + "net" + "syscall" + "testing" +) + +func TestRuleAddDel(t *testing.T) { + srcNet := &net.IPNet{IP: net.IPv4(172, 16, 0, 1), Mask: net.CIDRMask(16, 32)} + dstNet := &net.IPNet{IP: net.IPv4(172, 16, 1, 1), Mask: net.CIDRMask(24, 32)} + + rules_begin, err := RuleList(syscall.AF_INET) + if err != nil { + t.Fatal(err) + } + + rule := NewRule() + rule.Table = syscall.RT_TABLE_MAIN + rule.Src = srcNet + rule.Dst = dstNet + rule.Priority = 5 + rule.OifName = "lo" + rule.IifName = "lo" + if err := RuleAdd(rule); err != nil { + t.Fatal(err) + } + + rules, err := RuleList(syscall.AF_INET) + if err != nil { + t.Fatal(err) + } + + if len(rules) != len(rules_begin)+1 { + t.Fatal("Rule not added properly") + } + + // find this rule + var found bool + for i := range rules { + if rules[i].Table == rule.Table && + rules[i].Src != nil && rules[i].Src.String() == srcNet.String() && + rules[i].Dst != nil && rules[i].Dst.String() == dstNet.String() && + rules[i].OifName == rule.OifName && + rules[i].Priority == rule.Priority && + rules[i].IifName == rule.IifName { + found = true + } + } + if !found { + t.Fatal("Rule has diffrent options than one added") + } + + if err := RuleDel(rule); err != nil { + t.Fatal(err) + } + + rules_end, err := RuleList(syscall.AF_INET) + if err != nil { + t.Fatal(err) + } + + if len(rules_end) != len(rules_begin) { + t.Fatal("Rule not removed properly") + } +} diff --git a/networking/libsnnet/vendor/github.com/vishvananda/netlink/xfrm.go b/networking/libsnnet/vendor/github.com/vishvananda/netlink/xfrm.go new file mode 100644 index 000000000..621ffb6c6 --- /dev/null +++ 
// Proto is an enum representing an ipsec protocol.
type Proto uint8

const (
	XFRM_PROTO_ROUTE2    Proto = syscall.IPPROTO_ROUTING
	XFRM_PROTO_ESP       Proto = syscall.IPPROTO_ESP
	XFRM_PROTO_AH        Proto = syscall.IPPROTO_AH
	XFRM_PROTO_HAO       Proto = syscall.IPPROTO_DSTOPTS
	XFRM_PROTO_COMP      Proto = syscall.IPPROTO_COMP
	XFRM_PROTO_IPSEC_ANY Proto = syscall.IPPROTO_RAW
)

// String returns the ip-xfrm style name of the protocol, or its decimal
// value when the protocol is not one of the named constants.
func (proto Proto) String() string {
	names := map[Proto]string{
		XFRM_PROTO_ROUTE2:    "route2",
		XFRM_PROTO_ESP:       "esp",
		XFRM_PROTO_AH:        "ah",
		XFRM_PROTO_HAO:       "hao",
		XFRM_PROTO_COMP:      "comp",
		XFRM_PROTO_IPSEC_ANY: "ipsec-any",
	}
	if name, ok := names[proto]; ok {
		return name
	}
	return fmt.Sprintf("%d", proto)
}

// Mode is an enum representing an ipsec transport.
type Mode uint8

const (
	XFRM_MODE_TRANSPORT Mode = iota
	XFRM_MODE_TUNNEL
	XFRM_MODE_ROUTEOPTIMIZATION
	XFRM_MODE_IN_TRIGGER
	XFRM_MODE_BEET
	XFRM_MODE_MAX
)

// String returns the ip-xfrm style name of the mode, or its decimal value
// when the mode is not one of the named constants (XFRM_MODE_MAX included).
func (mode Mode) String() string {
	names := map[Mode]string{
		XFRM_MODE_TRANSPORT:         "transport",
		XFRM_MODE_TUNNEL:            "tunnel",
		XFRM_MODE_ROUTEOPTIMIZATION: "ro",
		XFRM_MODE_IN_TRIGGER:        "in_trigger",
		XFRM_MODE_BEET:              "beet",
	}
	if name, ok := names[mode]; ok {
		return name
	}
	return fmt.Sprintf("%d", mode)
}

// Dir is an enum representing an ipsec template direction.
+type Dir uint8 + +const ( + XFRM_DIR_IN Dir = iota + XFRM_DIR_OUT + XFRM_DIR_FWD + XFRM_SOCKET_IN + XFRM_SOCKET_OUT + XFRM_SOCKET_FWD +) + +func (d Dir) String() string { + switch d { + case XFRM_DIR_IN: + return "dir in" + case XFRM_DIR_OUT: + return "dir out" + case XFRM_DIR_FWD: + return "dir fwd" + case XFRM_SOCKET_IN: + return "socket in" + case XFRM_SOCKET_OUT: + return "socket out" + case XFRM_SOCKET_FWD: + return "socket fwd" + } + return fmt.Sprintf("socket %d", d-XFRM_SOCKET_IN) +} + +// XfrmPolicyTmpl encapsulates a rule for the base addresses of an ipsec +// policy. These rules are matched with XfrmState to determine encryption +// and authentication algorithms. +type XfrmPolicyTmpl struct { + Dst net.IP + Src net.IP + Proto Proto + Mode Mode + Reqid int +} + +// XfrmPolicy represents an ipsec policy. It represents the overlay network +// and has a list of XfrmPolicyTmpls representing the base addresses of +// the policy. +type XfrmPolicy struct { + Dst *net.IPNet + Src *net.IPNet + Dir Dir + Priority int + Index int + Tmpls []XfrmPolicyTmpl +} diff --git a/networking/libsnnet/vendor/github.com/vishvananda/netlink/xfrm_policy_linux.go b/networking/libsnnet/vendor/github.com/vishvananda/netlink/xfrm_policy_linux.go new file mode 100644 index 000000000..2daf6dc8b --- /dev/null +++ b/networking/libsnnet/vendor/github.com/vishvananda/netlink/xfrm_policy_linux.go @@ -0,0 +1,127 @@ +package netlink + +import ( + "syscall" + + "github.com/vishvananda/netlink/nl" +) + +func selFromPolicy(sel *nl.XfrmSelector, policy *XfrmPolicy) { + sel.Family = uint16(nl.GetIPFamily(policy.Dst.IP)) + sel.Daddr.FromIP(policy.Dst.IP) + sel.Saddr.FromIP(policy.Src.IP) + prefixlenD, _ := policy.Dst.Mask.Size() + sel.PrefixlenD = uint8(prefixlenD) + prefixlenS, _ := policy.Src.Mask.Size() + sel.PrefixlenS = uint8(prefixlenS) +} + +// XfrmPolicyAdd will add an xfrm policy to the system. 
+// Equivalent to: `ip xfrm policy add $policy` +func XfrmPolicyAdd(policy *XfrmPolicy) error { + req := nl.NewNetlinkRequest(nl.XFRM_MSG_NEWPOLICY, syscall.NLM_F_CREATE|syscall.NLM_F_EXCL|syscall.NLM_F_ACK) + + msg := &nl.XfrmUserpolicyInfo{} + selFromPolicy(&msg.Sel, policy) + msg.Priority = uint32(policy.Priority) + msg.Index = uint32(policy.Index) + msg.Dir = uint8(policy.Dir) + msg.Lft.SoftByteLimit = nl.XFRM_INF + msg.Lft.HardByteLimit = nl.XFRM_INF + msg.Lft.SoftPacketLimit = nl.XFRM_INF + msg.Lft.HardPacketLimit = nl.XFRM_INF + req.AddData(msg) + + tmplData := make([]byte, nl.SizeofXfrmUserTmpl*len(policy.Tmpls)) + for i, tmpl := range policy.Tmpls { + start := i * nl.SizeofXfrmUserTmpl + userTmpl := nl.DeserializeXfrmUserTmpl(tmplData[start : start+nl.SizeofXfrmUserTmpl]) + userTmpl.XfrmId.Daddr.FromIP(tmpl.Dst) + userTmpl.Saddr.FromIP(tmpl.Src) + userTmpl.XfrmId.Proto = uint8(tmpl.Proto) + userTmpl.Mode = uint8(tmpl.Mode) + userTmpl.Reqid = uint32(tmpl.Reqid) + userTmpl.Aalgos = ^uint32(0) + userTmpl.Ealgos = ^uint32(0) + userTmpl.Calgos = ^uint32(0) + } + if len(tmplData) > 0 { + tmpls := nl.NewRtAttr(nl.XFRMA_TMPL, tmplData) + req.AddData(tmpls) + } + + _, err := req.Execute(syscall.NETLINK_XFRM, 0) + return err +} + +// XfrmPolicyDel will delete an xfrm policy from the system. Note that +// the Tmpls are ignored when matching the policy to delete. +// Equivalent to: `ip xfrm policy del $policy` +func XfrmPolicyDel(policy *XfrmPolicy) error { + req := nl.NewNetlinkRequest(nl.XFRM_MSG_DELPOLICY, syscall.NLM_F_ACK) + + msg := &nl.XfrmUserpolicyId{} + selFromPolicy(&msg.Sel, policy) + msg.Index = uint32(policy.Index) + msg.Dir = uint8(policy.Dir) + req.AddData(msg) + + _, err := req.Execute(syscall.NETLINK_XFRM, 0) + return err +} + +// XfrmPolicyList gets a list of xfrm policies in the system. +// Equivalent to: `ip xfrm policy show`. +// The list can be filtered by ip family. 
+func XfrmPolicyList(family int) ([]XfrmPolicy, error) { + req := nl.NewNetlinkRequest(nl.XFRM_MSG_GETPOLICY, syscall.NLM_F_DUMP) + + msg := nl.NewIfInfomsg(family) + req.AddData(msg) + + msgs, err := req.Execute(syscall.NETLINK_XFRM, nl.XFRM_MSG_NEWPOLICY) + if err != nil { + return nil, err + } + + var res []XfrmPolicy + for _, m := range msgs { + msg := nl.DeserializeXfrmUserpolicyInfo(m) + + if family != FAMILY_ALL && family != int(msg.Sel.Family) { + continue + } + + var policy XfrmPolicy + + policy.Dst = msg.Sel.Daddr.ToIPNet(msg.Sel.PrefixlenD) + policy.Src = msg.Sel.Saddr.ToIPNet(msg.Sel.PrefixlenS) + policy.Priority = int(msg.Priority) + policy.Index = int(msg.Index) + policy.Dir = Dir(msg.Dir) + + attrs, err := nl.ParseRouteAttr(m[msg.Len():]) + if err != nil { + return nil, err + } + + for _, attr := range attrs { + switch attr.Attr.Type { + case nl.XFRMA_TMPL: + max := len(attr.Value) + for i := 0; i < max; i += nl.SizeofXfrmUserTmpl { + var resTmpl XfrmPolicyTmpl + tmpl := nl.DeserializeXfrmUserTmpl(attr.Value[i : i+nl.SizeofXfrmUserTmpl]) + resTmpl.Dst = tmpl.XfrmId.Daddr.ToIP() + resTmpl.Src = tmpl.Saddr.ToIP() + resTmpl.Proto = Proto(tmpl.XfrmId.Proto) + resTmpl.Mode = Mode(tmpl.Mode) + resTmpl.Reqid = int(tmpl.Reqid) + policy.Tmpls = append(policy.Tmpls, resTmpl) + } + } + } + res = append(res, policy) + } + return res, nil +} diff --git a/networking/libsnnet/vendor/github.com/vishvananda/netlink/xfrm_policy_test.go b/networking/libsnnet/vendor/github.com/vishvananda/netlink/xfrm_policy_test.go new file mode 100644 index 000000000..06d178d1f --- /dev/null +++ b/networking/libsnnet/vendor/github.com/vishvananda/netlink/xfrm_policy_test.go @@ -0,0 +1,49 @@ +package netlink + +import ( + "net" + "testing" +) + +func TestXfrmPolicyAddDel(t *testing.T) { + tearDown := setUpNetlinkTest(t) + defer tearDown() + + src, _ := ParseIPNet("127.1.1.1/32") + dst, _ := ParseIPNet("127.1.1.2/32") + policy := XfrmPolicy{ + Src: src, + Dst: dst, + Dir: XFRM_DIR_OUT, 
+ } + tmpl := XfrmPolicyTmpl{ + Src: net.ParseIP("127.0.0.1"), + Dst: net.ParseIP("127.0.0.2"), + Proto: XFRM_PROTO_ESP, + Mode: XFRM_MODE_TUNNEL, + } + policy.Tmpls = append(policy.Tmpls, tmpl) + if err := XfrmPolicyAdd(&policy); err != nil { + t.Fatal(err) + } + policies, err := XfrmPolicyList(FAMILY_ALL) + if err != nil { + t.Fatal(err) + } + + if len(policies) != 1 { + t.Fatal("Policy not added properly") + } + + if err = XfrmPolicyDel(&policy); err != nil { + t.Fatal(err) + } + + policies, err = XfrmPolicyList(FAMILY_ALL) + if err != nil { + t.Fatal(err) + } + if len(policies) != 0 { + t.Fatal("Policy not removed properly") + } +} diff --git a/networking/libsnnet/vendor/github.com/vishvananda/netlink/xfrm_state.go b/networking/libsnnet/vendor/github.com/vishvananda/netlink/xfrm_state.go new file mode 100644 index 000000000..5b8f2df70 --- /dev/null +++ b/networking/libsnnet/vendor/github.com/vishvananda/netlink/xfrm_state.go @@ -0,0 +1,53 @@ +package netlink + +import ( + "net" +) + +// XfrmStateAlgo represents the algorithm to use for the ipsec encryption. +type XfrmStateAlgo struct { + Name string + Key []byte + TruncateLen int // Auth only +} + +// EncapType is an enum representing an ipsec template direction. +type EncapType uint8 + +const ( + XFRM_ENCAP_ESPINUDP_NONIKE EncapType = iota + 1 + XFRM_ENCAP_ESPINUDP +) + +func (e EncapType) String() string { + switch e { + case XFRM_ENCAP_ESPINUDP_NONIKE: + return "espinudp-nonike" + case XFRM_ENCAP_ESPINUDP: + return "espinudp" + } + return "unknown" +} + +// XfrmEncap represents the encapsulation to use for the ipsec encryption. +type XfrmStateEncap struct { + Type EncapType + SrcPort int + DstPort int + OriginalAddress net.IP +} + +// XfrmState represents the state of an ipsec policy. It optionally +// contains an XfrmStateAlgo for encryption and one for authentication. 
+type XfrmState struct { + Dst net.IP + Src net.IP + Proto Proto + Mode Mode + Spi int + Reqid int + ReplayWindow int + Auth *XfrmStateAlgo + Crypt *XfrmStateAlgo + Encap *XfrmStateEncap +} diff --git a/networking/libsnnet/vendor/github.com/vishvananda/netlink/xfrm_state_linux.go b/networking/libsnnet/vendor/github.com/vishvananda/netlink/xfrm_state_linux.go new file mode 100644 index 000000000..5f44ec852 --- /dev/null +++ b/networking/libsnnet/vendor/github.com/vishvananda/netlink/xfrm_state_linux.go @@ -0,0 +1,181 @@ +package netlink + +import ( + "fmt" + "syscall" + + "github.com/vishvananda/netlink/nl" +) + +func writeStateAlgo(a *XfrmStateAlgo) []byte { + algo := nl.XfrmAlgo{ + AlgKeyLen: uint32(len(a.Key) * 8), + AlgKey: a.Key, + } + end := len(a.Name) + if end > 64 { + end = 64 + } + copy(algo.AlgName[:end], a.Name) + return algo.Serialize() +} + +func writeStateAlgoAuth(a *XfrmStateAlgo) []byte { + algo := nl.XfrmAlgoAuth{ + AlgKeyLen: uint32(len(a.Key) * 8), + AlgTruncLen: uint32(a.TruncateLen), + AlgKey: a.Key, + } + end := len(a.Name) + if end > 64 { + end = 64 + } + copy(algo.AlgName[:end], a.Name) + return algo.Serialize() +} + +// XfrmStateAdd will add an xfrm state to the system. 
+// Equivalent to: `ip xfrm state add $state` +func XfrmStateAdd(state *XfrmState) error { + // A state with spi 0 can't be deleted so don't allow it to be set + if state.Spi == 0 { + return fmt.Errorf("Spi must be set when adding xfrm state.") + } + req := nl.NewNetlinkRequest(nl.XFRM_MSG_NEWSA, syscall.NLM_F_CREATE|syscall.NLM_F_EXCL|syscall.NLM_F_ACK) + + msg := &nl.XfrmUsersaInfo{} + msg.Family = uint16(nl.GetIPFamily(state.Dst)) + msg.Id.Daddr.FromIP(state.Dst) + msg.Saddr.FromIP(state.Src) + msg.Id.Proto = uint8(state.Proto) + msg.Mode = uint8(state.Mode) + msg.Id.Spi = nl.Swap32(uint32(state.Spi)) + msg.Reqid = uint32(state.Reqid) + msg.ReplayWindow = uint8(state.ReplayWindow) + msg.Lft.SoftByteLimit = nl.XFRM_INF + msg.Lft.HardByteLimit = nl.XFRM_INF + msg.Lft.SoftPacketLimit = nl.XFRM_INF + msg.Lft.HardPacketLimit = nl.XFRM_INF + req.AddData(msg) + + if state.Auth != nil { + out := nl.NewRtAttr(nl.XFRMA_ALG_AUTH_TRUNC, writeStateAlgoAuth(state.Auth)) + req.AddData(out) + } + if state.Crypt != nil { + out := nl.NewRtAttr(nl.XFRMA_ALG_CRYPT, writeStateAlgo(state.Crypt)) + req.AddData(out) + } + if state.Encap != nil { + encapData := make([]byte, nl.SizeofXfrmEncapTmpl) + encap := nl.DeserializeXfrmEncapTmpl(encapData) + encap.EncapType = uint16(state.Encap.Type) + encap.EncapSport = nl.Swap16(uint16(state.Encap.SrcPort)) + encap.EncapDport = nl.Swap16(uint16(state.Encap.DstPort)) + encap.EncapOa.FromIP(state.Encap.OriginalAddress) + out := nl.NewRtAttr(nl.XFRMA_ENCAP, encapData) + req.AddData(out) + } + + _, err := req.Execute(syscall.NETLINK_XFRM, 0) + return err +} + +// XfrmStateDel will delete an xfrm state from the system. Note that +// the Algos are ignored when matching the state to delete. 
+// Equivalent to: `ip xfrm state del $state` +func XfrmStateDel(state *XfrmState) error { + req := nl.NewNetlinkRequest(nl.XFRM_MSG_DELSA, syscall.NLM_F_ACK) + + msg := &nl.XfrmUsersaId{} + msg.Daddr.FromIP(state.Dst) + msg.Family = uint16(nl.GetIPFamily(state.Dst)) + msg.Proto = uint8(state.Proto) + msg.Spi = nl.Swap32(uint32(state.Spi)) + req.AddData(msg) + + saddr := nl.XfrmAddress{} + saddr.FromIP(state.Src) + srcdata := nl.NewRtAttr(nl.XFRMA_SRCADDR, saddr.Serialize()) + + req.AddData(srcdata) + + _, err := req.Execute(syscall.NETLINK_XFRM, 0) + return err +} + +// XfrmStateList gets a list of xfrm states in the system. +// Equivalent to: `ip xfrm state show`. +// The list can be filtered by ip family. +func XfrmStateList(family int) ([]XfrmState, error) { + req := nl.NewNetlinkRequest(nl.XFRM_MSG_GETSA, syscall.NLM_F_DUMP) + + msg := nl.NewIfInfomsg(family) + req.AddData(msg) + + msgs, err := req.Execute(syscall.NETLINK_XFRM, nl.XFRM_MSG_NEWSA) + if err != nil { + return nil, err + } + + var res []XfrmState + for _, m := range msgs { + msg := nl.DeserializeXfrmUsersaInfo(m) + + if family != FAMILY_ALL && family != int(msg.Family) { + continue + } + + var state XfrmState + + state.Dst = msg.Id.Daddr.ToIP() + state.Src = msg.Saddr.ToIP() + state.Proto = Proto(msg.Id.Proto) + state.Mode = Mode(msg.Mode) + state.Spi = int(nl.Swap32(msg.Id.Spi)) + state.Reqid = int(msg.Reqid) + state.ReplayWindow = int(msg.ReplayWindow) + + attrs, err := nl.ParseRouteAttr(m[msg.Len():]) + if err != nil { + return nil, err + } + + for _, attr := range attrs { + switch attr.Attr.Type { + case nl.XFRMA_ALG_AUTH, nl.XFRMA_ALG_CRYPT: + var resAlgo *XfrmStateAlgo + if attr.Attr.Type == nl.XFRMA_ALG_AUTH { + if state.Auth == nil { + state.Auth = new(XfrmStateAlgo) + } + resAlgo = state.Auth + } else { + state.Crypt = new(XfrmStateAlgo) + resAlgo = state.Crypt + } + algo := nl.DeserializeXfrmAlgo(attr.Value[:]) + (*resAlgo).Name = nl.BytesToString(algo.AlgName[:]) + (*resAlgo).Key = 
algo.AlgKey + case nl.XFRMA_ALG_AUTH_TRUNC: + if state.Auth == nil { + state.Auth = new(XfrmStateAlgo) + } + algo := nl.DeserializeXfrmAlgoAuth(attr.Value[:]) + state.Auth.Name = nl.BytesToString(algo.AlgName[:]) + state.Auth.Key = algo.AlgKey + state.Auth.TruncateLen = int(algo.AlgTruncLen) + case nl.XFRMA_ENCAP: + encap := nl.DeserializeXfrmEncapTmpl(attr.Value[:]) + state.Encap = new(XfrmStateEncap) + state.Encap.Type = EncapType(encap.EncapType) + state.Encap.SrcPort = int(nl.Swap16(encap.EncapSport)) + state.Encap.DstPort = int(nl.Swap16(encap.EncapDport)) + state.Encap.OriginalAddress = encap.EncapOa.ToIP() + } + + } + res = append(res, state) + } + return res, nil +} diff --git a/networking/libsnnet/vendor/github.com/vishvananda/netlink/xfrm_state_test.go b/networking/libsnnet/vendor/github.com/vishvananda/netlink/xfrm_state_test.go new file mode 100644 index 000000000..df57ef8b7 --- /dev/null +++ b/networking/libsnnet/vendor/github.com/vishvananda/netlink/xfrm_state_test.go @@ -0,0 +1,50 @@ +package netlink + +import ( + "net" + "testing" +) + +func TestXfrmStateAddDel(t *testing.T) { + tearDown := setUpNetlinkTest(t) + defer tearDown() + + state := XfrmState{ + Src: net.ParseIP("127.0.0.1"), + Dst: net.ParseIP("127.0.0.2"), + Proto: XFRM_PROTO_ESP, + Mode: XFRM_MODE_TUNNEL, + Spi: 1, + Auth: &XfrmStateAlgo{ + Name: "hmac(sha256)", + Key: []byte("abcdefghijklmnopqrstuvwzyzABCDEF"), + }, + Crypt: &XfrmStateAlgo{ + Name: "cbc(aes)", + Key: []byte("abcdefghijklmnopqrstuvwzyzABCDEF"), + }, + } + if err := XfrmStateAdd(&state); err != nil { + t.Fatal(err) + } + policies, err := XfrmStateList(FAMILY_ALL) + if err != nil { + t.Fatal(err) + } + + if len(policies) != 1 { + t.Fatal("State not added properly") + } + + if err = XfrmStateDel(&state); err != nil { + t.Fatal(err) + } + + policies, err = XfrmStateList(FAMILY_ALL) + if err != nil { + t.Fatal(err) + } + if len(policies) != 0 { + t.Fatal("State not removed properly") + } +} diff --git 
a/networking/libsnnet/vnic.go b/networking/libsnnet/vnic.go new file mode 100644 index 000000000..8c3c6628a --- /dev/null +++ b/networking/libsnnet/vnic.go @@ -0,0 +1,361 @@ +// +// Copyright (c) 2016 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package libsnnet + +import ( + "net" + "strings" + "syscall" + + "github.com/vishvananda/netlink" +) + +// NewVnic is used to initialize the Vnic properties +// This has to be called prior to Create() or GetDevice() +func NewVnic(id string) (*Vnic, error) { + Vnic := &Vnic{} + Vnic.Link = &netlink.GenericLink{} + Vnic.GlobalID = id + Vnic.Role = TenantVM + return Vnic, nil +} + +// NewContainerVnic is used to initialize a container Vnic properties +// This has to be called prior to Create() or GetDevice() +func NewContainerVnic(id string) (*Vnic, error) { + Vnic := &Vnic{} + Vnic.Link = &netlink.Veth{} + Vnic.GlobalID = id + Vnic.Role = TenantContainer + return Vnic, nil +} + +//InterfaceName is used to retrieve the name of the physical interface to +//which the VM or the container needs to be connected to +//Returns "" if the link is not setup +func (v *Vnic) InterfaceName() string { + switch v.Role { + case TenantVM: + return v.LinkName + case TenantContainer: + return v.PeerName() + default: + return "" + } +} + +//PeerName is used to retrieve the peer name +//Returns "" if the link is not setup or if the link +//has no peer +func (v *Vnic) PeerName() string { + if v.Role != 
TenantContainer { + return v.LinkName + } + + if strings.HasPrefix(v.LinkName, prefixVnicHost) { + return strings.Replace(v.LinkName, prefixVnicHost, prefixVnicCont, 1) + } + if strings.HasPrefix(v.LinkName, prefixVnicCont) { + return strings.Replace(v.LinkName, prefixVnicCont, prefixVnicHost, 1) + } + return "" +} + +// GetDevice is used to associate with an existing Vnic provided it satisfies +// the needs of a Vnic. Returns error if the Vnic does not exist +func (v *Vnic) GetDevice() error { + + if v.GlobalID == "" { + return netError(v, "get device unnamed vnic") + } + + link, err := netlink.LinkByAlias(v.GlobalID) + if err != nil { + return netError(v, "get device interface does not exist: %v", v.GlobalID) + } + + switch v.Role { + case TenantVM: + vl, ok := link.(*netlink.GenericLink) + if !ok { + return netError(v, "get device incorrect interface type %v %v", v.GlobalID, link.Type()) + } + + // TODO: Why do both tun and tap interfaces return the type tun + if link.Type() != "tun" { + return netError(v, "get device incorrect interface type %v %v", v.GlobalID, link.Type()) + } + + if flags := uint(link.Attrs().Flags); (flags & syscall.IFF_TAP) == 0 { + return netError(v, "get device incorrect interface type %v %v", v.GlobalID, link) + } + v.LinkName = vl.Name + v.Link = vl + case TenantContainer: + vl, ok := link.(*netlink.Veth) + if !ok { + return netError(v, "get device incorrect interface type %v %v", v.GlobalID, link.Type()) + } + v.LinkName = vl.Name + v.Link = vl + default: + return netError(v, " invalid or unsupported VNIC type %v", v.GlobalID) + } + + return nil +} + +// Create instantiates new vnic +func (v *Vnic) Create() error { + var err error + + if v.GlobalID == "" { + return netError(v, "create cannot create an unnamed vnic") + } + + switch v.Role { + case TenantVM: + case TenantContainer: + default: + return netError(v, "invalid vnic role specified") + } + + if v.LinkName == "" { + if v.LinkName, err = GenIface(v, true); err != nil { + return 
netError(v, "create geniface %v %v", v.GlobalID, err) + } + + if _, err := netlink.LinkByAlias(v.GlobalID); err == nil { + return netError(v, "create interface exists %v", v.GlobalID) + } + } + + switch v.Role { + case TenantVM: + + tap := &netlink.Tuntap{ + LinkAttrs: netlink.LinkAttrs{Name: v.LinkName}, + Mode: netlink.TUNTAP_MODE_TAP, + } + + if err := netlink.LinkAdd(tap); err != nil { + return netError(v, "create link add %v %v", v.GlobalID, err) + } + + link, err := netlink.LinkByName(v.LinkName) + if err != nil { + return netError(v, "create link by name %v %v", v.GlobalID, err) + } + + vl, ok := link.(*netlink.GenericLink) + if !ok { + return netError(v, "create incorrect interface type %v %v", v.GlobalID, link.Type()) + } + + v.Link = vl + case TenantContainer: + //We create only the host side veth, the container side is setup by the kernel + veth := &netlink.Veth{ + LinkAttrs: netlink.LinkAttrs{ + Name: v.LinkName, + }, + PeerName: v.PeerName(), + } + + if err := netlink.LinkAdd(veth); err != nil { + return netError(v, "create link add %v %v", v.GlobalID, err) + } + + link, err := netlink.LinkByName(v.LinkName) + if err != nil { + return netError(v, "create link by name %v %v", v.GlobalID, err) + } + vl, ok := link.(*netlink.Veth) + if !ok { + return netError(v, "create incorrect interface type %v %v", v.GlobalID, link.Type()) + } + + v.Link = vl + } + + if err := v.setAlias(v.GlobalID); err != nil { + v.Destroy() + return netError(v, "create set alias %v %v", v.GlobalID, err) + } + + return nil +} + +// Destroy a vnic +func (v *Vnic) Destroy() error { + + if v.Link == nil || v.Link.Attrs().Index == 0 { + return netError(v, "destroy unnitialized") + } + + if err := netlink.LinkDel(v.Link); err != nil { + return netError(v, "destroy link del %v", err) + } + + return nil + +} + +// Attach the vnic to a bridge or a switch. 
Will return an error if the vnic
// is incapable of binding to the specified device
func (v *Vnic) Attach(dev interface{}) error {

	if v.Link == nil || v.Link.Attrs().Index == 0 {
		return netError(v, "attach uninitialized")
	}

	// Only bridges are supported as attachment targets.
	br, ok := dev.(*Bridge)
	if !ok {
		return netError(v, "attach device %v, %T", dev, dev)
	}

	if br.Link == nil || br.Link.Index == 0 {
		return netError(v, "attach bridge uninitialized")
	}

	if err := netlink.LinkSetMaster(v.Link, br.Link); err != nil {
		return netError(v, "attach set master %v", err)
	}

	return nil
}

// Detach the vnic from the device it is attached to
func (v *Vnic) Detach(dev interface{}) error {

	if v.Link == nil || v.Link.Attrs().Index == 0 {
		return netError(v, "detach uninitialized")
	}

	br, ok := dev.(*Bridge)

	if !ok {
		return netError(v, "detach unknown device %v, %T", dev, dev)
	}

	if br.Link == nil {
		return netError(v, "detach bridge uninitialized")
	}

	if err := netlink.LinkSetNoMaster(v.Link); err != nil {
		return netError(v, "detach set no master %v", err)
	}

	return nil
}

// Enable the vnic
func (v *Vnic) Enable() error {

	if v.Link == nil || v.Link.Attrs().Index == 0 {
		return netError(v, "enable uninitialized")
	}

	if err := netlink.LinkSetUp(v.Link); err != nil {
		// Fixed doubled word in the original message ("set set up").
		return netError(v, "enable link set up %v", err)
	}

	return nil

}

// Disable the vnic
func (v *Vnic) Disable() error {

	if v.Link == nil || v.Link.Attrs().Index == 0 {
		return netError(v, "disable uninitialized")
	}

	if err := netlink.LinkSetDown(v.Link); err != nil {
		return netError(v, "disable link set down %v", err)
	}

	return nil
}

//SetMTU of the interface
func (v *Vnic) SetMTU(mtu int) error {

	if v.Link == nil || v.Link.Attrs().Index == 0 {
		// The original reported "disable" here, copied from Disable.
		return netError(v, "set mtu uninitialized")
	}

	switch v.Role {
	case TenantVM:
		/* Set by DHCP. */
	case TenantContainer:
		/* Need to set the MTU of both ends */
		if err := netlink.LinkSetMTU(v.Link, mtu); err != nil {
			return netError(v, "link set mtu %v", err)
		}
		peerVeth := &netlink.Veth{
			LinkAttrs: netlink.LinkAttrs{
				Name: v.PeerName(),
			},
			PeerName: v.LinkName,
		}
		if err := netlink.LinkSetMTU(peerVeth, mtu); err != nil {
			return netError(v, "link set peer mtu %v", err)
		}
	}

	return nil
}

//SetHardwareAddr of the interface
func (v *Vnic) SetHardwareAddr(addr net.HardwareAddr) error {

	if v.Link == nil || v.Link.Attrs().Index == 0 {
		// The original reported "disable" here, copied from Disable.
		return netError(v, "set hardware addr uninitialized")
	}

	switch v.Role {
	case TenantVM:
		/* Set by QEMU. */
	case TenantContainer:
		/* Need to set the MAC on the container side */
		peerVeth := &netlink.Veth{
			LinkAttrs: netlink.LinkAttrs{
				Name: v.PeerName(),
			},
			PeerName: v.LinkName,
		}
		if err := netlink.LinkSetHardwareAddr(peerVeth, addr); err != nil {
			// The original reported "link set peer mtu" for a MAC failure.
			return netError(v, "link set peer hardware addr %v", err)
		}
	}

	return nil
}

// setAlias tags the underlying link with the vnic's GlobalID so it can be
// found again with netlink.LinkByAlias (see GetDevice).
func (v *Vnic) setAlias(alias string) error {

	if v.Link == nil || v.Link.Attrs().Index == 0 {
		return netError(v, "set alias uninitialized")
	}

	if err := netlink.LinkSetAlias(v.Link, alias); err != nil {
		return netError(v, "link set alias %v %v", alias, err)
	}

	return nil
}

//
// Copyright (c) 2016 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package libsnnet_test + +import ( + "strings" + "testing" + + "github.com/01org/ciao/networking/libsnnet" +) + +//Tests all the basic VNIC primitives +// +//Tests for creation, get, enable, disable and destroy +//primitives. If any of these fail, it may be a issue +//with the underlying netlink or kernel dependencies +// +//Test is expected to pass +func TestVnic_Basic(t *testing.T) { + + vnic, _ := libsnnet.NewVnic("testvnic") + + if err := vnic.Create(); err != nil { + t.Errorf("Vnic creation failed: %v", err) + } + + vnic1, _ := libsnnet.NewVnic("testvnic") + + if err := vnic1.GetDevice(); err != nil { + t.Errorf("Vnic Get Device failed: %v", err) + } + + if err := vnic.Enable(); err != nil { + t.Errorf("Vnic enable failed: %v", err) + } + + if err := vnic.Disable(); err != nil { + t.Errorf("Vnic enable failed: %v", err) + } + + if err := vnic.Destroy(); err != nil { + t.Errorf("Vnic deletion failed: %v", err) + } + +} + +//Tests all the basic Container VNIC primitives +// +//Tests for creation, get, enable, disable and destroy +//primitives. 
If any of these fail, it may be an issue
//with the underlying netlink or kernel dependencies
//
//Test is expected to pass
func TestVnicContainer_Basic(t *testing.T) {

	vnic, _ := libsnnet.NewContainerVnic("testvnic")

	if err := vnic.Create(); err != nil {
		t.Errorf("Vnic creation failed: %v", err)
	}

	vnic1, _ := libsnnet.NewContainerVnic("testvnic")

	if err := vnic1.GetDevice(); err != nil {
		t.Errorf("Vnic Get Device failed: %v", err)
	}

	if err := vnic.Enable(); err != nil {
		t.Errorf("Vnic enable failed: %v", err)
	}

	// was reported as "Vnic enable failed" — fixed message for Disable
	if err := vnic.Disable(); err != nil {
		t.Errorf("Vnic disable failed: %v", err)
	}

	if err := vnic.Destroy(); err != nil {
		t.Errorf("Vnic deletion failed: %v", err)
	}

}

//Duplicate VNIC creation detection
//
//Checks if the VNIC create primitive fails gracefully
//on a duplicate VNIC creation
//
//Test is expected to pass
func TestVnic_Dup(t *testing.T) {
	vnic, _ := libsnnet.NewVnic("testvnic")

	if err := vnic.Create(); err != nil {
		t.Errorf("Vnic creation failed: %v", err)
	}

	defer vnic.Destroy()

	vnic1, _ := libsnnet.NewVnic("testvnic")

	// err is nil in this branch, so print nothing instead of a nil error
	if err := vnic1.Create(); err == nil {
		t.Errorf("Duplicate Vnic creation should have failed")
	}

}

//Duplicate Container VNIC creation detection
//
//Checks if the VNIC create primitive fails gracefully
//on a duplicate VNIC creation
//
//Test is expected to pass
func TestVnicContainer_Dup(t *testing.T) {
	// Fixed: this container test mistakenly used NewVnic, so it
	// duplicated TestVnic_Dup instead of covering container VNICs.
	vnic, _ := libsnnet.NewContainerVnic("testconvnic")

	if err := vnic.Create(); err != nil {
		t.Errorf("Vnic creation failed: %v", err)
	}

	defer vnic.Destroy()

	vnic1, _ := libsnnet.NewContainerVnic("testconvnic")

	if err := vnic1.Create(); err == nil {
		t.Errorf("Duplicate Vnic creation should have failed")
	}

}

//Negative test case for VNIC primitives
//
//Simulates various error scenarios and ensures that
//they are handled gracefully
//
//Test is expected to pass
func TestVnic_Invalid(t *testing.T) {
	vnic, err := 
libsnnet.NewVnic("testvnic") + + if err = vnic.GetDevice(); err == nil { + t.Errorf("Non existent device: %v", vnic) + } + if !strings.HasPrefix(err.Error(), "vnic error") { + t.Errorf("Invalid error format %v", err) + } + + if err = vnic.Enable(); err == nil { + t.Errorf("Non existent device: %v", vnic) + } + if !strings.HasPrefix(err.Error(), "vnic error") { + t.Errorf("Invalid error format %v", err) + } + + if err = vnic.Disable(); err == nil { + t.Errorf("Non existent device: %v", vnic) + } + if !strings.HasPrefix(err.Error(), "vnic error") { + t.Errorf("Invalid error format %v", err) + } + + if err = vnic.Destroy(); err == nil { + t.Errorf("Non existent device: %v", vnic) + } + if !strings.HasPrefix(err.Error(), "vnic error") { + t.Errorf("Invalid error format %v", err) + } + +} + +//Negative test case for Container VNIC primitives +// +//Simulates various error scenarios and ensures that +//they are handled gracefully +// +//Test is expected to pass +func TestVnicContainer_Invalid(t *testing.T) { + vnic, err := libsnnet.NewContainerVnic("testcvnic") + + if err = vnic.GetDevice(); err == nil { + t.Errorf("Non existent device: %v", vnic) + } + if !strings.HasPrefix(err.Error(), "vnic error") { + t.Errorf("Invalid error format %v", err) + } + + if err = vnic.Enable(); err == nil { + t.Errorf("Non existent device: %v", vnic) + } + if !strings.HasPrefix(err.Error(), "vnic error") { + t.Errorf("Invalid error format %v", err) + } + + if err = vnic.Disable(); err == nil { + t.Errorf("Non existent device: %v", vnic) + } + if !strings.HasPrefix(err.Error(), "vnic error") { + t.Errorf("Invalid error format %v", err) + } + + if err = vnic.Destroy(); err == nil { + t.Errorf("Non existent device: %v", vnic) + } + if !strings.HasPrefix(err.Error(), "vnic error") { + t.Errorf("Invalid error format %v", err) + } + +} + +//Test ability to attach to an existing Vnic +// +//Tests the the ability to attach to an existing +//vnic and perform all VNIC operations on it +// +//Test 
is expected to pass +func TestVnic_GetDevice(t *testing.T) { + vnic1, _ := libsnnet.NewVnic("testvnic") + + if err := vnic1.Create(); err != nil { + t.Errorf("Vnic creation failed: %v", err) + } + + vnic, _ := libsnnet.NewVnic("testvnic") + + if err := vnic.GetDevice(); err != nil { + t.Errorf("Vnic Get Device failed: %v", err) + } + + if err := vnic.Enable(); err != nil { + t.Errorf("Vnic enable failed: %v", err) + } + + if err := vnic.Disable(); err != nil { + t.Errorf("Vnic enable failed: %v", err) + } + + if err := vnic.Destroy(); err != nil { + t.Errorf("Vnic deletion failed: %v", err) + } +} + +//Test ability to attach to an existing Container Vnic +// +//Tests the the ability to attach to an existing +//vnic and perform all VNIC operations on it +// +//Test is expected to pass +func TestVnicContainer_GetDevice(t *testing.T) { + vnic1, _ := libsnnet.NewContainerVnic("testvnic") + + if err := vnic1.Create(); err != nil { + t.Errorf("Vnic creation failed: %v", err) + } + + vnic, _ := libsnnet.NewContainerVnic("testvnic") + + if err := vnic.GetDevice(); err != nil { + t.Errorf("Vnic Get Device failed: %v", err) + } + + if err := vnic.Enable(); err != nil { + t.Errorf("Vnic enable failed: %v", err) + } + + if err := vnic.Disable(); err != nil { + t.Errorf("Vnic enable failed: %v", err) + } + + if err := vnic.Destroy(); err != nil { + t.Errorf("Vnic deletion failed: %v", err) + } +} + +//Tests VNIC attach to a bridge +// +//Tests all interactions between VNIC and Bridge +// +//Test is expected to pass +func TestVnic_Bridge(t *testing.T) { + vnic, _ := libsnnet.NewVnic("testvnic") + bridge, _ := libsnnet.NewBridge("testbridge") + + if err := vnic.Create(); err != nil { + t.Errorf("Vnic Create failed: %v", err) + } + + defer vnic.Destroy() + + if err := bridge.Create(); err != nil { + t.Errorf("Vnic Create failed: %v", err) + } + defer bridge.Destroy() + + if err := vnic.Attach(bridge); err != nil { + t.Errorf("Vnic attach failed: %v", err) + } + + if err := 
vnic.Enable(); err != nil { + t.Errorf("Vnic enable failed: %v", err) + } + + if err := bridge.Enable(); err != nil { + t.Errorf("Vnic deletion failed: %v", err) + } + + if err := vnic.Detach(bridge); err != nil { + t.Errorf("Vnic detach failed: %v", err) + } + +} + +//Tests Container VNIC attach to a bridge +// +//Tests all interactions between VNIC and Bridge +// +//Test is expected to pass +func TestVnicContainer_Bridge(t *testing.T) { + vnic, _ := libsnnet.NewContainerVnic("testvnic") + bridge, _ := libsnnet.NewBridge("testbridge") + + if err := vnic.Create(); err != nil { + t.Errorf("Vnic Create failed: %v", err) + } + + defer vnic.Destroy() + + if err := bridge.Create(); err != nil { + t.Errorf("Vnic Create failed: %v", err) + } + defer bridge.Destroy() + + if err := vnic.Attach(bridge); err != nil { + t.Errorf("Vnic attach failed: %v", err) + } + + if err := vnic.Enable(); err != nil { + t.Errorf("Vnic enable failed: %v", err) + } + + if err := bridge.Enable(); err != nil { + t.Errorf("Vnic deletion failed: %v", err) + } + + if err := vnic.Detach(bridge); err != nil { + t.Errorf("Vnic detach failed: %v", err) + } +} diff --git a/payloads/assignpublicIP.go b/payloads/assignpublicIP.go new file mode 100644 index 000000000..5e765112d --- /dev/null +++ b/payloads/assignpublicIP.go @@ -0,0 +1,80 @@ +/* +// Copyright (c) 2016 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+*/ + +package payloads + +type PublicIPCommand struct { + ConcentratorUUID string `yaml:"concentrator_uuid"` + TenantUUID string `yaml:"tenant_uuid"` + InstanceUUID string `yaml:"instance_uuid"` + PublicIP string `yaml:"public_ip"` + PrivateIP string `yaml:"private_ip"` + VnicMAC string `yaml:"vnic_mac"` +} + +type CommandAssignPublicIP struct { + AssignIP PublicIPCommand `yaml:"assign_public_ip"` +} + +type CommandReleasePublicIP struct { + ReleaseIP PublicIPCommand `yaml:"release_public_ip"` +} + +type PublicIPFailureReason string + +const ( + PublicIPNoInstance PublicIPFailureReason = "no_instance" + PublicIPInvalidPayload = "invalid_payload" + PublicIPInvalidData = "invalid_data" + PublicIPAssignFailure = "assign_failure" + PublicIPReleaseFailure = "release_failure" +) + +type ErrorPublicIPFailure struct { + ConcentratorUUID string `yaml:"concentrator_uuid"` + TenantUUID string `yaml:"tenant_uuid"` + InstanceUUID string `yaml:"instance_uuid"` + PublicIP string `yaml:"public_ip"` + PrivateIP string `yaml:"private_ip"` + VnicMAC string `yaml:"vnic_mac"` + Reason PublicIPFailureReason `yaml:"reason"` +} + +func (s *ErrorPublicIPFailure) Init() { + s.ConcentratorUUID = "" + s.TenantUUID = "" + s.InstanceUUID = "" + s.Reason = "" + s.PublicIP = "" + s.PrivateIP = "" + s.VnicMAC = "" +} + +func (r PublicIPFailureReason) String() string { + switch r { + case PublicIPNoInstance: + return "Instance does not exist" + case PublicIPInvalidPayload: + return "YAML payload is corrupt" + case PublicIPInvalidData: + return "Command section of YAML payload is corrupt or missing required information" + case PublicIPAssignFailure: + return "Public IP assignment operation_failed" + case PublicIPReleaseFailure: + return "Public IP release operation_failed" + } + return "" +} diff --git a/payloads/assignpublicIP_test.go b/payloads/assignpublicIP_test.go new file mode 100644 index 000000000..fa1ddc279 --- /dev/null +++ b/payloads/assignpublicIP_test.go @@ -0,0 +1,151 @@ +/* +// 
Copyright (c) 2016 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +*/ + +package payloads + +import ( + "testing" + + "gopkg.in/yaml.v2" +) + +const instancePublicIP = "10.1.2.3" +const instancePrivateIP = "192.168.1.2" +const vnicMAC = "aa:bb:cc:01:02:03" + +const assignIPYaml = "" + + "assign_public_ip:\n" + + " concentrator_uuid: " + cnciUUID + "\n" + + " tenant_uuid: " + tenantUUID + "\n" + + " instance_uuid: " + instanceUUID + "\n" + + " public_ip: " + instancePublicIP + "\n" + + " private_ip: " + instancePrivateIP + "\n" + + " vnic_mac: " + vnicMAC + "\n" + +const releaseIPYaml = "" + + "release_public_ip:\n" + + " concentrator_uuid: " + cnciUUID + "\n" + + " tenant_uuid: " + tenantUUID + "\n" + + " instance_uuid: " + instanceUUID + "\n" + + " public_ip: " + instancePublicIP + "\n" + + " private_ip: " + instancePrivateIP + "\n" + + " vnic_mac: " + vnicMAC + "\n" + +func TestAssignPublicIPUnmarshal(t *testing.T) { + var assignIP CommandAssignPublicIP + + err := yaml.Unmarshal([]byte(assignIPYaml), &assignIP) + if err != nil { + t.Error(err) + } + + if assignIP.AssignIP.ConcentratorUUID != cnciUUID { + t.Errorf("Wrong concentrator UUID field [%s]", assignIP.AssignIP.ConcentratorUUID) + } + + if assignIP.AssignIP.TenantUUID != tenantUUID { + t.Errorf("Wrong tenant UUID field [%s]", assignIP.AssignIP.TenantUUID) + } + + if assignIP.AssignIP.InstanceUUID != instanceUUID { + t.Errorf("Wrong instance UUID field [%s]", 
assignIP.AssignIP.InstanceUUID) + } + + if assignIP.AssignIP.PublicIP != instancePublicIP { + t.Errorf("Wrong public IP field [%s]", assignIP.AssignIP.PublicIP) + } + + if assignIP.AssignIP.PrivateIP != instancePrivateIP { + t.Errorf("Wrong private IP field [%s]", assignIP.AssignIP.PrivateIP) + } + + if assignIP.AssignIP.VnicMAC != vnicMAC { + t.Errorf("Wrong VNIC MAC field [%s]", assignIP.AssignIP.VnicMAC) + } +} + +func TestReleasePublicIPUnmarshal(t *testing.T) { + var releaseIP CommandReleasePublicIP + + err := yaml.Unmarshal([]byte(releaseIPYaml), &releaseIP) + if err != nil { + t.Error(err) + } + + if releaseIP.ReleaseIP.ConcentratorUUID != cnciUUID { + t.Errorf("Wrong concentrator UUID field [%s]", releaseIP.ReleaseIP.ConcentratorUUID) + } + + if releaseIP.ReleaseIP.TenantUUID != tenantUUID { + t.Errorf("Wrong tenant UUID field [%s]", releaseIP.ReleaseIP.TenantUUID) + } + + if releaseIP.ReleaseIP.InstanceUUID != instanceUUID { + t.Errorf("Wrong instance UUID field [%s]", releaseIP.ReleaseIP.InstanceUUID) + } + + if releaseIP.ReleaseIP.PublicIP != instancePublicIP { + t.Errorf("Wrong public IP field [%s]", releaseIP.ReleaseIP.PublicIP) + } + + if releaseIP.ReleaseIP.PrivateIP != instancePrivateIP { + t.Errorf("Wrong private IP field [%s]", releaseIP.ReleaseIP.PrivateIP) + } + + if releaseIP.ReleaseIP.VnicMAC != vnicMAC { + t.Errorf("Wrong VNIC MAC field [%s]", releaseIP.ReleaseIP.VnicMAC) + } +} + +func TestAssignPublicIPMarshal(t *testing.T) { + var assignIP CommandAssignPublicIP + + assignIP.AssignIP.ConcentratorUUID = cnciUUID + assignIP.AssignIP.TenantUUID = tenantUUID + assignIP.AssignIP.InstanceUUID = instanceUUID + assignIP.AssignIP.PublicIP = instancePublicIP + assignIP.AssignIP.PrivateIP = instancePrivateIP + assignIP.AssignIP.VnicMAC = vnicMAC + + y, err := yaml.Marshal(&assignIP) + if err != nil { + t.Error(err) + } + + if string(y) != assignIPYaml { + t.Errorf("AssignPublicIP marshalling failed\n[%s]\n vs\n[%s]", string(y), assignIPYaml) + } +} + 
+func TestReleasePublicIPMarshal(t *testing.T) { + var releaseIP CommandReleasePublicIP + + releaseIP.ReleaseIP.ConcentratorUUID = cnciUUID + releaseIP.ReleaseIP.TenantUUID = tenantUUID + releaseIP.ReleaseIP.InstanceUUID = instanceUUID + releaseIP.ReleaseIP.PublicIP = instancePublicIP + releaseIP.ReleaseIP.PrivateIP = instancePrivateIP + releaseIP.ReleaseIP.VnicMAC = vnicMAC + + y, err := yaml.Marshal(&releaseIP) + if err != nil { + t.Error(err) + } + + if string(y) != releaseIPYaml { + t.Errorf("ReleasePublicIP marshalling failed\n[%s]\n vs\n[%s]", string(y), releaseIPYaml) + } +} diff --git a/payloads/cnciinstance.go b/payloads/cnciinstance.go new file mode 100644 index 000000000..ad4b115a4 --- /dev/null +++ b/payloads/cnciinstance.go @@ -0,0 +1,21 @@ +/* +// Copyright (c) 2016 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +*/ + +package payloads + +type CNCIInstanceConfig struct { + SchedulerAddr string `yaml:"scheduler_addr"` +} diff --git a/payloads/cnciinstance_test.go b/payloads/cnciinstance_test.go new file mode 100644 index 000000000..3c614e715 --- /dev/null +++ b/payloads/cnciinstance_test.go @@ -0,0 +1,50 @@ +/* +// Copyright (c) 2016 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +*/ + +package payloads + +import ( + "testing" + + "gopkg.in/yaml.v2" +) + +const cnciInstanceData = "scheduler_addr: 192.168.42.5\n" + +func TestCNCIInstanceUnmarshal(t *testing.T) { + var config CNCIInstanceConfig + err := yaml.Unmarshal([]byte(cnciInstanceData), &config) + if err != nil { + t.Error(err) + } + + if config.SchedulerAddr != "192.168.42.5" { + t.Errorf("Wrong ADDR field [%s]", config.SchedulerAddr) + } +} + +func TestCNCIInstanceMarshal(t *testing.T) { + config := CNCIInstanceConfig{SchedulerAddr: "192.168.42.5"} + + y, err := yaml.Marshal(&config) + if err != nil { + t.Error(err) + } + + if string(y) != cnciInstanceData { + t.Errorf("CNCIInstance marshalling failed\n[%s]\n vs\n[%s]", string(y), cnciInstanceData) + } +} diff --git a/payloads/compute.go b/payloads/compute.go new file mode 100644 index 000000000..6931bd3a6 --- /dev/null +++ b/payloads/compute.go @@ -0,0 +1,230 @@ +/* +// Copyright (c) 2016 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+*/ + +package payloads + +import ( + "time" +) + +type PrivateAddresses struct { + Addr string `json:"addr"` + OSEXTIPSMACMacAddr string `json:"OS-EXT-IPS-MAC:mac_addr"` + OSEXTIPSType string `json:"OS-EXT-IPS:type"` + Version int `json:"version"` +} + +type Addresses struct { + Private []PrivateAddresses `json:"private"` +} + +type Link struct { + Href string `json:"href"` + Rel string `json:"rel"` +} + +type Flavor struct { + ID string `json:"id"` + Links []Link `json:"links"` +} + +type Image struct { + ID string `json:"id"` + Links []Link `json:"links"` +} + +type SecurityGroup struct { + Name string `json:"name"` +} + +type Server struct { + Addresses Addresses `json:"addresses"` + Created time.Time `json:"created"` + Flavor Flavor `json:"flavor"` + HostID string `json:"hostId"` + ID string `json:"id"` + Image Image `json:"image"` + KeyName string `json:"key_name"` + Links []Link `json:"links"` + Name string `json:"name"` + AccessIPv4 string `json:"accessIPv4"` + AccessIPv6 string `json:"accessIPv6"` + ConfigDrive string `json:"config_drive"` + OSDCFDiskConfig string `json:"OS-DCF:diskConfig"` + OSEXTAZAvailabilityZone string `json:"OS-EXT-AZ:availability_zone"` + OSEXTSRVATTRHost string `json:"OS-EXT-SRV-ATTR:host"` + OSEXTSRVATTRHypervisorHostname string `json:"OS-EXT-SRV-ATTR:hypervisor_hostname"` + OSEXTSRVATTRInstanceName string `json:"OS-EXT-SRV-ATTR:instance_name"` + OSEXTSTSPowerState int `json:"OS-EXT-STS:power_state"` + OSEXTSTSTaskState string `json:"OS-EXT-STS:task_state"` + OSEXTSTSVMState string `json:"OS-EXT-STS:vm_state"` + OsExtendedVolumesVolumesAttached []string `json:"os-extended-volumes:volumes_attached"` + OSSRVUSGLaunchedAt time.Time `json:"OS-SRV-USG:launched_at"` + OSSRVUSGTerminatedAt time.Time `json:"OS-SRV-USG:terminated_at"` + Progress int `json:"progress"` + SecurityGroups []SecurityGroup `json:"security_groups"` + Status string `json:"status"` + HostStatus string `json:"host_status"` + TenantID string `json:"tenant_id"` + 
Updated time.Time `json:"updated"` + UserID string `json:"user_id"` + SSHIP string `json:"ssh_ip"` + SSHPort int `json:"ssh_port"` +} + +type ComputeServers struct { + Servers []Server `json:"servers"` +} + +type ComputeServer struct { + Server Server `json:"server"` +} + +type ComputeFlavors struct { + Flavors []struct { + ID string `json:"id"` + Links []Link `json:"links"` + Name string `json:"name"` + } `json:"flavors"` +} + +type FlavorDetails struct { + OSFLVDISABLEDDisabled bool `json:"OS-FLV-DISABLED:disabled"` + Disk string `json:"disk"` /* OpenStack API says this is an int */ + OSFLVEXTDATAEphemeral int `json:"OS-FLV-EXT-DATA:ephemeral"` + OsFlavorAccessIsPublic bool `json:"os-flavor-access:is_public"` + ID string `json:"id"` + Links []Link `json:"links"` + Name string `json:"name"` + RAM int `json:"ram"` + Swap string `json:"swap"` + Vcpus int `json:"vcpus"` +} + +type ComputeFlavorDetails struct { + Flavor FlavorDetails `json:"flavor"` +} + +type ComputeCreateServer struct { + Server struct { + Name string `json:"name"` + Image string `json:"imageRef"` + Workload string `json:"flavorRef"` + MaxInstances int `json:"max_count"` + MinInstances int `json:"min_count"` + } `json:"server"` +} + +type CiaoComputeTenants struct { + Tenants []struct { + ID string `json:"id"` + Name string `json:"name"` + } `json:"tenants"` +} + +type CiaoComputeNode struct { + ID string `json:"id"` + Timestamp time.Time `json:"updated"` + Status string `json:"status"` + MemTotal int `json:"ram_total"` + MemAvailable int `json:"ram_available"` + DiskTotal int `json:"disk_total"` + DiskAvailable int `json:"disk_available"` + Load int `json:"load"` + OnlineCPUs int `json:"online_cpus"` + TotalInstances int `json:"total_instances"` + TotalRunningInstances int `json:"total_running_instances"` + TotalPendingInstances int `json:"total_pending_instances"` + TotalPausedInstances int `json:"total_paused_instances"` +} + +type CiaoComputeNodes struct { + Nodes []CiaoComputeNode 
`json:"nodes"`
}

// CiaoTenantResources summarizes a tenant's resource limits and usage.
type CiaoTenantResources struct {
	ID            string    `json:"id"`
	Timestamp     time.Time `json:"updated"`
	InstanceLimit int       `json:"instances_limit"`
	InstanceUsage int       `json:"instances_usage"`
	VCPULimit     int       `json:"cpus_limit"`
	VCPUUsage     int       `json:"cpus_usage"`
	MemLimit      int       `json:"ram_limit"`
	MemUsage      int       `json:"ram_usage"`
	DiskLimit     int       `json:"disk_limit"`
	DiskUsage     int       `json:"disk_usage"`
}

// CiaoUsage is a single point-in-time resource usage sample.
type CiaoUsage struct {
	VCPU      int       `json:"cpus_usage"`
	Memory    int       `json:"ram_usage"`
	Disk      int       `json:"disk_usage"`
	Timestamp time.Time `json:"timestamp"`
}

// CiaoUsageHistory is a time series of usage samples.
type CiaoUsageHistory struct {
	// Fixed malformed struct tag: `json: usage` is not valid
	// key:"value" syntax and was silently ignored by encoding/json,
	// so the field marshalled as "Usages" instead of the intended key.
	Usages []CiaoUsage `json:"usage"`
}

// CiaoCNCISubnet describes one subnet handled by a CNCI.
type CiaoCNCISubnet struct {
	Subnet string `json:"subnet_cidr"`
}

// CiaoCNCI describes a CNCI instance and the subnets it serves.
type CiaoCNCI struct {
	ID        string           `json:"id"`
	TenantID  string           `json:"tenant_id"`
	IPv4      string           `json:"IPv4"`
	Geography string           `json:"geography"`
	Subnets   []CiaoCNCISubnet `json:"subnets"`
}

// CiaoCNCIDetail wraps a single CNCI for detail responses.
type CiaoCNCIDetail struct {
	CiaoCNCI `json:"cnci"`
}

// CiaoCNCIs is the list form of CiaoCNCI.
type CiaoCNCIs struct {
	CNCIs []CiaoCNCI `json:"cncis"`
}

// CiaoServerStats reports per-server status and resource usage.
type CiaoServerStats struct {
	ID        string    `json:"id"`
	NodeID    string    `json:"node_id"`
	Timestamp time.Time `json:"updated"`
	Status    string    `json:"status"`
	TenantID  string    `json:"tenant_id"`
	IPv4      string    `json:"IPv4"`
	VCPUUsage int       `json:"cpus_usage"`
	MemUsage  int       `json:"ram_usage"`
	DiskUsage int       `json:"disk_usage"`
}

// CiaoServersStats is the list form of CiaoServerStats.
type CiaoServersStats struct {
	Servers []CiaoServerStats `json:"servers"`
}

// CiaoClusterStatus summarizes node readiness across the cluster.
type CiaoClusterStatus struct {
	Status struct {
		TotalNodes            int `json:"total_nodes"`
		TotalNodesReady       int `json:"total_nodes_ready"`
		TotalNodesFull        int `json:"total_nodes_full"`
		TotalNodesOffline     int `json:"total_nodes_offline"`
		TotalNodesMaintenance int `json:"total_nodes_maintenance"`
	} `json:"cluster"`
}

// CNCIDetail carries the IPv4 address of a CNCI.
type CNCIDetail struct {
	IPv4 string `json:"IPv4"`
}
diff --git a/payloads/concentratorinstanceadded.go b/payloads/concentratorinstanceadded.go
new file mode 100644
index 000000000..9ec0b03c1
--- 
/dev/null +++ b/payloads/concentratorinstanceadded.go @@ -0,0 +1,28 @@ +/* +// Copyright (c) 2016 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +*/ + +package payloads + +type ConcentratorInstanceAddedEvent struct { + InstanceUUID string `yaml:"instance_uuid"` + TenantUUID string `yaml:"tenant_uuid"` + ConcentratorIP string `yaml:"concentrator_ip"` + ConcentratorMAC string `yaml:"concentrator_mac"` +} + +type EventConcentratorInstanceAdded struct { + CNCIAdded ConcentratorInstanceAddedEvent `yaml:"concentrator_instance_added"` +} diff --git a/payloads/concentratorinstanceadded_test.go b/payloads/concentratorinstanceadded_test.go new file mode 100644 index 000000000..c7bb8c56b --- /dev/null +++ b/payloads/concentratorinstanceadded_test.go @@ -0,0 +1,78 @@ +/* +// Copyright (c) 2016 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+*/ + +package payloads + +import ( + "testing" + + "gopkg.in/yaml.v2" +) + +const cnciUUID = "3390740c-dce9-48d6-b83a-a717417072ce" +const tenantUUID = "2491851d-dce9-48d6-b83a-a717417072ce" +const cnciIP = "10.1.2.3" +const cnciMAC = "CA:FE:C0:00:01:02" + +const cnciAddedYaml = "" + + "concentrator_instance_added:\n" + + " instance_uuid: " + cnciUUID + "\n" + + " tenant_uuid: " + tenantUUID + "\n" + + " concentrator_ip: " + cnciIP + "\n" + + " concentrator_mac: " + cnciMAC + "\n" + +func TestConcentratorAddedUnmarshal(t *testing.T) { + var cnciAdded EventConcentratorInstanceAdded + + err := yaml.Unmarshal([]byte(cnciAddedYaml), &cnciAdded) + if err != nil { + t.Error(err) + } + + if cnciAdded.CNCIAdded.InstanceUUID != cnciUUID { + t.Errorf("Wrong instance UUID field [%s]", cnciAdded.CNCIAdded.InstanceUUID) + } + + if cnciAdded.CNCIAdded.TenantUUID != tenantUUID { + t.Errorf("Wrong tenant UUID field [%s]", cnciAdded.CNCIAdded.TenantUUID) + } + + if cnciAdded.CNCIAdded.ConcentratorIP != cnciIP { + t.Errorf("Wrong CNCI IP field [%s]", cnciAdded.CNCIAdded.ConcentratorIP) + } + + if cnciAdded.CNCIAdded.ConcentratorMAC != cnciMAC { + t.Errorf("Wrong CNCI MAC field [%s]", cnciAdded.CNCIAdded.ConcentratorMAC) + } +} + +func TestConcentratorAddedMarshal(t *testing.T) { + var cnciAdded EventConcentratorInstanceAdded + + cnciAdded.CNCIAdded.InstanceUUID = cnciUUID + cnciAdded.CNCIAdded.TenantUUID = tenantUUID + cnciAdded.CNCIAdded.ConcentratorIP = cnciIP + cnciAdded.CNCIAdded.ConcentratorMAC = cnciMAC + + y, err := yaml.Marshal(&cnciAdded) + if err != nil { + t.Error(err) + } + + if string(y) != cnciAddedYaml { + t.Errorf("ConcentratorInstanceAdded marshalling failed\n[%s]\n vs\n[%s]", string(y), cnciAddedYaml) + } +} diff --git a/payloads/configure.go b/payloads/configure.go new file mode 100644 index 000000000..45d39e589 --- /dev/null +++ b/payloads/configure.go @@ -0,0 +1,49 @@ +/* +// Copyright (c) 2016 Intel Corporation +// +// Licensed under the Apache License, 
Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +*/ + +package payloads + +type ServiceType string + +const ( + Glance ServiceType = "glance" + Keystone ServiceType = "keystone" +) + +func (s ServiceType) String() string { + switch s { + case Glance: + return "glance" + case Keystone: + return "keystone" + } + + return "" +} + +type ConfigureService struct { + Type ServiceType `yaml:"type"` + URL string `yaml:"url"` +} + +type ConfigureCmd struct { + ImageService ConfigureService `yaml: image_service"` + IdentityService ConfigureService `yaml: identity_service"` +} + +type CommandConfigure struct { + Configure ConfigureCmd `yaml:"configure"` +} diff --git a/payloads/configure_test.go b/payloads/configure_test.go new file mode 100644 index 000000000..57e6b38d5 --- /dev/null +++ b/payloads/configure_test.go @@ -0,0 +1,66 @@ +/* +// Copyright (c) 2016 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+*/ + +package payloads + +import ( + "testing" + + "gopkg.in/yaml.v2" +) + +const keystoneURL = "http://keystone.example.com" +const glanceURL = "http://glance.example.com" + +var configureYaml = "" + + "configure:\n" + + " imageservice:\n" + + " type: " + Glance.String() + "\n" + + " url: " + glanceURL + "\n" + + " identityservice:\n" + + " type: " + Keystone.String() + "\n" + + " url: " + keystoneURL + "\n" + +func TestConfigureUnmarshal(t *testing.T) { + var cfg CommandConfigure + + err := yaml.Unmarshal([]byte(configureYaml), &cfg) + if err != nil { + t.Error(err) + } + + if cfg.Configure.ImageService.Type != Glance { + t.Errorf("Wrong image service type [%s]", cfg.Configure.ImageService.Type) + } +} + +func TestConfigureMarshal(t *testing.T) { + var cfg CommandConfigure + + cfg.Configure.ImageService.Type = Glance + cfg.Configure.ImageService.URL = glanceURL + cfg.Configure.IdentityService.Type = Keystone + cfg.Configure.IdentityService.URL = keystoneURL + + y, err := yaml.Marshal(&cfg) + if err != nil { + t.Error(err) + } + + if string(y) != configureYaml { + t.Errorf("CONFIGURE marshalling failed\n[%s]\n vs\n[%s]", string(y), configureYaml) + } +} diff --git a/payloads/deletefailure.go b/payloads/deletefailure.go new file mode 100644 index 000000000..401297250 --- /dev/null +++ b/payloads/deletefailure.go @@ -0,0 +1,48 @@ +/* +// Copyright (c) 2016 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+*/ + +package payloads + +type DeleteFailureReason string + +const ( + DeleteNoInstance DeleteFailureReason = "no_instance" + DeleteInvalidPayload DeleteFailureReason = "invalid_payload" + DeleteInvalidData DeleteFailureReason = "invalid_data" +) + +type ErrorDeleteFailure struct { + InstanceUUID string `yaml:"instance_uuid"` + Reason DeleteFailureReason `yaml:"reason"` +} + +func (s *ErrorDeleteFailure) Init() { + s.InstanceUUID = "" + s.Reason = "" +} + +func (r DeleteFailureReason) String() string { + switch r { + case DeleteNoInstance: + return "Instance does not exist" + case DeleteInvalidPayload: + return "YAML payload is corrupt" + case DeleteInvalidData: + return "Command section of YAML payload is corrupt or missing required information" + } + + return "" +} diff --git a/payloads/deletefailure_test.go b/payloads/deletefailure_test.go new file mode 100644 index 000000000..cfafff4e5 --- /dev/null +++ b/payloads/deletefailure_test.go @@ -0,0 +1,57 @@ +/* +// Copyright (c) 2016 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+*/ + +package payloads + +import ( + "fmt" + "testing" + + "github.com/docker/distribution/uuid" + "gopkg.in/yaml.v2" +) + +func TestDeleteFailureUnmarshal(t *testing.T) { + deleteFailureYaml := `instance_uuid: 2400bce6-ccc8-4a45-b2aa-b5cc3790077b +reason: no_instance +` + var error ErrorDeleteFailure + err := yaml.Unmarshal([]byte(deleteFailureYaml), &error) + if err != nil { + t.Error(err) + } + + if error.InstanceUUID != "2400bce6-ccc8-4a45-b2aa-b5cc3790077b" { + t.Error("Wrong UUID field") + } + + if error.Reason != DeleteNoInstance { + t.Error("Wrong Error field") + } +} + +func TestDeleteFailureMarshal(t *testing.T) { + error := ErrorDeleteFailure{ + InstanceUUID: uuid.Generate().String(), + Reason: DeleteNoInstance, + } + + y, err := yaml.Marshal(&error) + if err != nil { + t.Error(err) + } + fmt.Println(string(y)) +} diff --git a/payloads/evacuate.go b/payloads/evacuate.go new file mode 100644 index 000000000..8264c9c9e --- /dev/null +++ b/payloads/evacuate.go @@ -0,0 +1,25 @@ +/* +// Copyright (c) 2016 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+*/ + +package payloads + +type EvacuateCmd struct { + WorkloadAgentUUID string `yaml:"workload_agent_uuid"` +} + +type Evacuate struct { + Evacuate EvacuateCmd `yaml:"evacuate"` +} diff --git a/payloads/evacuate_test.go b/payloads/evacuate_test.go new file mode 100644 index 000000000..2c863ab20 --- /dev/null +++ b/payloads/evacuate_test.go @@ -0,0 +1,53 @@ +/* +// Copyright (c) 2016 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +*/ + +package payloads + +import ( + "gopkg.in/yaml.v2" + "testing" +) + +const evacAgentUUID = "64803ffa-fb47-49fa-8191-15d2c34e4dd3" +const evacYaml = "" + + "evacuate:\n" + + " workload_agent_uuid: " + evacAgentUUID + "\n" + +func TestEvacMarshal(t *testing.T) { + var cmd Evacuate + cmd.Evacuate.WorkloadAgentUUID = evacAgentUUID + + y, err := yaml.Marshal(&cmd) + if err != nil { + t.Error(err) + } + + if string(y) != evacYaml { + t.Errorf("EVACUATE marshalling failed\n[%s]\n vs\n[%s]", string(y), evacYaml) + } +} + +func TestEvacUnmarshal(t *testing.T) { + var cmd Evacuate + err := yaml.Unmarshal([]byte(evacYaml), &cmd) + if err != nil { + t.Error(err) + } + + if cmd.Evacuate.WorkloadAgentUUID != evacAgentUUID { + t.Errorf("Wrong Agent UUID field [%s]", cmd.Evacuate.WorkloadAgentUUID) + } +} diff --git a/payloads/instancedeleted.go b/payloads/instancedeleted.go new file mode 100644 index 000000000..968d8f625 --- /dev/null +++ b/payloads/instancedeleted.go @@ -0,0 +1,25 @@ +/* +// Copyright (c) 2016 Intel 
Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +*/ + +package payloads + +type InstanceDeletedEvent struct { + InstanceUUID string `yaml:"instance_uuid"` +} + +type EventInstanceDeleted struct { + InstanceDeleted InstanceDeletedEvent `yaml:"instance_deleted"` +} diff --git a/payloads/instancedeleted_test.go b/payloads/instancedeleted_test.go new file mode 100644 index 000000000..42aea4b2e --- /dev/null +++ b/payloads/instancedeleted_test.go @@ -0,0 +1,55 @@ +/* +// Copyright (c) 2016 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+*/ + +package payloads + +import ( + "gopkg.in/yaml.v2" + "testing" +) + +const insDelUUID = "3390740c-dce9-48d6-b83a-a717417072ce" + +const insDelYaml = "" + + "instance_deleted:\n" + + " instance_uuid: " + insDelUUID + "\n" + +func TestInstanceDeletedUnmarshal(t *testing.T) { + var insDel EventInstanceDeleted + err := yaml.Unmarshal([]byte(insDelYaml), &insDel) + if err != nil { + t.Error(err) + } + + if insDel.InstanceDeleted.InstanceUUID != insDelUUID { + t.Errorf("Wrong instance UUID field [%s]", insDel.InstanceDeleted.InstanceUUID) + } +} + +func TestInstanceDeletedMarshal(t *testing.T) { + var insDel EventInstanceDeleted + + insDel.InstanceDeleted.InstanceUUID = insDelUUID + + y, err := yaml.Marshal(&insDel) + if err != nil { + t.Error(err) + } + + if string(y) != insDelYaml { + t.Errorf("InstanceDeleted marshalling failed\n[%s]\n vs\n[%s]", string(y), insDelYaml) + } +} diff --git a/payloads/publicIPassigned.go b/payloads/publicIPassigned.go new file mode 100644 index 000000000..4277a72e8 --- /dev/null +++ b/payloads/publicIPassigned.go @@ -0,0 +1,28 @@ +/* +// Copyright (c) 2016 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+*/ + +package payloads + +type PublicIPEvent struct { + ConcentratorUUID string `yaml:"concentrator_uuid"` + InstanceUUID string `yaml:"instance_uuid"` + PublicIP string `yaml:"public_ip"` + PrivateIP string `yaml:"private_ip"` +} + +type EventPublicIPAssigned struct { + AssignedIP PublicIPEvent `yaml:"public_ip_assigned"` +} diff --git a/payloads/publicIPassigned_test.go b/payloads/publicIPassigned_test.go new file mode 100644 index 000000000..6defc7e02 --- /dev/null +++ b/payloads/publicIPassigned_test.go @@ -0,0 +1,72 @@ +/* +// Copyright (c) 2016 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+*/ + +package payloads + +import ( + "gopkg.in/yaml.v2" + "testing" +) + +const assignedIPYaml = "" + + "public_ip_assigned:\n" + + " concentrator_uuid: " + cnciUUID + "\n" + + " instance_uuid: " + instanceUUID + "\n" + + " public_ip: " + instancePublicIP + "\n" + + " private_ip: " + instancePrivateIP + "\n" + +func TestPublicIPAssignedUnmarshal(t *testing.T) { + var assignedIP EventPublicIPAssigned + + err := yaml.Unmarshal([]byte(assignedIPYaml), &assignedIP) + if err != nil { + t.Error(err) + } + + if assignedIP.AssignedIP.ConcentratorUUID != cnciUUID { + t.Errorf("Wrong concentrator UUID field [%s]", assignedIP.AssignedIP.ConcentratorUUID) + } + + if assignedIP.AssignedIP.InstanceUUID != instanceUUID { + t.Errorf("Wrong instance UUID field [%s]", assignedIP.AssignedIP.InstanceUUID) + } + + if assignedIP.AssignedIP.PublicIP != instancePublicIP { + t.Errorf("Wrong public IP field [%s]", assignedIP.AssignedIP.PublicIP) + } + + if assignedIP.AssignedIP.PrivateIP != instancePrivateIP { + t.Errorf("Wrong private IP field [%s]", assignedIP.AssignedIP.PrivateIP) + } +} + +func TestPublicIPAssignedMarshal(t *testing.T) { + var assignedIP EventPublicIPAssigned + + assignedIP.AssignedIP.ConcentratorUUID = cnciUUID + assignedIP.AssignedIP.InstanceUUID = instanceUUID + assignedIP.AssignedIP.PublicIP = instancePublicIP + assignedIP.AssignedIP.PrivateIP = instancePrivateIP + + y, err := yaml.Marshal(&assignedIP) + if err != nil { + t.Error(err) + } + + if string(y) != assignedIPYaml { + t.Errorf("PublicIPAssigned marshalling failed\n[%s]\n vs\n[%s]", string(y), assignedIPYaml) + } +} diff --git a/payloads/ready.go b/payloads/ready.go new file mode 100644 index 000000000..038149703 --- /dev/null +++ b/payloads/ready.go @@ -0,0 +1,37 @@ +/* +// Copyright (c) 2016 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +*/ + +package payloads + +type Ready struct { + NodeUUID string `yaml:"node_uuid"` + MemTotalMB int `yaml:"mem_total_mb"` + MemAvailableMB int `yaml:"mem_available_mb"` + DiskTotalMB int `yaml:"disk_total_mb"` + DiskAvailableMB int `yaml:"disk_available_mb"` + Load int `yaml:"load"` + CpusOnline int `yaml:"cpus_online"` +} + +func (s *Ready) Init() { + s.NodeUUID = "" + s.MemTotalMB = -1 + s.MemAvailableMB = -1 + s.DiskTotalMB = -1 + s.DiskAvailableMB = -1 + s.Load = -1 + s.CpusOnline = -1 +} diff --git a/payloads/ready_test.go b/payloads/ready_test.go new file mode 100644 index 000000000..4b65c25a4 --- /dev/null +++ b/payloads/ready_test.go @@ -0,0 +1,94 @@ +/* +// Copyright (c) 2016 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+*/ + +package payloads + +import ( + "fmt" + "github.com/docker/distribution/uuid" + "gopkg.in/yaml.v2" + "testing" +) + +func TestReadyUnmarshal(t *testing.T) { + readyYaml := `node_uuid: 2400bce6-ccc8-4a45-b2aa-b5cc3790077b +mem_total_mb: 3896 +mem_available_mb: 3896 +disk_total_mb: 500000 +disk_available_mb: 256000 +load: 0 +cpus_online: 4 +` + var cmd Ready + err := yaml.Unmarshal([]byte(readyYaml), &cmd) + if err != nil { + t.Error(err) + } +} + +func TestReadyMarshal(t *testing.T) { + cmd := Ready{ + NodeUUID: uuid.Generate().String(), + MemTotalMB: 3896, + MemAvailableMB: 3896, + DiskTotalMB: 500000, + DiskAvailableMB: 256000, + Load: 0, + CpusOnline: 4, + } + + y, err := yaml.Marshal(&cmd) + if err != nil { + t.Error(err) + } + fmt.Println(string(y)) +} + +// make sure the yaml can be unmarshaled into the Ready struct +// when only some node stats are present +func TestReadyNodeNotAllStats(t *testing.T) { + readyYaml := `node_uuid: 2400bce6-ccc8-4a45-b2aa-b5cc3790077b +load: 1 +` + var cmd Ready + cmd.Init() + + err := yaml.Unmarshal([]byte(readyYaml), &cmd) + if err != nil { + t.Error(err) + } + + expectedCmd := Ready{ + NodeUUID: "2400bce6-ccc8-4a45-b2aa-b5cc3790077b", + MemTotalMB: -1, + MemAvailableMB: -1, + DiskTotalMB: -1, + DiskAvailableMB: -1, + Load: 1, + CpusOnline: -1, + } + if cmd.NodeUUID != expectedCmd.NodeUUID || + cmd.MemTotalMB != expectedCmd.MemTotalMB || + cmd.MemAvailableMB != expectedCmd.MemAvailableMB || + cmd.DiskTotalMB != expectedCmd.DiskTotalMB || + cmd.DiskAvailableMB != expectedCmd.DiskAvailableMB || + cmd.Load != expectedCmd.Load || + cmd.CpusOnline != expectedCmd.CpusOnline { + t.Error("Unexpected values in Ready") + } + + fmt.Println(cmd) +} diff --git a/payloads/restart_test.go b/payloads/restart_test.go new file mode 100644 index 000000000..4fa7a0381 --- /dev/null +++ b/payloads/restart_test.go @@ -0,0 +1,150 @@ +/* +// Copyright (c) 2016 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the 
"License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +*/ + +package payloads + +import ( + "fmt" + "gopkg.in/yaml.v2" + "testing" +) + +func TestRestartUnmarshal(t *testing.T) { + restartYaml := `restart: + instance_uuid: 0e8516d7-af2f-454a-87ed-072aeb9faf53 + image_uuid: 5beea770-1ef5-4c26-8a6c-2026fbc98e37 + workload_agent_uuid: d37e8dd5-3625-42bb-97b5-05291013abad + fw_type: efi + persistence: host + requested_resources: + - type: vcpus + value: 2 + mandatory: true + - type: mem_mb + value: 1014 + mandatory: true + - type: disk_mb + value: 10000 + mandatory: true + estimated_resources: + - type: vcpus + value: 1 + - type: mem_mb + value: 128 + - type: disk_mb + value: 4096 +` + var cmd Restart + err := yaml.Unmarshal([]byte(restartYaml), &cmd) + if err != nil { + t.Error(err) + } +} + +func TestRestartMarshal(t *testing.T) { + reqVcpus := RequestedResource{ + Type: "vcpus", + Value: 2, + Mandatory: true, + } + reqMem := RequestedResource{ + Type: "mem_mb", + Value: 4096, + Mandatory: true, + } + reqDisk := RequestedResource{ + Type: "disk_mb", + Value: 10000, + Mandatory: true, + } + estVcpus := EstimatedResource{ + Type: "vcpus", + Value: 1, + } + estMem := EstimatedResource{ + Type: "mem_mb", + Value: 128, + } + estDisk := EstimatedResource{ + Type: "disk_mb", + Value: 4096, + } + var cmd Restart + cmd.Restart.InstanceUUID = "3ad186a6-7343-4541-a747-78f0dddd9e3e" + cmd.Restart.ImageUUID = "11a94b09-85b6-4434-9f4a-c19d863465f1" + cmd.Restart.WorkloadAgentUUID = "d3acac98-17db-42dc-9fc3-6f737b7b73c2" + 
cmd.Restart.RequestedResources = append(cmd.Restart.RequestedResources, reqVcpus) + cmd.Restart.RequestedResources = append(cmd.Restart.RequestedResources, reqMem) + cmd.Restart.RequestedResources = append(cmd.Restart.RequestedResources, reqDisk) + cmd.Restart.EstimatedResources = append(cmd.Restart.EstimatedResources, estVcpus) + cmd.Restart.EstimatedResources = append(cmd.Restart.EstimatedResources, estMem) + cmd.Restart.EstimatedResources = append(cmd.Restart.EstimatedResources, estDisk) + cmd.Restart.FWType = EFI + cmd.Restart.InstancePersistence = Host + + y, err := yaml.Marshal(&cmd) + if err != nil { + t.Error(err) + } + fmt.Println(string(y)) +} + +// make sure the yaml can be unmarshaled into the Restart struct with +// optional data not present +func TestRestartUnmarshalPartial(t *testing.T) { + restartYaml := `restart: + instance_uuid: a2675987-fa30-45ce-84a2-93ce67106f47 + workload_agent_uuid: 1ab3a664-d344-4a41-acf9-c94d8606e069 + fw_type: efi + persistence: host + requested_resources: + - type: vcpus + value: 2 + mandatory: true +` + var cmd Restart + err := yaml.Unmarshal([]byte(restartYaml), &cmd) + if err != nil { + t.Error(err) + } + fmt.Println(cmd) + + var expectedCmd Restart + expectedCmd.Restart.InstanceUUID = "a2675987-fa30-45ce-84a2-93ce67106f47" + expectedCmd.Restart.WorkloadAgentUUID = "1ab3a664-d344-4a41-acf9-c94d8606e069" + expectedCmd.Restart.FWType = EFI + expectedCmd.Restart.InstancePersistence = Host + vcpus := RequestedResource{ + Type: "vcpus", + Value: 2, + Mandatory: true, + } + expectedCmd.Restart.RequestedResources = append(expectedCmd.Restart.RequestedResources, vcpus) + + if cmd.Restart.InstanceUUID != expectedCmd.Restart.InstanceUUID || + cmd.Restart.WorkloadAgentUUID != expectedCmd.Restart.WorkloadAgentUUID || + cmd.Restart.FWType != expectedCmd.Restart.FWType || + cmd.Restart.InstancePersistence != expectedCmd.Restart.InstancePersistence || + len(cmd.Restart.RequestedResources) != 1 || + 
len(expectedCmd.Restart.RequestedResources) != 1 || + cmd.Restart.RequestedResources[0].Type != expectedCmd.Restart.RequestedResources[0].Type || + cmd.Restart.RequestedResources[0].Value != expectedCmd.Restart.RequestedResources[0].Value || + cmd.Restart.RequestedResources[0].Mandatory != expectedCmd.Restart.RequestedResources[0].Mandatory { + t.Error("Unexpected values in Restart") + } + + fmt.Println(cmd) +} diff --git a/payloads/restartfailure.go b/payloads/restartfailure.go new file mode 100644 index 000000000..e3fbd9083 --- /dev/null +++ b/payloads/restartfailure.go @@ -0,0 +1,60 @@ +/* +// Copyright (c) 2016 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+*/ + +package payloads + +type RestartFailureReason string + +const ( + RestartNoInstance RestartFailureReason = "no_instance" + RestartInvalidPayload RestartFailureReason = "invalid_payload" + RestartInvalidData RestartFailureReason = "invalid_data" + RestartAlreadyRunning RestartFailureReason = "already_running" + RestartInstanceCorrupt RestartFailureReason = "instance_corrupt" + RestartLaunchFailure RestartFailureReason = "launch_failure" + RestartNetworkFailure RestartFailureReason = "network_failure" +) + +type ErrorRestartFailure struct { + InstanceUUID string `yaml:"instance_uuid"` + Reason RestartFailureReason `yaml:"reason"` +} + +func (s *ErrorRestartFailure) Init() { + s.InstanceUUID = "" + s.Reason = "" +} + +func (r RestartFailureReason) String() string { + switch r { + case RestartNoInstance: + return "Instance does not exist" + case RestartInvalidPayload: + return "YAML payload is corrupt" + case RestartInvalidData: + return "Command section of YAML payload is corrupt or missing required information" + case RestartAlreadyRunning: + return "Instance is already running" + case RestartInstanceCorrupt: + return "Instance is corrupt" + case RestartLaunchFailure: + return "Failed to launch instance" + case RestartNetworkFailure: + return "Failed to locate VNIC for instance" + } + + return "" +} diff --git a/payloads/restartfailure_test.go b/payloads/restartfailure_test.go new file mode 100644 index 000000000..4a44b8f81 --- /dev/null +++ b/payloads/restartfailure_test.go @@ -0,0 +1,57 @@ +/* +// Copyright (c) 2016 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. +*/ + +package payloads + +import ( + "fmt" + "testing" + + "github.com/docker/distribution/uuid" + "gopkg.in/yaml.v2" +) + +func TestRestartFailureUnmarshal(t *testing.T) { + restartFailureYaml := `instance_uuid: 2400bce6-ccc8-4a45-b2aa-b5cc3790077b +reason: already_running +` + var error ErrorRestartFailure + err := yaml.Unmarshal([]byte(restartFailureYaml), &error) + if err != nil { + t.Error(err) + } + + if error.InstanceUUID != "2400bce6-ccc8-4a45-b2aa-b5cc3790077b" { + t.Error("Wrong UUID field") + } + + if error.Reason != RestartAlreadyRunning { + t.Error("Wrong Error field") + } +} + +func TestRestartFailureMarshal(t *testing.T) { + error := ErrorRestartFailure{ + InstanceUUID: uuid.Generate().String(), + Reason: RestartInvalidPayload, + } + + y, err := yaml.Marshal(&error) + if err != nil { + t.Error(err) + } + fmt.Println(string(y)) +} diff --git a/payloads/start.go b/payloads/start.go new file mode 100644 index 000000000..b07aabe69 --- /dev/null +++ b/payloads/start.go @@ -0,0 +1,101 @@ +/* +// Copyright (c) 2016 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+*/ + +package payloads + +type Persistence string +type Firmware string +type Resource string +type Hypervisor string + +const ( + All Persistence = "all" + VM Persistence = "vm" + Host Persistence = "host" +) + +const ( + EFI Firmware = "efi" + Legacy Firmware = "legacy" +) + +const ( + VCPUs Resource = "vcpus" + MemMB Resource = "mem_mb" + DiskMB Resource = "disk_mb" + NetworkNode Resource = "network_node" +) + +const ( + QEMU Hypervisor = "qemu" + Docker Hypervisor = "docker" +) + +type RequestedResource struct { + Type Resource `yaml:"type"` + Value int `yaml:"value"` + Mandatory bool `yaml:"mandatory"` +} + +type EstimatedResource struct { + Type Resource `yaml:"type"` + Value int `yaml:"value"` +} + +type NetworkResources struct { + VnicMAC string `yaml:"vnic_mac"` + VnicUUID string `yaml:"vnic_uuid"` + ConcentratorUUID string `yaml:"concentrator_uuid"` + ConcentratorIP string `yaml:"concentrator_ip"` + Subnet string `yaml:"subnet"` + SubnetKey string `yaml:"subnet_key"` + SubnetUUID string `yaml:"subnet_uuid"` + PrivateIP string `yaml:"private_ip"` + PublicIP bool `yaml:"public_ip"` +} + +type StartCmd struct { + TenantUUID string `yaml:"tenant_uuid"` + InstanceUUID string `yaml:"instance_uuid"` + ImageUUID string `yaml:"image_uuid"` + DockerImage string `yaml:"docker_image"` + FWType Firmware `yaml:"fw_type"` + InstancePersistence Persistence `yaml:"persistence"` + VMType Hypervisor `yaml:"vm_type"` + RequestedResources []RequestedResource `yaml:"requested_resources"` + EstimatedResources []EstimatedResource `yaml:"estimated_resources"` + Networking NetworkResources `yaml:"networking"` +} + +type Start struct { + Start StartCmd `yaml:"start"` +} + +type RestartCmd struct { + TenantUUID string `yaml:"tenant_uuid"` + InstanceUUID string `yaml:"instance_uuid"` + ImageUUID string `yaml:"image_uuid"` + WorkloadAgentUUID string `yaml:"workload_agent_uuid"` + FWType Firmware `yaml:"fw_type"` + InstancePersistence Persistence `yaml:"persistence"` + RequestedResources []RequestedResource `yaml:"requested_resources"` + 
EstimatedResources []EstimatedResource `yaml:"estimated_resources"` + Networking NetworkResources `yaml:"networking"` +} + +type Restart struct { + Restart RestartCmd `yaml:"restart"` +} diff --git a/payloads/start_test.go b/payloads/start_test.go new file mode 100644 index 000000000..19f02da3c --- /dev/null +++ b/payloads/start_test.go @@ -0,0 +1,159 @@ +/* +// Copyright (c) 2016 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +*/ + +package payloads + +import ( + "fmt" + "gopkg.in/yaml.v2" + "testing" +) + +func TestStartUnmarshal(t *testing.T) { + startYaml := `start: + instance_uuid: 3390740c-dce9-48d6-b83a-a717417072ce + image_uuid: 59460b8a-5f53-4e3e-b5ce-b71fed8c7e64 + fw_type: efi + persistence: host + vm_type: qemu + requested_resources: + - type: vcpus + value: 2 + mandatory: true + - type: mem_mb + value: 1014 + mandatory: true + - type: disk_mb + value: 10000 + mandatory: true + estimated_resources: + - type: vcpus + value: 1 + - type: mem_mb + value: 128 + - type: disk_mb + value: 4096 +` + var cmd Start + err := yaml.Unmarshal([]byte(startYaml), &cmd) + if err != nil { + t.Error(err) + } + + fmt.Printf("Instance UUID [%s]\n", cmd.Start.InstanceUUID) +} + +func TestStartMarshal(t *testing.T) { + reqVcpus := RequestedResource{ + Type: "vcpus", + Value: 2, + Mandatory: true, + } + reqMem := RequestedResource{ + Type: "mem_mb", + Value: 4096, + Mandatory: true, + } + reqDisk := RequestedResource{ + Type: "disk_mb", + Value: 
10000, + Mandatory: true, + } + estVcpus := EstimatedResource{ + Type: "vcpus", + Value: 1, + } + estMem := EstimatedResource{ + Type: "mem_mb", + Value: 128, + } + estDisk := EstimatedResource{ + Type: "disk_mb", + Value: 4096, + } + var cmd Start + cmd.Start.InstanceUUID = "c73322e8-d5fe-4d57-874c-dcee4fd368cd" + cmd.Start.ImageUUID = "b265f62b-e957-47fd-a0a2-6dc261c7315c" + cmd.Start.DockerImage = "ubuntu/latest" + cmd.Start.RequestedResources = append(cmd.Start.RequestedResources, reqVcpus) + cmd.Start.RequestedResources = append(cmd.Start.RequestedResources, reqMem) + cmd.Start.RequestedResources = append(cmd.Start.RequestedResources, reqDisk) + cmd.Start.EstimatedResources = append(cmd.Start.EstimatedResources, estVcpus) + cmd.Start.EstimatedResources = append(cmd.Start.EstimatedResources, estMem) + cmd.Start.EstimatedResources = append(cmd.Start.EstimatedResources, estDisk) + cmd.Start.FWType = EFI + cmd.Start.InstancePersistence = Host + cmd.Start.VMType = QEMU + + y, err := yaml.Marshal(&cmd) + if err != nil { + t.Error(err) + } + fmt.Println(string(y)) +} + +// make sure the yaml can be unmarshaled into the Start struct with +// optional data not present +func TestStartUnmarshalPartial(t *testing.T) { + startYaml := `start: + instance_uuid: 923d1f2b-aabe-4a9b-9982-8664b0e52f93 + image_uuid: 53cdd9ef-228f-4ce1-911d-706c2b41454a + docker_image: ubuntu/latest + fw_type: efi + persistence: host + vm_type: qemu + requested_resources: + - type: vcpus + value: 2 + mandatory: true +` + var cmd Start + err := yaml.Unmarshal([]byte(startYaml), &cmd) + if err != nil { + t.Error(err) + } + fmt.Println(cmd) + + var expectedCmd Start + expectedCmd.Start.InstanceUUID = "923d1f2b-aabe-4a9b-9982-8664b0e52f93" + expectedCmd.Start.ImageUUID = "53cdd9ef-228f-4ce1-911d-706c2b41454a" + expectedCmd.Start.DockerImage = "ubuntu/latest" + expectedCmd.Start.FWType = EFI + expectedCmd.Start.InstancePersistence = Host + expectedCmd.Start.VMType = QEMU + vcpus := RequestedResource{ + 
Type: "vcpus", + Value: 2, + Mandatory: true, + } + expectedCmd.Start.RequestedResources = append(expectedCmd.Start.RequestedResources, vcpus) + + if cmd.Start.InstanceUUID != expectedCmd.Start.InstanceUUID || + cmd.Start.ImageUUID != expectedCmd.Start.ImageUUID || + cmd.Start.DockerImage != expectedCmd.Start.DockerImage || + cmd.Start.FWType != expectedCmd.Start.FWType || + cmd.Start.InstancePersistence != expectedCmd.Start.InstancePersistence || + cmd.Start.VMType != expectedCmd.Start.VMType || + len(cmd.Start.RequestedResources) != 1 || + len(expectedCmd.Start.RequestedResources) != 1 || + cmd.Start.RequestedResources[0].Type != expectedCmd.Start.RequestedResources[0].Type || + cmd.Start.RequestedResources[0].Value != expectedCmd.Start.RequestedResources[0].Value || + cmd.Start.RequestedResources[0].Mandatory != expectedCmd.Start.RequestedResources[0].Mandatory { + t.Error("Unexpected values in Start") + } + + fmt.Println(cmd) +} diff --git a/payloads/startfailure.go b/payloads/startfailure.go new file mode 100644 index 000000000..a580df8c6 --- /dev/null +++ b/payloads/startfailure.go @@ -0,0 +1,72 @@ +/* +// Copyright (c) 2016 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+*/ + +package payloads + +type StartFailureReason string + +const ( + FullCloud StartFailureReason = "full_cloud" + FullComputeNode StartFailureReason = "full_cn" + NoComputeNodes StartFailureReason = "no_cn" + NoNetworkNodes StartFailureReason = "no_net_cn" + InvalidPayload StartFailureReason = "invalid_payload" + InvalidData StartFailureReason = "invalid_data" + AlreadyRunning StartFailureReason = "already_running" + InstanceExists StartFailureReason = "instance_exists" + ImageFailure StartFailureReason = "image_failure" + LaunchFailure StartFailureReason = "launch_failure" + NetworkFailure StartFailureReason = "network_failure" +) + +type ErrorStartFailure struct { + InstanceUUID string `yaml:"instance_uuid"` + Reason StartFailureReason `yaml:"reason"` +} + +func (s *ErrorStartFailure) Init() { + s.InstanceUUID = "" + s.Reason = "" +} + +func (r StartFailureReason) String() string { + switch r { + case FullCloud: + return "Cloud is full" + case FullComputeNode: + return "Compute node is full" + case NoComputeNodes: + return "No compute node available" + case NoNetworkNodes: + return "No network node available" + case InvalidPayload: + return "YAML payload is corrupt" + case InvalidData: + return "Command section of YAML payload is corrupt or missing required information" + case AlreadyRunning: + return "Instance is already running" + case InstanceExists: + return "Instance already exists" + case ImageFailure: + return "Failed to create instance image" + case LaunchFailure: + return "Failed to launch instance" + case NetworkFailure: + return "Failed to create VNIC for instance" + } + + return "" +} diff --git a/payloads/startfailure_test.go b/payloads/startfailure_test.go new file mode 100644 index 000000000..eba8aa7ad --- /dev/null +++ b/payloads/startfailure_test.go @@ -0,0 +1,56 @@ +/* +// Copyright (c) 2016 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +*/ + +package payloads + +import ( + "fmt" + "github.com/docker/distribution/uuid" + "gopkg.in/yaml.v2" + "testing" +) + +func TestStartFailureUnmarshal(t *testing.T) { + startFailureYaml := `instance_uuid: 2400bce6-ccc8-4a45-b2aa-b5cc3790077b +reason: full_cloud +` + var error ErrorStartFailure + err := yaml.Unmarshal([]byte(startFailureYaml), &error) + if err != nil { + t.Error(err) + } + + if error.InstanceUUID != "2400bce6-ccc8-4a45-b2aa-b5cc3790077b" { + t.Error("Wrong UUID field") + } + + if error.Reason != FullCloud { + t.Error("Wrong Error field") + } +} + +func TestStartFailureMarshal(t *testing.T) { + error := ErrorStartFailure{ + InstanceUUID: uuid.Generate().String(), + Reason: FullCloud, + } + + y, err := yaml.Marshal(&error) + if err != nil { + t.Error(err) + } + fmt.Println(string(y)) +} diff --git a/payloads/stats.go b/payloads/stats.go new file mode 100644 index 000000000..b8a93268b --- /dev/null +++ b/payloads/stats.go @@ -0,0 +1,83 @@ +/* +// Copyright (c) 2016 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
// InstanceStat reports per-instance runtime statistics gathered by a
// launcher agent.
type InstanceStat struct {
	InstanceUUID string `yaml:"instance_uuid"`
	State        string `yaml:"state"` // one of the state constants below

	// IP address to use to connect to instance via SSH. This
	// is actually the IP address of the CNCI VM.
	// Will be "" if the instance is itself a CNCI VM.
	SSHIP string `yaml:"ssh_ip"`

	// Port number used to access the SSH service running on the
	// VM. This number is computed from the VM's IP address.
	// Will be 0 if the instance is itself a CNCI VM.
	SSHPort int `yaml:"ssh_port"`

	// Memory usage in MB. May be -1 if State != Running.
	MemoryUsageMB int `yaml:"memory_usage_mb"`

	// Disk usage in MB. May be -1 if State = Pending.
	DiskUsageMB int `yaml:"disk_usage_mb"`

	// Percentage of CPU Usage for VM, normalized for VCPUs.
	// May be -1 if State != Running or if launcher has not
	// acquired enough samples to compute the CPU usage.
	// Assuming CPU usage can be computed it will be a value
	// between 0 and 100% regardless of the number of VPCUs.
	// 100% means all your VCPUs are maxed out.
	CPUUsage int `yaml:"cpu_usage"`
}

// NetworkStat describes one network interface of a node.
type NetworkStat struct {
	NodeIP  string `yaml:"ip"`
	NodeMAC string `yaml:"mac"`
}

// Stat is the payload of the STATS SSNTP command, giving a complete
// snapshot of a node and the instances it hosts. Numeric fields use -1
// as "unknown / not reported".
type Stat struct {
	NodeUUID        string `yaml:"node_uuid"`
	Status          string `yaml:"status"`
	MemTotalMB      int    `yaml:"mem_total_mb"`
	MemAvailableMB  int    `yaml:"mem_available_mb"`
	DiskTotalMB     int    `yaml:"disk_total_mb"`
	DiskAvailableMB int    `yaml:"disk_available_mb"`
	Load            int    `yaml:"load"`
	// NOTE(review): name kept as CpusOnline (not CPUsOnline) since the
	// identifier is part of the package API used by callers.
	CpusOnline   int    `yaml:"cpus_online"`
	NodeHostName string `yaml:"hostname"`
	Networks     []NetworkStat
	Instances    []InstanceStat
}

// Valid InstanceStat.State values.
const (
	Pending    = "pending"
	Running    = "running"
	Exited     = "exited"
	ExitFailed = "exit_failed"
	ExitPaused = "exit_paused"
)

// Init resets the Stat to its pre-report state: "" for strings, -1 for
// all numeric statistics and nil slices, so a reused Stat carries no
// stale data from a previous report.
func (s *Stat) Init() {
	s.NodeUUID = ""
	s.Status = ""
	s.MemTotalMB = -1
	s.MemAvailableMB = -1
	s.DiskTotalMB = -1
	s.DiskAvailableMB = -1
	s.Load = -1
	s.CpusOnline = -1
	s.NodeHostName = ""
	s.Networks = nil
	s.Instances = nil
}
+*/ + +package payloads + +import ( + "fmt" + "testing" + + "github.com/docker/distribution/uuid" + "gopkg.in/yaml.v2" +) + +func TestStatsUnmarshal(t *testing.T) { + statsYaml := `node_uuid: 2400bce6-ccc8-4a45-b2aa-b5cc3790077b +status: READY +mem_total_mb: 3896 +mem_available_mb: 3896 +disk_total_mb: 500000 +disk_available_mb: 256000 +load: 0 +cpus_online: 4 +hostname: test +networks: + - ip: 192.168.1.1 + mac: 02:00:15:03:6f:49 + - ip: 10.168.1.1 + mac: 02:00:8c:ba:f9:45 +instances: + - instance_uuid: fe2970fa-7b36-460b-8b79-9eb4745e62f2 + state: running + memory_usage_mb: 40 + disk_usage_mb: 2 + cpu_usage: 90 + ssh_ip: "" + ssh_port: 0 + - instance_uuid: cbda5bd8-33bd-4d39-9f52-ace8c9f0b99c + state: running + memory_usage_mb: 50 + disk_usage_mb: 10 + cpu_usage: 0 + ssh_ip: 172.168.2.2 + ssh_port: 8768 + - instance_uuid: 1f5b2fe6-4493-4561-904a-8f4e956218d9 + state: exited + memory_usage_mb: -1 + disk_usage_mb: 2 + cpu_usage: -1 +` + var cmd Stat + err := yaml.Unmarshal([]byte(statsYaml), &cmd) + if err != nil { + t.Error(err) + } +} + +func TestStatsMarshal(t *testing.T) { + nstats := NetworkStat{ + NodeIP: "192.168.1.1", + NodeMAC: "02:00:0f:57:39:45", + } + istats := InstanceStat{ + InstanceUUID: uuid.Generate().String(), + State: Running, + MemoryUsageMB: 40, + DiskUsageMB: 20, + CPUUsage: 70, + SSHIP: "172.168.0.4", + SSHPort: 33004, + } + cmd := Stat{ + NodeUUID: uuid.Generate().String(), + Status: "READY", + MemTotalMB: 3896, + MemAvailableMB: 3896, + DiskTotalMB: 500000, + DiskAvailableMB: 256000, + Load: 0, + CpusOnline: 4, + NodeHostName: "test", + } + cmd.Instances = append(cmd.Instances, istats) + cmd.Networks = append(cmd.Networks, nstats) + + y, err := yaml.Marshal(&cmd) + if err != nil { + t.Error(err) + } + fmt.Println(string(y)) +} + +// make sure the yaml can be unmarshaled into the Stat struct with +// no instances present +func TestStatsNodeOnly(t *testing.T) { + statsYaml := `node_uuid: 2400bce6-ccc8-4a45-b2aa-b5cc3790077b +mem_total_mb: 
3896 +mem_available_mb: 3896 +disk_total_mb: 500000 +disk_available_mb: 256000 +load: 0 +cpus_online: 4 +hostname: test +networks: + - ip: 192.168.1.1 + mac: 02:00:15:03:6f:49 +` + var cmd Stat + err := yaml.Unmarshal([]byte(statsYaml), &cmd) + if err != nil { + t.Error(err) + } + + expectedCmd := Stat{ + NodeUUID: "2400bce6-ccc8-4a45-b2aa-b5cc3790077b", + MemTotalMB: 3896, + MemAvailableMB: 3896, + DiskTotalMB: 500000, + DiskAvailableMB: 256000, + Load: 0, + CpusOnline: 4, + NodeHostName: "test", + Networks: []NetworkStat{ + { + NodeIP: "192.168.1.1", + NodeMAC: "02:00:15:03:6f:49", + }, + }, + } + if cmd.NodeUUID != expectedCmd.NodeUUID || + cmd.MemTotalMB != expectedCmd.MemTotalMB || + cmd.MemAvailableMB != expectedCmd.MemAvailableMB || + cmd.DiskTotalMB != expectedCmd.DiskTotalMB || + cmd.DiskAvailableMB != expectedCmd.DiskAvailableMB || + cmd.Load != expectedCmd.Load || + cmd.CpusOnline != expectedCmd.CpusOnline || + cmd.NodeHostName != expectedCmd.NodeHostName || + len(cmd.Networks) != 1 || + cmd.Networks[0] != expectedCmd.Networks[0] || + cmd.Instances != nil { + t.Error("Unexpected values in Stat") + } + + fmt.Println(cmd) +} + +// make sure the yaml can be unmarshaled into the Stat struct +// when only some node stats are present +func TestStatsNodeNotAllStats(t *testing.T) { + statsYaml := `node_uuid: 2400bce6-ccc8-4a45-b2aa-b5cc3790077b +load: 1 +` + var cmd Stat + cmd.Init() + + err := yaml.Unmarshal([]byte(statsYaml), &cmd) + if err != nil { + t.Error(err) + } + + expectedCmd := Stat{ + NodeUUID: "2400bce6-ccc8-4a45-b2aa-b5cc3790077b", + MemTotalMB: -1, + MemAvailableMB: -1, + DiskTotalMB: -1, + DiskAvailableMB: -1, + Load: 1, + CpusOnline: -1, + } + if cmd.NodeUUID != expectedCmd.NodeUUID || + cmd.MemTotalMB != expectedCmd.MemTotalMB || + cmd.MemAvailableMB != expectedCmd.MemAvailableMB || + cmd.DiskTotalMB != expectedCmd.DiskTotalMB || + cmd.DiskAvailableMB != expectedCmd.DiskAvailableMB || + cmd.Load != expectedCmd.Load || + cmd.CpusOnline != 
expectedCmd.CpusOnline || + cmd.NodeHostName != expectedCmd.NodeHostName || + cmd.Networks != nil || + cmd.Instances != nil { + t.Error("Unexpected values in Stat") + } + + fmt.Println(cmd) +} diff --git a/payloads/stop.go b/payloads/stop.go new file mode 100644 index 000000000..afedd9df6 --- /dev/null +++ b/payloads/stop.go @@ -0,0 +1,30 @@ +/* +// Copyright (c) 2016 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +*/ + +package payloads + +type StopCmd struct { + InstanceUUID string `yaml:"instance_uuid"` + WorkloadAgentUUID string `yaml:"workload_agent_uuid"` +} + +type Stop struct { + Stop StopCmd `yaml:"stop"` +} + +type Delete struct { + Delete StopCmd `yaml:"delete"` +} diff --git a/payloads/stop_test.go b/payloads/stop_test.go new file mode 100644 index 000000000..7137d8d8b --- /dev/null +++ b/payloads/stop_test.go @@ -0,0 +1,96 @@ +/* +// Copyright (c) 2016 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+*/ + +package payloads + +import ( + "gopkg.in/yaml.v2" + "testing" +) + +const instanceUUID = "3390740c-dce9-48d6-b83a-a717417072ce" +const agentUUID = "59460b8a-5f53-4e3e-b5ce-b71fed8c7e64" + +const stopYaml = "" + + "stop:\n" + + " instance_uuid: " + instanceUUID + "\n" + + " workload_agent_uuid: " + agentUUID + "\n" +const deleteYaml = "" + + "delete:\n" + + " instance_uuid: " + instanceUUID + "\n" + + " workload_agent_uuid: " + agentUUID + "\n" + +func TestStopUnmarshal(t *testing.T) { + var stop Stop + err := yaml.Unmarshal([]byte(stopYaml), &stop) + if err != nil { + t.Error(err) + } + + if stop.Stop.InstanceUUID != instanceUUID { + t.Errorf("Wrong instance UUID field [%s]", stop.Stop.InstanceUUID) + } + + if stop.Stop.WorkloadAgentUUID != agentUUID { + t.Errorf("Wrong Agent UUID field [%s]", stop.Stop.WorkloadAgentUUID) + } +} + +func TestDeleteUnmarshal(t *testing.T) { + var delete Delete + err := yaml.Unmarshal([]byte(deleteYaml), &delete) + if err != nil { + t.Error(err) + } + + if delete.Delete.InstanceUUID != instanceUUID { + t.Errorf("Wrong instance UUID field [%s]", delete.Delete.InstanceUUID) + } + + if delete.Delete.WorkloadAgentUUID != agentUUID { + t.Errorf("Wrong Agent UUID field [%s]", delete.Delete.WorkloadAgentUUID) + } +} + +func TestStopMarshal(t *testing.T) { + var stop Stop + stop.Stop.InstanceUUID = instanceUUID + stop.Stop.WorkloadAgentUUID = agentUUID + + y, err := yaml.Marshal(&stop) + if err != nil { + t.Error(err) + } + + if string(y) != stopYaml { + t.Errorf("STOP marshalling failed\n[%s]\n vs\n[%s]", string(y), stopYaml) + } +} + +func TestDeleteMarshal(t *testing.T) { + var delete Delete + delete.Delete.InstanceUUID = instanceUUID + delete.Delete.WorkloadAgentUUID = agentUUID + + y, err := yaml.Marshal(&delete) + if err != nil { + t.Error(err) + } + + if string(y) != deleteYaml { + t.Errorf("DELETE marshalling failed\n[%s]\n vs\n[%s]", string(y), deleteYaml) + } +} diff --git a/payloads/stopfailure.go b/payloads/stopfailure.go 
// StopFailureReason denotes why an instance could not be stopped or
// deleted. It is carried in the ErrorStopFailure error frame payload.
type StopFailureReason string

const (
	// StopNoInstance - the targeted instance does not exist.
	StopNoInstance StopFailureReason = "no_instance"
	// StopInvalidPayload - the YAML payload could not be parsed.
	StopInvalidPayload StopFailureReason = "invalid_payload"
	// StopInvalidData - the payload parsed but its content is unusable.
	StopInvalidData StopFailureReason = "invalid_data"
	// StopAlreadyStopped - the instance has already shut down.
	StopAlreadyStopped StopFailureReason = "already_stopped"
)

// ErrorStopFailure is the YAML payload of a StopFailure SSNTP error
// frame, identifying the instance that failed to stop and why.
type ErrorStopFailure struct {
	InstanceUUID string            `yaml:"instance_uuid"`
	Reason       StopFailureReason `yaml:"reason"`
}

// Init resets the payload to its empty state so the receiver can
// safely reuse it between frames.
func (s *ErrorStopFailure) Init() {
	s.InstanceUUID = ""
	s.Reason = ""
}

// String returns a human readable description of the failure reason,
// or "" for an unrecognized reason.
func (r StopFailureReason) String() string {
	switch r {
	case StopNoInstance:
		return "Instance does not exist"
	case StopInvalidPayload:
		return "YAML payload is corrupt"
	case StopInvalidData:
		return "Command section of YAML payload is corrupt or missing required information"
	case StopAlreadyStopped:
		return "Instance has already shut down"
	}

	return ""
}
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +*/ + +package payloads + +import ( + "fmt" + "testing" + + "github.com/docker/distribution/uuid" + "gopkg.in/yaml.v2" +) + +func TestStopFailureUnmarshal(t *testing.T) { + stopFailureYaml := `instance_uuid: 2400bce6-ccc8-4a45-b2aa-b5cc3790077b +reason: already_stopped +` + var error ErrorStopFailure + err := yaml.Unmarshal([]byte(stopFailureYaml), &error) + if err != nil { + t.Error(err) + } + + if error.InstanceUUID != "2400bce6-ccc8-4a45-b2aa-b5cc3790077b" { + t.Error("Wrong UUID field") + } + + if error.Reason != StopAlreadyStopped { + t.Error("Wrong Error field") + } +} + +func TestStopFailureMarshal(t *testing.T) { + error := ErrorStopFailure{ + InstanceUUID: uuid.Generate().String(), + Reason: StopNoInstance, + } + + y, err := yaml.Marshal(&error) + if err != nil { + t.Error(err) + } + fmt.Println(string(y)) +} diff --git a/payloads/tenantadded.go b/payloads/tenantadded.go new file mode 100644 index 000000000..22bb0989f --- /dev/null +++ b/payloads/tenantadded.go @@ -0,0 +1,35 @@ +/* +// Copyright (c) 2016 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
// TenantAddedEvent is the common payload of the TenantAdded and
// TenantRemoved SSNTP event frames. It ties a tenant's private subnet
// to the agent reporting the event and to the CNCI (concentrator)
// that routes traffic for that tenant.
type TenantAddedEvent struct {
	// AgentUUID identifies the agent that emitted the event.
	AgentUUID string `yaml:"agent_uuid"`
	// AgentIP is the IP address of that agent's node.
	AgentIP string `yaml:"agent_ip"`
	// TenantUUID identifies the tenant concerned by the event.
	TenantUUID string `yaml:"tenant_uuid"`
	// TenantSubnet is the tenant's private subnet in CIDR notation.
	TenantSubnet string `yaml:"tenant_subnet"`
	// ConcentratorUUID identifies the tenant's CNCI.
	ConcentratorUUID string `yaml:"concentrator_uuid"`
	// ConcentratorIP is the IP address of the CNCI.
	ConcentratorIP string `yaml:"concentrator_ip"`
	// SubnetKey is the tunnel key associated with the tenant subnet.
	SubnetKey int `yaml:"subnet_key"`
}

// EventTenantAdded is the YAML envelope of a TenantAdded event frame.
type EventTenantAdded struct {
	TenantAdded TenantAddedEvent `yaml:"tenant_added"`
}

// EventTenantRemoved is the YAML envelope of a TenantRemoved event
// frame; it reuses TenantAddedEvent since both carry the same fields.
type EventTenantRemoved struct {
	TenantRemoved TenantAddedEvent `yaml:"tenant_removed"`
}
+*/ + +package payloads + +import ( + "testing" + + "gopkg.in/yaml.v2" +) + +const agentIP = "10.2.3.4" +const tenantSubnet = "10.2.0.0/16" +const subnetKey = "8" + +const tenantAddedYaml = "" + + "tenant_added:\n" + + " agent_uuid: " + agentUUID + "\n" + + " agent_ip: " + agentIP + "\n" + + " tenant_uuid: " + tenantUUID + "\n" + + " tenant_subnet: " + tenantSubnet + "\n" + + " concentrator_uuid: " + cnciUUID + "\n" + + " concentrator_ip: " + cnciIP + "\n" + + " subnet_key: " + subnetKey + "\n" + +const tenantRemovedYaml = "" + + "tenant_removed:\n" + + " agent_uuid: " + agentUUID + "\n" + + " agent_ip: " + agentIP + "\n" + + " tenant_uuid: " + tenantUUID + "\n" + + " tenant_subnet: " + tenantSubnet + "\n" + + " concentrator_uuid: " + cnciUUID + "\n" + + " concentrator_ip: " + cnciIP + "\n" + + " subnet_key: " + subnetKey + "\n" + +func TestTenantAddedUnmarshal(t *testing.T) { + var tenantAdded EventTenantAdded + + err := yaml.Unmarshal([]byte(tenantAddedYaml), &tenantAdded) + if err != nil { + t.Error(err) + } + + if tenantAdded.TenantAdded.AgentUUID != agentUUID { + t.Errorf("Wrong agent UUID field [%s]", tenantAdded.TenantAdded.AgentUUID) + } + + if tenantAdded.TenantAdded.AgentIP != agentIP { + t.Errorf("Wrong agent IP field [%s]", tenantAdded.TenantAdded.AgentIP) + } + + if tenantAdded.TenantAdded.TenantUUID != tenantUUID { + t.Errorf("Wrong tenant UUID field [%s]", tenantAdded.TenantAdded.TenantUUID) + } + + if tenantAdded.TenantAdded.TenantSubnet != tenantSubnet { + t.Errorf("Wrong tenant subnet field [%s]", tenantAdded.TenantAdded.TenantSubnet) + } + + if tenantAdded.TenantAdded.ConcentratorUUID != cnciUUID { + t.Errorf("Wrong CNCI UUID field [%s]", tenantAdded.TenantAdded.ConcentratorUUID) + } + + if tenantAdded.TenantAdded.ConcentratorIP != cnciIP { + t.Errorf("Wrong CNCI IP field [%s]", tenantAdded.TenantAdded.ConcentratorIP) + } + +} + +func TestTenantRemovedUnmarshal(t *testing.T) { + var tenantRemoved EventTenantRemoved + + err := 
yaml.Unmarshal([]byte(tenantRemovedYaml), &tenantRemoved) + if err != nil { + t.Error(err) + } + + if tenantRemoved.TenantRemoved.AgentUUID != agentUUID { + t.Errorf("Wrong agent UUID field [%s]", tenantRemoved.TenantRemoved.AgentUUID) + } + + if tenantRemoved.TenantRemoved.AgentIP != agentIP { + t.Errorf("Wrong agent IP field [%s]", tenantRemoved.TenantRemoved.AgentIP) + } + + if tenantRemoved.TenantRemoved.TenantUUID != tenantUUID { + t.Errorf("Wrong tenant UUID field [%s]", tenantRemoved.TenantRemoved.TenantUUID) + } + + if tenantRemoved.TenantRemoved.TenantSubnet != tenantSubnet { + t.Errorf("Wrong tenant subnet field [%s]", tenantRemoved.TenantRemoved.TenantSubnet) + } + + if tenantRemoved.TenantRemoved.ConcentratorUUID != cnciUUID { + t.Errorf("Wrong CNCI UUID field [%s]", tenantRemoved.TenantRemoved.ConcentratorUUID) + } + + if tenantRemoved.TenantRemoved.ConcentratorIP != cnciIP { + t.Errorf("Wrong CNCI IP field [%s]", tenantRemoved.TenantRemoved.ConcentratorIP) + } + +} + +func TestTenantAddedMarshal(t *testing.T) { + var tenantAdded EventTenantAdded + + tenantAdded.TenantAdded.AgentUUID = agentUUID + tenantAdded.TenantAdded.AgentIP = agentIP + tenantAdded.TenantAdded.TenantUUID = tenantUUID + tenantAdded.TenantAdded.TenantSubnet = tenantSubnet + tenantAdded.TenantAdded.ConcentratorUUID = cnciUUID + tenantAdded.TenantAdded.ConcentratorIP = cnciIP + tenantAdded.TenantAdded.SubnetKey = 8 + + y, err := yaml.Marshal(&tenantAdded) + if err != nil { + t.Error(err) + } + + if string(y) != tenantAddedYaml { + t.Errorf("TenantAdded marshalling failed\n[%s]\n vs\n[%s]", string(y), tenantAddedYaml) + } +} + +func TestTenantRemovedMarshal(t *testing.T) { + var tenantRemoved EventTenantRemoved + + tenantRemoved.TenantRemoved.AgentUUID = agentUUID + tenantRemoved.TenantRemoved.AgentIP = agentIP + tenantRemoved.TenantRemoved.TenantUUID = tenantUUID + tenantRemoved.TenantRemoved.TenantSubnet = tenantSubnet + tenantRemoved.TenantRemoved.ConcentratorUUID = cnciUUID + 
tenantRemoved.TenantRemoved.ConcentratorIP = cnciIP + tenantRemoved.TenantRemoved.SubnetKey = 8 + + y, err := yaml.Marshal(&tenantRemoved) + if err != nil { + t.Error(err) + } + + if string(y) != tenantRemovedYaml { + t.Errorf("TenantRemoved marshalling failed\n[%s]\n vs\n[%s]", string(y), tenantRemovedYaml) + } +} diff --git a/payloads/trace.go b/payloads/trace.go new file mode 100644 index 000000000..ad38fec5d --- /dev/null +++ b/payloads/trace.go @@ -0,0 +1,37 @@ +/* +// Copyright (c) 2016 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +*/ + +package payloads + +type SSNTPNode struct { + SSNTPUUID string `yaml:"ssntp_node_uuid"` + SSNTPRole string `yaml:"ssntp_role"` + TxTimestamp string `yaml:"tx_timestamp"` + RxTimestamp string `yaml:"rx_timestamp"` +} + +type FrameTrace struct { + Label string `yaml:"label"` + Type string `yaml:"type"` + Operand string `yaml:"operand"` + StartTimestamp string `yaml:"start_timestamp"` + EndTimestamp string `yaml:"end_timestamp"` + Nodes []SSNTPNode +} + +type Trace struct { + Frames []FrameTrace +} diff --git a/ssntp/README.md b/ssntp/README.md new file mode 100644 index 000000000..c066121a0 --- /dev/null +++ b/ssntp/README.md @@ -0,0 +1,795 @@ +# Simple and Secure Node Transfer Protocol # + +## Overview ## + +The Simple and Secure Node Transfer Protocol (SSNTP) is a custom, fully +asynchronous and TLS based application layer protocol. 
All Cloud Integrated
Advanced Orchestrator (CIAO) components communicate with each other over SSNTP.

SSNTP is designed with simplicity, efficiency and security in mind:

* All SSNTP entities are identified by a Universal Unique IDentifier (UUID).
* All SSNTP frame headers are identical for easy parsing.
* SSNTP payloads are optional.
* SSNTP payloads are YAML formatted.
* SSNTP is a one way protocol where senders do not receive a synchronous
  answer from the receivers.
* Any SSNTP entity can asynchronously send a command, status or event to
  one of its peers.

## SSNTP clients and servers ##

The SSNTP protocol defines 2 entities: SSNTP clients and SSNTP servers.

An SSNTP server listens for and may accept connections from many SSNTP
clients. It never initiates a connection to another SSNTP entity.

An SSNTP client initiates a connection to an SSNTP server and can
only connect to one single server at a time. It does not accept
incoming connections from another SSNTP entity.

Once connected, both clients and servers can initiate SSNTP transfers
at any point in time without having to wait for any kind of SSNTP
acknowledgement from the other end of the connection. SSNTP is a fully
asynchronous protocol.

### Roles ###

All SSNTP entities must declare their role at connection time, as part
of their signed certificate extended key usage attributes.

SSNTP roles allow for:

1. SSNTP frames filtering: Depending on the declared role of the sending entity,
   the receiving party can choose to discard frames and optionally send a
   frame rejection error back.
2. SSNTP frames routing: An SSNTP server implementation can configure frame
   forwarding rules for multicasting specific received SSNTP frame types to
   all connected SSNTP clients with a given role.

There are currently 6 different SSNTP roles:

* SERVER (0x1): A generic SSNTP server.
* Controller (0x2): The CIAO Command and Status Reporting client.
+* AGENT (0x4): The CIAO compute node Agent. It receives workload + commands from the Scheduler and manages workload on a given compute + node accordingly. +* SCHEDULER (0x8): The CIAO workload Scheduler. It receives workload + related commands from the Controller and schedules them on the available compute + nodes. +* NETAGENT (0x10): The CIAO networking compute node Agent. It receives + networking workload commands from the Scheduler and manages workload on a + given networking compute node accordingly. +* CNCIAGENT (0x20): A Compute Node Concentrator Instance Agent runs within + the networking node workload and manages a specific tenant private network. + All instances for this tenant will have a GRE tunnel established between + them and the CNCI, and the CNCI acts as the tenant routing entity. + +## SSNTP connection ## +Before a SSNTP client is allowed to send any frame to a SSNTP server, +or vice versa, both need to successfully go through the SSNTP +connection protocol. +The SSNTP connection is a mandatory step for the client and the +server to verify each other's roles and also to retrieve each other's +UUIDs. + +1. SSNTP client sends a CONNECT command to the SSNTP server. This + frame contains the advertised SSNTP client and this should match + the client's certificate extended key usage attributes. The server + will verify that both match and if they don't it will send a SSNTP + error frame back with a ConnectionAborted (0x6) error code. + The CONNECT frame destination UUID is the nil UUID as the client + does not know the server UUID before getting its CONNECTED frame. + +2. The server asynchronously sends a CONNECTED status frame to the + client in order to notify him about a successful connection. The + CONNECTED frame contains the server advertised role. + The client must verify that the server role matches its certificate + extended key usage attributes. 
If that verification fails the client + must send a SSNTP error frame to the server where the error code is + ConnectionFailure (0x4), and then must close the TLS connection to + the server. + +3. Connection is successfully established. Both ends of the connection + can now asynchronously send SSNTP frames. + +## SSNTP frames ## + +Each SSNTP frame is composed of a fixed length, 8 bytes long header and +an an optional YAML formatted payload. + +### SSNTP header ### + +``` ++----------------------------------------------------------------+ +| Major | Minor | Type | Operand | Payload Length | +| (1 byte) | (1 byte) | (1 byte) | (1 byte) | or Role (4 bytes) | ++----------------------------------------------------------------+ +``` + +* Major is the SSNTP version major number. It is currently 0. +* Minor is the SSNTP version minor number. It is currently 1. +* Type is the SSNTP frame type. There are 4 different frame types: + COMMAND, STATUS, EVENT and ERROR. +* Operand is the SSNTP frame sub-type. +* Payload length is the optional YAML formatted SSNTP payload length + in bytes. It is set to zero for payload less frames. +* Role is the SSNTP entity role. Only the CONNECT command and + CONNECTED status frames are using this field as a role descriptor. + +### SSNTP COMMAND frames ### + +There are 10 different SSNTP COMMAND frames: + +#### CONNECT #### +CONNECT must be the first frame SSNTP clients send when trying to +connect to a SSNTP server. Any frame sent to a SSNTP server from a +client that did not initially sent a CONNECT frame will be discarded +and the TLS connection to the client will be closed. + +The purpose of the CONNECT command frame is for the client to advertise +its role and for the server to verify that the advertised role matches +the client's certificate extended key usage attributes. 
+ +The CONNECT frame is payloadless and its Destination UUID is the nil +UUID: + +``` ++--------------------------------------------------------------------------------------+ +| Major | Minor | Type | Operand | Role | Client UUID | Nil UUID | +| | | (0x0) | (0x0) | (bitmask of client roles) | | | ++--------------------------------------------------------------------------------------+ +``` + +#### START #### +The CIAO Controller client sends the START command to the Scheduler in +order to schedule a new workload. The [START command YAML payload] +(https://github.com/01org/ciao/blob/master/payloads/start.go) +is mandatory and contains a full workload description. + +If the Scheduler finds a compute node (CN) with enough capacity to run +this workload, it will then send a START command to the given Agent +UUID managing this CN with the same payload. + +If the Scheduler cannot find a suitable CN for this workload, it will +asynchronously send a SSNTP ERROR frame back to the Controller. The error +code should be StartFailure (0x2) and the payload must comply with the +[StartFailure YAML schema] (https://github.com/01org/ciao/blob/master/payloads/startfailure.go) +so that the Controller eventually knows that a given instance/workload UUID +could not start. + +Once the Scheduler has sent the START command to an available CN Agent, +it is up to this Agent to actually initialize and start an instance +that matches the START YAML payload. If that fails the Agent should +asynchronously sends a SSNTP ERROR back to the Scheduler and the error +code should be StartFailure (0x2). The Scheduler must then forward that +error frame to the Controller. 
+ +The START command payload is mandatory: + +``` ++--------------------------------------------------------------------------+ +| Major | Minor | Type | Operand | Payload Length | YAML formatted | +| | | (0x0) | (0x1) | | workload description | ++--------------------------------------------------------------------------+ +``` + +#### STOP #### +The CIAO Controller client sends the STOP command to the Scheduler in +order to stop a running instance on a given CN. The [STOP command +YAML payload] (https://github.com/01org/ciao/blob/master/payloads/stop.go) +is mandatory and contains the instance UUID to be stopped and the +agent UUID that manages this instance. + +STOPping an instance means shutting it down. Non persistent +instances are deleted as well when being STOPped. +Persistent instances metadata and disks images are stored and +can be started again through the RESTART SSNTP command. + +There are several error cases related to the STOP command: + +1. If the Scheduler cannot find the Agent identified in the STOP + command payload, it should send a SSNTP error with the + StopFailure (0x3) error code back to the Controller. + +2. If the Agent cannot actually stop the instance (Because e.g. + it's already finished), it should also send a SSNTP error with + the StopFailure (0x3) error code back to the Scheduler. It is + then the Scheduler responsibility to notify the Controller about it + by forwarding this error frame. + +``` ++--------------------------------------------------------------------+ +| Major | Minor | Type | Operand | Payload Length | YAML formatted | +| | | (0x0) | (0x2) | | payload | ++--------------------------------------------------------------------+ +``` + +#### STATS #### +CIAO CN Agents periodically send the STATS command to the Scheduler +in order to provide a complete view of the compute node status. It is +up to the CN Agent implementation to define the STATS sending period. 
+ +Upon reception of Agent STATS commands, the Scheduler must forward it +to the Controller so that it can provide a complete cloud status report back to +the users. + +The STATS command comes with a mandatory [YAML formatted payload] +(https://github.com/01org/ciao/blob/master/payloads/stats.go). + +``` ++----------------------------------------------------------------------------+ +| Major | Minor | Type | Operand | Payload Length | YAML formatted compute | +| | | (0x0) | (0x3) | | node statistics | ++----------------------------------------------------------------------------+ +``` + +#### EVACUATE #### +The CIAO Controller client sends EVACUATE commands to the Scheduler +to ask a specific CIAO Agent to evacuate its compute node, i.e. +stop and migrate all of the current workloads it is monitoring on +its node. + +The [EVACUATE YAML payload] +(https://github.com/01org/ciao/blob/master/payloads/evacuate.go) +is mandatory and describes the next state to reach after evacuation +is done. It could be 'shutdown' for shutting the node down, 'update' +for having it run a software update, 'reboot' for rebooting the node +or 'maintenance' for putting the node in maintenance mode: + +``` ++---------------------------------------------------------------------------------+ +| Major | Minor | Type | Operand | Payload Length | YAML formatted compute | +| | | (0x0) | (0x4) | | node next state description | ++---------------------------------------------------------------------------------+ +``` + +#### DELETE #### +The CIAO Controller client may send DELETE commands in order to +completely remove an already STOPped instance from the cloud. +This command is only relevant for persistent workload based instances +as non persistent instances are implicitly deleted when being STOPed. + +Deleting a persistent instance means completely removing it from +the cloud and thus it should no longer be reachable for e.g. a +RESTART command. 
+
+When asked to delete a non-existing instance the CN Agent
+must reply with a DeleteFailure error frame.
+
+The [DELETE YAML payload schema]
+(https://github.com/01org/ciao/blob/master/payloads/stop.go)
+is the same as the STOP one.
+
+```
++--------------------------------------------------------------------+
+| Major | Minor | Type | Operand | Payload Length | YAML formatted |
+| | | (0x0) | (0x5) | | payload |
++--------------------------------------------------------------------+
+```
+
+#### RESTART ####
+The CIAO Controller client may send RESTART commands in order to
+restart previously STOPped persistent instances.
+Non persistent instances cannot be RESTARTed as they are
+implicitly deleted when being STOPped.
+
+When asked to restart a non-existing instance the CN Agent
+must reply with a RestartFailure error frame.
+
+The [RESTART YAML payload schema]
+(https://github.com/01org/ciao/blob/master/payloads/start.go)
+is the same as the START one.
+
+```
++--------------------------------------------------------------------+
+| Major | Minor | Type | Operand | Payload Length | YAML formatted |
+| | | (0x0) | (0x6) | | payload |
++--------------------------------------------------------------------+
+```
+
+#### AssignPublicIP ####
+AssignPublicIP is a command sent by the Controller to assign
+a publicly routable IP to a given instance. It is sent
+to the Scheduler and must be forwarded to the right CNCI.
+
+The public IP is fetched from a pre-allocated pool
+managed by the Controller.
+
+The [AssignPublicIP YAML payload schema]
+(https://github.com/01org/ciao/blob/master/payloads/assignpublicIP.go)
+is made of the CNCI, the tenant and the instance UUIDs,
+the allocated public IP and the instance private IP and MAC.
+
+```
++--------------------------------------------------------------------+
+| Major | Minor | Type | Operand | Payload Length | YAML formatted |
+| | | (0x0) | (0x7) | | payload |
++--------------------------------------------------------------------+
+```
+
+#### ReleasePublicIP ####
+ReleasePublicIP is a command sent by the Controller to release
+a publicly routable IP from a given instance. It is sent
+to the Scheduler and must be forwarded to the right CNCI.
+
+The released public IP is added back to the Controller managed
+IP pool.
+
+The [ReleasePublicIP YAML payload schema]
+(https://github.com/01org/ciao/blob/master/payloads/assignpublicIP.go)
+is made of the CNCI and the tenant UUIDs, the released
+public IP, the instance private IP and MAC.
+
+```
++--------------------------------------------------------------------+
+| Major | Minor | Type | Operand | Payload Length | YAML formatted |
+| | | (0x0) | (0x8) | | payload |
++--------------------------------------------------------------------+
+```
+
+#### CONFIGURE ####
+CONFIGURE commands are sent to request any SSNTP entity to
+configure itself according to the CONFIGURE command payload.
+Controller or any SSNTP client handling user interfaces defining any
+cloud setting (image service, networking configuration, identity
+management...) must send this command for any configuration
+change and for broadcasting the initial cloud configuration to
+all CN and NN agents.
+
+CONFIGURE commands should be sent in the following cases:
+
+* At cloud boot time, as a broadcast command.
+* For every cloud configuration change.
+* Every time a new agent joins the SSNTP network.
+
+The [CONFIGURE YAML payload]
+(https://github.com/01org/ciao/blob/master/payloads/configure.go)
+always includes the full cloud configuration and not only changes
+compared to the last CONFIGURE command sent.
+
+```
++-----------------------------------------------------------------------------+
+| Major | Minor | Type | Operand | Payload Length | YAML formatted payload |
+| | | (0x0) | (0x9) | | |
++-----------------------------------------------------------------------------+
+```
+
+### SSNTP STATUS frames ###
+
+There are 5 different SSNTP STATUS frames:
+
+#### CONNECTED ####
+CONNECTED is sent by SSNTP servers back to a client to notify it
+that the connection successfully completed.
+
+From the CONNECTED frame the client will gather 2 pieces of
+information:
+
+1. The server UUID. This UUID will be used as the destination UUID
+ for every frame the client sends going forward.
+2. The server Role. The client must verify that the server TLS
+ certificate extended key usage attributes match the advertised
+ server Role. If it does not, the client must discard and close
+ the TLS connection to the server.
+
+The CONNECTED frame is payloadless:
+
+```
++-----------------------------------------------------------------------------------------+
+| Major | Minor | Type | Operand | Role | Server UUID | Client UUID |
+| | | (0x1) | (0x0) | (bitmask of server roles) | | |
++-----------------------------------------------------------------------------------------+
+```
+
+#### READY ####
+SSNTP compute node Agents send READY status frames to let the
+Scheduler know that:
+
+1. Their CN capacity has changed. The new capacity is described
+ in the [READY YAML payload]
+ (https://github.com/01org/ciao/blob/master/payloads/ready.go).
+ This is the main piece of information the Scheduler uses to
+ make its instances scheduling decisions.
+2. They are ready to take further commands, and in particular to
+ start new workloads on the CN they manage. It is important to
+ note that a Scheduler should not send new START commands to
+ a given Agent until it receives the next READY status frame
+ from it.
+ Some Scheduler implementations may implement opportunistic
+ heuristics and send several START commands after receiving a
+ STATUS frame, by forecasting CN capacities based on the START
+ command payloads they previously sent. This allows them to
+ reach shorter average instance startup times at the risk of
+ hitting higher than expected cloud overcommit ratios.
+
+The READY status payload is almost a subset of the STATS command
+one as it does describe the CN capacity status without providing
+any details about the currently running instances. There are
+several differences between READY and STATS:
+
+* READY frames are asynchronous while STATS frames are periodic.
+ Agent implementations will typically send READY status to the
+ Scheduler after successfully starting a new instance on the CN
+ while they send STATS command frames to the Controller every so often.
+* READY frames are typically much smaller than STATS ones as their
+ payload does not contain any instance related status. On CNs
+ running thousands of instances, STATS payloads can be
+ significantly larger than READY ones.
+* Sending a STATS command does not explicitly provide information
+ about the Agent's readiness to process any further instance
+ related commands. For example, an Agent may be busy starting an
+ instance while at the same time sending a STATS command.
+
+As a consequence SSNTP compute node Agents must use the READY and
+FULL status frames, rather than STATS frames, to notify the scheduler
+about their availability and capacity.
+ +The READY status frame payload is mandatory: + +``` ++----------------------------------------------------------------------------+ +| Major | Minor | Type | Operand | Payload Length | YAML formatted compute | +| | | (0x1) | (0x1) | | node new capacity | ++----------------------------------------------------------------------------+ +``` + +#### FULL #### +Whenever the CN they manage runs out of capacity, SSNTP Agents +must send a FULL status frame to the Scheduler. + +The Scheduler must not send any START command to an Agent whose latest status is +reported to be FULL. FULL Agents who receive such commands should reply +with an SSNTP error frame to the Scheduler. +The error code should be StartFailure (0x2) + +The Scheduler may decide to resume sending START commands to a +FULL Agent after receiving the next READY status frame from it. +Any SSNTP command except for the START and CONNECT ones can be +sent to a FULL Agent. + +The FULL status frame is payloadless: + +``` ++---------------------------------------------------+ +| Major | Minor | Type | Operand | Payload Length | +| | | (0x1) | (0x2) | (0x0) | ++---------------------------------------------------+ +``` + +#### OFFLINE #### +OFFLINE is a compute node status frame sent by SSNTP Agents to let +the Scheduler know that although they're running and still +connected to the SSNTP network, they are not ready to process any +kind of SSNTP commands. Agents should reply with a SSNTP error +frame to any received frame while they are OFFLINE. + +The Scheduler should forward OFFLINE status frames to the Controller +for it to immediately know about a CN not being able to process any +further commands. + +SSNTP Agents in OFFLINE mode should continue sending periodic +STATS frame. 
+ +The OFFLINE status frame is payloadless: + +``` ++---------------------------------------------------+ +| Major | Minor | Type | Operand | Payload Length | +| | | (0x1) | (0x3) | (0x0) | ++---------------------------------------------------+ +``` + +#### MAINTENANCE ### +TBD +``` ++-----------------------------------------------------------------------------+ +| Major | Minor | Type | Operand | Payload Length | YAML formatted payload | +| | | (0x1) | (0x4) | | | ++-----------------------------------------------------------------------------+ +``` + +### SSNTP EVENT frames ### + +Unlike STATUS frames, EVENT frames are not necessarily related to +a particular compute node's status. They allow SSNTP entities to +notify each other about important events. + +There are 6 different SSNTP EVENT frames: TenantAdded, +TenantRemoved, InstanceDeleted, ConcentratorInstanceAdded, +PublicIPAssigned and TraceReport. + +#### TenantAdded #### +TenantAdded is used by CN Agents to notify Networking +Agents that the first workload for a given tenant has just started. +Networking agents need to be notified about this so that they can +forward the notification to the right CNCI (Compute Node Concentrator Instance), +i.e. the CNCI running the tenant workload. + +A [TenantAdded event payload] +(https://github.com/01org/ciao/blob/master/payloads/tenantadded.go) +is a YAML formatted one containing the tenant, the agent +and the concentrator instance (CNCI) UUID, the tenant subnet, +the agent and the CNCI IPs, the subnet key and the CNCI MAC. + +The Scheduler receives TenantAdded events from the CN Agent +and must forward them to the appropriate CNCI Agent. 
+
+```
++---------------------------------------------------------------------------+
+| Major | Minor | Type | Operand | Payload Length | YAML formatted tenant |
+| | | (0x3) | (0x0) | | information |
++---------------------------------------------------------------------------+
+```
+
+#### TenantRemoved ####
+TenantRemoved is used by CN Agents to notify Networking
+Agents that the last workload for a given tenant has just
+terminated. Networking agents need to be notified about
+it so that they can forward it to the right CNCI (Compute
+Node Concentrator Instance), i.e. the CNCI running the
+tenant workload.
+
+A [TenantRemoved event payload]
+(https://github.com/01org/ciao/blob/master/payloads/tenantadded.go)
+is a YAML formatted one containing the tenant, the agent
+and the concentrator instance (CNCI) UUID, the tenant subnet,
+the agent and the CNCI IPs, and the subnet key.
+
+The Scheduler receives TenantRemoved events from the CN Agent
+and must forward them to the appropriate CNCI Agent.
+
+```
++---------------------------------------------------------------------------+
+| Major | Minor | Type | Operand | Payload Length | YAML formatted tenant |
+| | | (0x3) | (0x1) | | information |
++---------------------------------------------------------------------------+
+```
+
+#### InstanceDeleted ####
+InstanceDeleted is sent by workload agents to notify
+the scheduler and the Controller that a previously running
+instance has been deleted.
+While the scheduler and the Controller could infer that piece
+of information from the next STATS command (The deleted
+instance would no longer be there) it is safer, simpler
+and less error prone to explicitly send this event.
+
+An [InstanceDeleted event payload]
+(https://github.com/01org/ciao/blob/master/payloads/instancedeleted.go)
+is a YAML formatted one containing the deleted instance UUID.
+
+The Scheduler receives InstanceDeleted events from the
+payload agents and must forward them to the Controller.
+
+```
++---------------------------------------------------------------------------+
+| Major | Minor | Type | Operand | Payload Length | YAML formatted tenant |
+| | | (0x3) | (0x2) | | information |
++---------------------------------------------------------------------------+
+```
+
+#### ConcentratorInstanceAdded ####
+Networking node agents send this event to the Scheduler
+to notify the SSNTP network that a networking concentrator
+instance (CNCI) is now running on this node.
+A CNCI handles the GRE tunnel concentrator for a given
+tenant. Each instance started by this tenant will have a
+GRE tunnel established between it and the CNCI allowing all
+instances for a given tenant to be on the same private
+network.
+
+The Scheduler must forward that event to all Controllers. The Controller
+needs to know about it as it will fetch the CNCI IP and the
+tenant UUID from this event's payload and pass that through
+the START payload when scheduling a new instance for this
+tenant. A tenant's instances cannot be scheduled until the Controller gets
+a ConcentratorInstanceAdded event as instances will be
+isolated as long as the CNCI for this tenant is not running.
+
+A [ConcentratorInstanceAdded event payload]
+(https://github.com/01org/ciao/blob/master/payloads/concentratorinstanceadded.go)
+is a YAML formatted one containing the CNCI IP and the tenant
+UUID on behalf of which the CNCI runs.
+
+```
++---------------------------------------------------------------------------+
+| Major | Minor | Type | Operand | Payload Length | YAML formatted CNCI |
+| | | (0x3) | (0x3) | | information |
++---------------------------------------------------------------------------+
+```
+
+#### PublicIPAssigned ####
+Networking concentrator instances (CNCI) send PublicIPAssigned
+to the Scheduler when they have successfully assigned a public IP
+to a given instance.
+The public IP can either come from a Controller pre-allocated pool,
+or from a control network DHCP server.
+
+The Scheduler must forward those events to the Controller.
+
+The [PublicIPAssigned event payload]
+(https://github.com/01org/ciao/blob/master/payloads/concentratorinstanceadded.go)
+contains the newly assigned public IP, the instance private IP,
+the instance UUID and the concentrator UUID.
+
+```
++--------------------------------------------------------------------+
+| Major | Minor | Type | Operand | Payload Length | YAML formatted |
+| | | (0x3) | (0x4) | | payload |
++--------------------------------------------------------------------+
+```
+
+#### TraceReport ####
+Any SSNTP entity can decide to send a TraceReport event in order
+to let the CIAO controller know about any kind of frame traces.
+
+It is then up to the Controller to interpret and store those traces.
+
+The [TraceReport event payload]
+(https://github.com/01org/ciao/blob/master/payloads/tracereport.go)
+contains a set of frame traces.
+
+```
++----------------------------------------------------------------------------+
+| Major | Minor | Type | Operand | Payload Length | YAML formatted payload |
+| | | (0x3) | (0x5) | | |
++----------------------------------------------------------------------------+
+```
+
+### SSNTP ERROR frames ###
+SSNTP being a fully asynchronous protocol, SSNTP entities are
+not expecting specific frames to be acknowledged or rejected.
+Instead they must be ready to receive asynchronous error
+frames notifying them about an application level error, not
+a frame level one.
+
+There are 7 different SSNTP ERROR frames:
+
+#### InvalidFrameType ####
+When a SSNTP entity receives a frame whose type it does not
+support, it should send an InvalidFrameType error back
+to the sender.
+ +The [InvalidFrameType error payload] +(https://github.com/01org/ciao/blob/master/payloads/invalidframetype.go) +only contains the SSNTP frame type that the receiver could +not process: + +``` ++-----------------------------------------------------------------------------------------------------------+ +| Major | Minor | Type | Operand | Payload Length | Source UUID | Destination UUID | YAML formatted frame | +| | | (0x4) | (0x0) | | | | type information | ++-----------------------------------------------------------------------------------------------------------+ +``` + +#### StartFailure #### +The StartFailure SSNTP error frames must be sent when an +instance could not be started. For example: + +* The Scheduler receives a START command from the Controller but + all its CN Agents are busy or full. In that case the + Scheduler must send a StartFailure error frame back to + the Controller + +* An Agent receives a START command from the Scheduler but + it cannot start the instance. This could happen for many + reasons: + * Malformed START YAML payload + * Compute node is full + In that case the Agent must send a StartFailure error + frame back to the Scheduler and the Scheduler must forward + it to the Controller. + +The [StartFailure YAML payload] +(https://github.com/01org/ciao/blob/master/payloads/startfailure.go) +contains the instance UUID that failed to be started together +with an additional error string. + +``` ++--------------------------------------------------------------------------+ +| Major | Minor | Type | Operand | Payload Length | YAML formatted frame | +| | | (0x4) | (0x1) | | error information | ++--------------------------------------------------------------------------+ +``` + +#### StopFailure #### +When the Controller client needs to stop a running instance on a given CN, +it sends a STOP SSNTP command to the Scheduler. The STOP command +payload contains the instance UUID and the CN Agent UUID where that +instance is running. 
+ +* If the Scheduler can no longer find the CN Agent, it must send + a StopFailure error frame back to the Controller. + +* If the CN Agent cannot stop the instance because, for example, it + is no longer running, it must send a StopFailure error frame back + to the Scheduler and the Scheduler must forward it to the Controller. + +The [StopFailure YAML payload] +(https://github.com/01org/ciao/blob/master/payloads/startfailure.go) +contains the instance UUID that failed to be stopped together +with an additional error string. + +``` ++--------------------------------------------------------------------------+ +| Major | Minor | Type | Operand | Payload Length | YAML formatted frame | +| | | (0x4) | (0x2) | | error information | ++--------------------------------------------------------------------------+ +``` + +#### ConnectionFailure #### +Both SSNTP clients and servers can send a ConnectionFailure error +frame when the initial connection could not be completed but should +be retried. ConnectionFailure is not a fatal error but represents +a transient connection error. + +The ConnectionFailure error frame is payloadless: + +``` ++---------------------------------------------------+ +| Major | Minor | Type | Operand | Payload Length | +| | | (0x4) | (0x3) | (0x0) | ++---------------------------------------------------+ +``` + +#### DeleteFailure #### +When the Controller client wants to delete a stopped instance on a given CN, +it sends a DELETE SSNTP command to the Scheduler. + +* If the Scheduler can no longer find the CN Agent, it must send + a DeleteFailure error frame back to the Controller. + +* If the CN Agent cannot delete the instance because, for example, it + is no longer present, it must send a DeleteFailure error frame back + to the Scheduler and the Scheduler must forward it to the Controller. 
+
+The [DeleteFailure YAML payload]
+(https://github.com/01org/ciao/blob/master/payloads/deletefailure.go)
+contains the instance UUID that failed to be deleted together
+with an additional error string.
+```
++--------------------------------------------------------------------------+
+| Major | Minor | Type | Operand | Payload Length | YAML formatted frame |
+| | | (0x4) | (0x4) | | error information |
++--------------------------------------------------------------------------+
+```
+
+#### RestartFailure ####
+When the Controller client wants to restart a stopped instance on a given CN,
+it sends a RESTART SSNTP command to the Scheduler.
+
+* If the Scheduler can no longer find the CN Agent, it must send
+ a RestartFailure error frame back to the Controller.
+
+* If the CN Agent cannot restart the instance because, for example, it
+ is no longer present, it must send a RestartFailure error frame back
+ to the Scheduler and the Scheduler must forward it to the Controller.
+
+The [RestartFailure YAML payload]
+(https://github.com/01org/ciao/blob/master/payloads/startfailure.go)
+contains the instance UUID that failed to be restarted together
+with an additional error string.
+```
++--------------------------------------------------------------------------+
+| Major | Minor | Type | Operand | Payload Length | YAML formatted frame |
+| | | (0x4) | (0x5) | | error information |
++--------------------------------------------------------------------------+
+```
+
+#### ConnectionAborted ####
+Both SSNTP clients and servers can send a ConnectionAborted error
+frame when either the CONNECT command frame or the CONNECTED status
+frame contain an advertised role that does not match the peer's
+certificate extended key usage attribute.
+
+Sending ConnectionAborted means that for security reasons the connection
+will not be retried.
+ +The ConnectionAborted error frame is payloadless: +``` ++---------------------------------------------------+ +| Major | Minor | Type | Operand | Payload Length | +| | | (0x4) | (0x6) | (0x0) | ++---------------------------------------------------+ +``` diff --git a/ssntp/TODO b/ssntp/TODO new file mode 100644 index 000000000..d21a4964e --- /dev/null +++ b/ssntp/TODO @@ -0,0 +1,8 @@ +- Append to slice when creating command and status frames +- Fuzzy testing +- Real version check +- Server reliability: + * Extended CONNECTED frame for passing alternative servers IP. +- Fix go lint +- Connection/disconnection event forwarding +- TLS hearbeat implementation diff --git a/ssntp/ciao-cert/generate_ciao_cert.go b/ssntp/ciao-cert/generate_ciao_cert.go new file mode 100644 index 000000000..67321950e --- /dev/null +++ b/ssntp/ciao-cert/generate_ciao_cert.go @@ -0,0 +1,285 @@ +// +// Copyright (c) 2016 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Initial implementation based on +// golang/src/pkg/crypto/tls/generate_cert.go +// +// which is: +// +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the golang LICENSE file. 
+// + +package main + +import ( + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/rsa" + "crypto/x509" + "crypto/x509/pkix" + "encoding/pem" + "flag" + "fmt" + "github.com/01org/ciao/ssntp" + "io/ioutil" + "log" + "math/big" + "net" + "os" + "strings" + "time" +) + +var ( + host = flag.String("host", "", "Comma-separated hostnames and IPs to generate a certificate for") + serverCert = flag.String("server-cert", "", "Server certificate for signing a client one") + isServer = flag.Bool("server", false, "Whether this cert should be a server one") + verify = flag.Bool("verify", false, "Verify client certificate") + isElliptic = flag.Bool("elliptic-key", false, "Use elliptic curve algorithms") +) + +func verifyCert(CACert string, certName string) { + bytesServerCert, err := ioutil.ReadFile(CACert) + if err != nil { + log.Printf("Could not load [%s] %s", CACert, err) + } + + bytesClientCert, err := ioutil.ReadFile(certName) + if err != nil { + log.Printf("Could not load [%s] %s", certName, err) + } + + blockClient, _ := pem.Decode(bytesClientCert) + certClient, err := x509.ParseCertificate(blockClient.Bytes) + if err != nil { + log.Printf("Could not parse [%s] %s", certName, err) + } + + roots := x509.NewCertPool() + ok := roots.AppendCertsFromPEM(bytesServerCert) + if !ok { + log.Printf("Could not add CA cert to poll") + } + + opts := x509.VerifyOptions{ + Roots: roots, + } + + if _, err = certClient.Verify(opts); err != nil { + log.Printf("Failed to verify certificate: %s", err) + } +} + +func publicKey(priv interface{}) interface{} { + switch k := priv.(type) { + case *rsa.PrivateKey: + return &k.PublicKey + case *ecdsa.PrivateKey: + return &k.PublicKey + default: + return nil + } +} + +func pemBlockForKey(priv interface{}) *pem.Block { + switch k := priv.(type) { + case *rsa.PrivateKey: + return &pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(k)} + case *ecdsa.PrivateKey: + b, err := x509.MarshalECPrivateKey(k) + if err != nil { + 
fmt.Fprintf(os.Stderr, "Unable to marshal ECDSA private key: %v", err) + os.Exit(2) + } + return &pem.Block{Type: "EC PRIVATE KEY", Bytes: b} + default: + return nil + } +} + +func instructionDisplay(server bool, CAcert string, Cert string, serverCert string) { + if server { + fmt.Printf("--------------------------------------------------------\n") + fmt.Printf("CA certificate: [%s]\n", CAcert) + fmt.Printf("Server certificate: [%s]\n", Cert) + fmt.Printf("--------------------------------------------------------\n") + fmt.Printf("You should now copy \"%s\" and \"%s\" ", CAcert, Cert) + fmt.Printf("to a safe location of your choice, and pass them to your ") + fmt.Printf("SSNTP server through its Config CAcert and Cert fields.\n") + } else { + fmt.Printf("--------------------------------------------------------\n") + fmt.Printf("Server certificate: [%s]\n", serverCert) + fmt.Printf("Client certificate: [%s]\n", Cert) + fmt.Printf("--------------------------------------------------------\n") + fmt.Printf("You should now copy \"%s\" and \"%s\" ", Cert, serverCert) + fmt.Printf("to a safe location of your choice, and pass them to your ") + fmt.Printf("SSNTP client through its Config CAcert and Cert fields.\n") + } +} + +func main() { + var priv, serverPrivKey interface{} + var err error + var CAcertName, certName string + var parentCert x509.Certificate + var role ssntp.Role + + flag.Var(&role, "role", "SSNTP role [agent, scheduler, Controller, netagent, server, cnciagent]") + flag.Parse() + + flag.Parse() + + if len(*host) == 0 { + log.Fatalf("Missing required --host parameter") + } + + if *isServer == false && len(*serverCert) == 0 { + log.Fatalf("Missing required --server-cert parameter") + } + + if *isElliptic == false { + priv, err = rsa.GenerateKey(rand.Reader, 2048) + } else { + priv, err = ecdsa.GenerateKey(elliptic.P521(), rand.Reader) + } + if err != nil { + log.Fatalf("failed to generate private key: %s", err) + } + + notBefore := time.Now() + notAfter := 
notBefore.Add(365 * 24 * time.Hour) + + serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128) + serialNumber, err := rand.Int(rand.Reader, serialNumberLimit) + if err != nil { + log.Fatalf("failed to generate serial number: %s", err) + } + + template := x509.Certificate{ + SerialNumber: serialNumber, + Subject: pkix.Name{ + Organization: []string{"Intel/SSG/OTC"}, + }, + NotBefore: notBefore, + NotAfter: notAfter, + + KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign, + ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageAny}, + EmailAddresses: []string{"ciao@lists.01.org"}, + BasicConstraintsValid: true, + } + + hosts := strings.Split(*host, ",") + firstHost := hosts[0] + for _, h := range hosts { + if ip := net.ParseIP(h); ip != nil { + template.IPAddresses = append(template.IPAddresses, ip) + } else { + template.DNSNames = append(template.DNSNames, h) + } + } + + switch role { + case ssntp.AGENT: + template.UnknownExtKeyUsage = append(template.UnknownExtKeyUsage, ssntp.RoleAgentOID) + case ssntp.SCHEDULER: + template.UnknownExtKeyUsage = append(template.UnknownExtKeyUsage, ssntp.RoleSchedulerOID) + case ssntp.Controller: + template.UnknownExtKeyUsage = append(template.UnknownExtKeyUsage, ssntp.RoleControllerOID) + case ssntp.NETAGENT: + template.UnknownExtKeyUsage = append(template.UnknownExtKeyUsage, ssntp.RoleNetAgentOID) + case ssntp.SERVER: + template.UnknownExtKeyUsage = append(template.UnknownExtKeyUsage, ssntp.RoleServerOID) + case ssntp.CNCIAGENT: + template.UnknownExtKeyUsage = append(template.UnknownExtKeyUsage, ssntp.RoleCNCIAgentOID) + default: + break + } + + if *isServer == true { + template.IsCA = true + CAcertName = fmt.Sprintf("CAcert-server-%s.pem", firstHost) + certName = fmt.Sprintf("cert-server-%s.pem", firstHost) + parentCert = template + serverPrivKey = priv + } else { + certName = fmt.Sprintf("cert-client-%s.pem", firstHost) + // Need to fetch the public and private key from the signer + 
bytesCert, err := ioutil.ReadFile(*serverCert) + if err != nil { + log.Fatal("Could not load %s", *serverCert) + } + + // Parent public key first + certBlock, rest := pem.Decode(bytesCert) + if certBlock == nil { + } + cert, err := x509.ParseCertificate(certBlock.Bytes) + if err != nil { + log.Fatalf("Could not parse %s %s", *serverCert, err) + } + parentCert = *cert + + // Parent private key + privKeyBlock, _ := pem.Decode(rest) + if privKeyBlock == nil { + log.Fatalf("Invalid server certificate %s", certName) + } + if *isElliptic == false { + serverPrivKey, err = x509.ParsePKCS1PrivateKey(privKeyBlock.Bytes) + } else { + serverPrivKey, err = x509.ParseECPrivateKey(privKeyBlock.Bytes) + } + if err != nil { + log.Fatalf("Could not get server private key %s", err) + } + } + + // The certificate is created + // Self signed for the server case + // Signed by --server-cert for the client case + derBytes, err := x509.CreateCertificate(rand.Reader, &template, &parentCert, publicKey(priv), serverPrivKey) + if err != nil { + log.Fatalf("Failed to create certificate: %s", err) + } + + // Create CA certificate, i.e. 
the server public key + if *isServer == true { + CAcertOut, err := os.Create(CAcertName) + if err != nil { + log.Fatalf("failed to open %s for writing: %s", certName, err) + } + pem.Encode(CAcertOut, &pem.Block{Type: "CERTIFICATE", Bytes: derBytes}) + CAcertOut.Close() + } + + // Create certificate: Concatenate public and private key + certOut, err := os.Create(certName) + if err != nil { + log.Fatalf("failed to open %s for writing: %s", certName, err) + } + pem.Encode(certOut, &pem.Block{Type: "CERTIFICATE", Bytes: derBytes}) + pem.Encode(certOut, pemBlockForKey(priv)) + certOut.Close() + + if *isServer == false && *verify == true { + verifyCert(*serverCert, certName) + } + + instructionDisplay(*isServer, CAcertName, certName, *serverCert) +} diff --git a/ssntp/client.go b/ssntp/client.go new file mode 100644 index 000000000..bc5d652e8 --- /dev/null +++ b/ssntp/client.go @@ -0,0 +1,480 @@ +// +// Copyright (c) 2016 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package ssntp + +import ( + "crypto/tls" + "fmt" + "github.com/docker/distribution/uuid" + "math/rand" + "time" +) + +// ClientNotifier is the SSNTP client notification interface. +// Any SSNTP client must implement this interface. +type ClientNotifier interface { + // ConnectNotify notifies of a successful connection to an SSNTP server. + // This notification is mostly useful for clients to know when they're + // being re-connected to the SSNTP server. 
+ ConnectNotify() + + // DisconnectNotify notifies of a SSNTP server disconnection. + // SSNTP Client implementations are not supposed to explicitely + // reconnect, the SSNTP protocol will handle the reconnection. + DisconnectNotify() + + // StatusNotify notifies of a pending status frame from the SSNTP server. + StatusNotify(status Status, frame *Frame) + + // CommandNotify notifies of a pending command frame from the SSNTP server. + CommandNotify(command Command, frame *Frame) + + // EventNotify notifies of a pending event frame from the SSNTP server. + EventNotify(event Event, frame *Frame) + + // ErrorNotify notifies of a pending error frame from the SSNTP server. + // Error frames are always related to the last sent frame. + ErrorNotify(error Error, frame *Frame) +} + +// Client is the SSNTP client structure. +// This is an SSNTP client handle to connect to and +// disconnect from an SSNTP server, and send SSNTP +// frames to it. +// It is an entirely opaque structure, only accessible through +// its public methods. 
+type Client struct { + uuid uuid.UUID + lUUID lockedUUID + uris []string + role uint32 + roleVerify bool + tls *tls.Config + ntf ClientNotifier + transport string + port uint32 + session *session + status connectionStatus + closed chan struct{} + + log Logger + + trace *TraceConfig +} + +func handleSSNTPServer(client *Client) { + defer client.Close() + + for { + client.ntf.ConnectNotify() + + for { + client.log.Infof("Waiting for next frame\n") + + var frame Frame + err := client.session.Read(&frame) + if err != nil { + client.status.Lock() + if client.status.status == ssntpClosed { + client.status.Unlock() + return + } + client.status.Unlock() + + client.log.Errorf("Read error: %s\n", err) + client.ntf.DisconnectNotify() + break + } + + switch (Type)(frame.Type) { + case COMMAND: + client.ntf.CommandNotify((Command)(frame.Operand), &frame) + case STATUS: + client.ntf.StatusNotify((Status)(frame.Operand), &frame) + case EVENT: + client.ntf.EventNotify((Event)(frame.Operand), &frame) + case ERROR: + client.ntf.ErrorNotify((Error)(frame.Operand), &frame) + default: + client.SendError(InvalidFrameType, nil) + } + } + + err := client.attemptDial() + if err != nil { + client.log.Errorf("%s", err) + return + } + } +} + +func (client *Client) sendConnect() (bool, error) { + var connected ConnectFrame + client.log.Infof("Sending CONNECT\n") + + connect := client.session.connectFrame() + _, err := client.session.Write(connect) + if err != nil { + return true, err + } + + client.log.Infof("Waiting for CONNECTED\n") + err = client.session.Read(&connected) + if err != nil { + return true, err + } + + client.log.Infof("Received CONNECTED frame:\n%s\n", connected) + + switch connected.Type { + case STATUS: + if connected.Operand != (uint8)(CONNECTED) { + return true, fmt.Errorf("SSNTP Client: Invalid Connected frame") + } + case ERROR: + if connected.Operand != (uint8)(ConnectionFailure) { + return false, fmt.Errorf("SSNTP Client: Connection failure") + } + + return true, 
fmt.Errorf("SSNTP Client: Connection error %s\n", (Error)(connected.Operand)) + + default: + return true, fmt.Errorf("SSNTP Client: Unknown frame type %d", connected.Type) + } + + client.session.setDest(connected.Source[:16]) + if client.roleVerify == true { + oidFound, err := verifyRole(client.session.conn, connected.Role) + if oidFound == false { + fmt.Printf("%s\n", err) + client.SendError(ConnectionFailure, nil) + return false, fmt.Errorf("SSNTP Client: Connection failure") + } + } + + client.status.Lock() + client.status.status = ssntpConnected + client.status.Unlock() + + client.log.Infof("Done with connection\n") + + return true, nil +} + +func (client *Client) attemptDial() error { + delays := []int64{5, 10, 20, 40} + + if len(client.uris) == 0 { + return fmt.Errorf("No servers to connect to") + } + + client.status.Lock() + client.closed = make(chan struct{}) + client.status.Unlock() + + source := rand.NewSource(time.Now().UnixNano()) + r := rand.New(source) + + for { + URILoop: + for _, uri := range client.uris { + for d := 0; ; d++ { + client.log.Infof("%s connecting to %s\n", client.uuid, uri) + conn, err := tls.Dial(client.transport, uri, client.tls) + + client.status.Lock() + if client.status.status == ssntpClosed { + client.status.Unlock() + return fmt.Errorf("Connection closed") + } + client.status.Unlock() + + if err != nil { + client.log.Infof("Dial failed %s\n", err.Error()) + + delay := r.Int63n(delays[d%len(delays)]) + delay++ // Avoid waiting for 0 seconds + client.log.Errorf("Could not connect to %s (%s) - retrying in %d seconds\n", uri, err, delay) + + // Wait for delay before reconnecting or return if the client is closed + select { + case <-client.closed: + return fmt.Errorf("Connection closed") + case <-time.After(time.Duration(delay) * time.Second): + break + } + + continue + } + + client.log.Infof("Connected\n") + session := newSession(&client.uuid, client.role, 0, conn) + client.session = session + + break URILoop + + } + } + + if 
client.session == nil { + continue + } + + reconnect, err := client.sendConnect() + if err != nil { + // Dialed but could not connect, try again + client.log.Errorf("%s", err) + client.Close() + if reconnect == true { + continue + } else { + client.ntf.DisconnectNotify() + return err + } + } + + // Dialed and connected, we can proceed + break + } + + return nil +} + +// Dial attempts to connect to a SSNTP server, as specified by the config argument. +// Dial will try and retry to connect to this server and will wait for it to show +// up if it's temporarily unavailable. A client can be closed while it's still +// trying to connect to the SSNTP server, so that one can properly kill a client if +// e.g. no server will ever come alive. +// Once connected a separate routine will listen for server commands, statuses or +// errors and report them back through the SSNTP client notifier interface. +func (client *Client) Dial(config *Config, ntf ClientNotifier) error { + if config == nil { + return fmt.Errorf("SSNTP config missing") + } + + client.status.Lock() + + if client.status.status == ssntpConnected || client.status.status == ssntpConnecting { + client.status.Unlock() + return fmt.Errorf("Client already connected") + } + + if client.status.status == ssntpClosed { + client.status.Unlock() + return fmt.Errorf("Client already closed") + } + + client.status.status = ssntpConnecting + + client.status.Unlock() + + if len(config.UUID) == 0 { + var err error + client.lUUID, err = newUUID("client", config.Role) + if err != nil { + fmt.Printf("SSNTP ERROR: Client: Could not fetch a UUID, generating a random one (%s)\n", err) + client.uuid = uuid.Generate() + } else { + client.uuid = client.lUUID.uuid + } + } else { + uuid, _ := uuid.Parse(config.UUID) + client.uuid = uuid + } + + if config.Port != 0 { + client.port = config.Port + } else { + client.port = port + } + + if len(config.URI) == 0 { + client.uris = append(client.uris, fmt.Sprintf("%s:%d", defaultURL, client.port)) + 
} else { + client.uris = append(client.uris, fmt.Sprintf("%s:%d", config.URI, client.port)) + } + + if len(config.Transport) == 0 { + client.transport = "tcp" + } else { + if config.Transport != "tcp" && config.Transport != "unix" { + client.transport = "tcp" + } else { + client.transport = config.Transport + } + } + + client.role = config.Role + client.roleVerify = config.RoleVerification + + if len(config.CAcert) == 0 { + config.CAcert = defaultCA + } + + if len(config.Cert) == 0 { + config.Cert = defaultClientCert + } + + if config.Log == nil { + client.log = errLog + } else { + client.log = config.Log + } + + client.trace = config.Trace + client.ntf = ntf + client.tls = prepareTLSConfig(config, false) + + err := client.attemptDial() + if err != nil { + client.log.Errorf("%s", err) + return err + } + + go handleSSNTPServer(client) + + return nil +} + +// Close terminates the client connection. +func (client *Client) Close() { + client.status.Lock() + if client.status.status == ssntpClosed { + client.status.Unlock() + return + } + + if client.session != nil { + client.session.conn.Close() + } + client.status.status = ssntpClosed + if client.closed != nil { + close(client.closed) + } + client.status.Unlock() + + freeUUID(client.lUUID) +} + +func (client *Client) sendCommand(cmd Command, payload []byte, trace *TraceConfig) (int, error) { + client.status.Lock() + if client.status.status == ssntpClosed { + client.status.Unlock() + return -1, fmt.Errorf("Client not connected") + } + client.status.Unlock() + + session := client.session + frame := session.commandFrame(cmd, payload, trace) + + return session.Write(frame) +} + +func (client *Client) sendStatus(status Status, payload []byte, trace *TraceConfig) (int, error) { + client.status.Lock() + if client.status.status == ssntpClosed { + client.status.Unlock() + return -1, fmt.Errorf("Client not connected") + } + client.status.Unlock() + + session := client.session + frame := session.statusFrame(status, payload, 
trace) + + return session.Write(frame) +} + +func (client *Client) sendEvent(event Event, payload []byte, trace *TraceConfig) (int, error) { + client.status.Lock() + if client.status.status == ssntpClosed { + client.status.Unlock() + return -1, fmt.Errorf("Client not connected") + } + client.status.Unlock() + + session := client.session + frame := session.eventFrame(event, payload, trace) + + return session.Write(frame) +} + +func (client *Client) sendError(error Error, payload []byte, trace *TraceConfig) (int, error) { + client.status.Lock() + if client.status.status == ssntpClosed { + client.status.Unlock() + return -1, fmt.Errorf("Client not connected") + } + client.status.Unlock() + + session := client.session + frame := session.errorFrame(error, payload, trace) + + return session.Write(frame) +} + +// SendCommand sends a specific command and its payload to the SSNTP server. +func (client *Client) SendCommand(cmd Command, payload []byte) (int, error) { + return client.sendCommand(cmd, payload, client.trace) +} + +// SendStatus sends a specific status and its payload to the SSNTP server. +func (client *Client) SendStatus(status Status, payload []byte) (int, error) { + return client.sendStatus(status, payload, client.trace) +} + +// SendEvent sends a specific status and its payload to the SSNTP server. +func (client *Client) SendEvent(event Event, payload []byte) (int, error) { + return client.sendEvent(event, payload, client.trace) +} + +// SendError sends an error back to the SSNTP server. +// This is just for notification purposes, to let e.g. the server know that +// it sent an unexpected frame. +func (client *Client) SendError(error Error, payload []byte) (int, error) { + return client.sendError(error, payload, client.trace) +} + +// SendTracedCommand sends a specific command and its payload to the SSNTP server. +// The SSNTP command frame will be traced according to the trace argument. 
+func (client *Client) SendTracedCommand(cmd Command, payload []byte, trace *TraceConfig) (int, error) { + return client.sendCommand(cmd, payload, trace) +} + +// SendTracedStatus sends a specific status and its payload to the SSNTP server. +// The SSNTP status frame will be traced according to the trace argument. +func (client *Client) SendTracedStatus(status Status, payload []byte, trace *TraceConfig) (int, error) { + return client.sendStatus(status, payload, trace) +} + +// SendTracedEvent sends a specific status and its payload to the SSNTP server. +// The SSNTP event frame will be traced according to the trace argument. +func (client *Client) SendTracedEvent(event Event, payload []byte, trace *TraceConfig) (int, error) { + return client.sendEvent(event, payload, trace) +} + +// SendTracedError sends an error back to the SSNTP server. +// This is just for notification purposes, to let e.g. the server know that +// it sent an unexpected frame. +// The SSNTP error frame will be traced according to the trace argument. +func (client *Client) SendTracedError(error Error, payload []byte, trace *TraceConfig) (int, error) { + return client.sendError(error, payload, trace) +} + +// UUID exports the SSNTP client Universally Unique ID. +func (client *Client) UUID() string { + return client.uuid.String() +} diff --git a/ssntp/example_client_test.go b/ssntp/example_client_test.go new file mode 100644 index 000000000..501aeb984 --- /dev/null +++ b/ssntp/example_client_test.go @@ -0,0 +1,89 @@ +// +// Copyright (c) 2016 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package ssntp + +import ( + "fmt" + "time" +) + +type ssntpEchoClient struct { + ssntp Client + name string +} + +func (client *ssntpEchoClient) ConnectNotify() { + fmt.Printf("%s Connected\n", client.name) +} + +func (client *ssntpEchoClient) DisconnectNotify() { + fmt.Printf("%s disconnected\n", client.name) +} + +func (client *ssntpEchoClient) StatusNotify(status Status, frame *Frame) { + n, err := client.ssntp.SendStatus(status, frame.Payload) + if err != nil { + fmt.Printf("%s\n", err) + } + + fmt.Printf("Echoed %d status bytes\n", n) +} + +func (client *ssntpEchoClient) CommandNotify(command Command, frame *Frame) { + n, err := client.ssntp.SendCommand(command, frame.Payload) + if err != nil { + fmt.Printf("%s\n", err) + } + + fmt.Printf("Echoed %d command bytes\n", n) +} + +func (client *ssntpEchoClient) EventNotify(event Event, frame *Frame) { + n, err := client.ssntp.SendEvent(event, frame.Payload) + if err != nil { + fmt.Printf("%s\n", err) + } + + fmt.Printf("Echoed %d event bytes\n", n) +} + +func (client *ssntpEchoClient) ErrorNotify(error Error, frame *Frame) { + fmt.Printf("ERROR %s\n", error) +} + +func ExampleClient_Dial() { + var config Config + + client := &ssntpEchoClient{ + name: "CIAO Agent", + } + + config.URI = "myCIAOserver.local" + config.CAcert = "CIAOCA.crt" + config.Cert = "agent.pem" + config.Role = uint32(AGENT) + + if client.ssntp.Dial(&config, client) != nil { + fmt.Printf("Could not connect to an SSNTP server\n") + return + } + + // Loop and wait for notifications + for { + time.Sleep(time.Duration(10) * 
time.Second) + } +} diff --git a/ssntp/example_server_test.go b/ssntp/example_server_test.go new file mode 100644 index 000000000..9415bf68b --- /dev/null +++ b/ssntp/example_server_test.go @@ -0,0 +1,99 @@ +// +// Copyright (c) 2016 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package ssntp + +import ( + "fmt" +) + +type logger struct{} + +func (l logger) Infof(format string, args ...interface{}) { + fmt.Printf("INFO: SSNTP Server: "+format, args...) +} + +func (l logger) Errorf(format string, args ...interface{}) { + fmt.Printf("ERROR: SSNTP Server: "+format, args...) +} + +func (l logger) Warningf(format string, args ...interface{}) { + fmt.Printf("WARNING: SSNTP Server: "+format, args...) 
+} + +type ssntpDumpServer struct { + ssntp Server + name string +} + +func (server *ssntpDumpServer) ConnectNotify(uuid string, role uint32) { + fmt.Printf("%s: %s connected (role 0x%x)\n", server.name, uuid, role) +} + +func (server *ssntpDumpServer) DisconnectNotify(uuid string) { + fmt.Printf("%s: %s disconnected\n", server.name, uuid) +} + +func (server *ssntpDumpServer) StatusNotify(uuid string, status Status, frame *Frame) { + fmt.Printf("%s: STATUS %s from %s\n", server.name, status, uuid) +} + +func (server *ssntpDumpServer) CommandNotify(uuid string, command Command, frame *Frame) { + fmt.Printf("%s: COMMAND %s from %s\n", server.name, command, uuid) +} + +func (server *ssntpDumpServer) EventNotify(uuid string, event Event, frame *Frame) { + fmt.Printf("%s: EVENT %s from %s\n", server.name, event, uuid) +} + +func (server *ssntpDumpServer) ErrorNotify(uuid string, error Error, frame *Frame) { + fmt.Printf("%s: ERROR (%s) from %s\n", server.name, error, uuid) +} + +const agentUUID = "3390740c-dce9-48d6-b83a-a717417072ce" + +func (server *ssntpDumpServer) CommandForward(uuid string, command Command, frame *Frame) (dest ForwardDestination) { + dest.AddRecipient(agentUUID) + + return +} + +func ExampleServer_Serve() { + var config Config + + server := &ssntpDumpServer{ + name: "CIAO Echo Server", + } + + config.Log = logger{} + config.CAcert = "MyServer.crt" + config.ForwardRules = []FrameForwardRule{ + + /* All STATS commands forwarded to Controllers. */ + { + Operand: STATS, + Dest: Controller, + }, + + /* For START commands, server.CommandForward will decide where to forward them. 
*/ + { + Operand: START, + CommandForward: server, + }, + } + + server.ssntp.Serve(&config, server) +} diff --git a/ssntp/examples/client.go b/ssntp/examples/client.go new file mode 100644 index 000000000..3f234d313 --- /dev/null +++ b/ssntp/examples/client.go @@ -0,0 +1,155 @@ +// +// Copyright (c) 2016 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// +build ignore + +package main + +import ( + "flag" + "fmt" + "github.com/01org/ciao/ssntp" + "math/rand" + "os" + "runtime/pprof" + "sync" + "time" +) + +type logger struct{} + +func (l logger) Infof(format string, args ...interface{}) { + fmt.Printf("INFO: Client example: "+format, args...) +} + +func (l logger) Errorf(format string, args ...interface{}) { + fmt.Printf("ERROR: Client example: "+format, args...) +} + +func (l logger) Warningf(format string, args ...interface{}) { + fmt.Printf("WARNING: Client example: "+format, args...) 
+} + +type ssntpClient struct { + ssntp ssntp.Client + name string + nCommands int +} + +func (client *ssntpClient) ConnectNotify() { + fmt.Printf("%s connected\n", client.name) +} + +func (client *ssntpClient) DisconnectNotify() { + fmt.Printf("%s disconnected\n", client.name) +} + +func (client *ssntpClient) StatusNotify(status ssntp.Status, frame *ssntp.Frame) { + fmt.Printf("STATUS %s for %s\n", status, client.name) +} + +func (client *ssntpClient) CommandNotify(command ssntp.Command, frame *ssntp.Frame) { + client.nCommands++ +} + +func (client *ssntpClient) EventNotify(event ssntp.Event, frame *ssntp.Frame) { + fmt.Printf("EVENT %s for %s\n", event, client.name) +} + +func (client *ssntpClient) ErrorNotify(error ssntp.Error, frame *ssntp.Frame) { + fmt.Printf("ERROR (%s) for %s\n", error, client.name) +} + +func clientThread(config *ssntp.Config, n int, threads int, nFrames int, delay int, wg *sync.WaitGroup, payloadLen int) { + defer wg.Done() + + client := &ssntpClient{ + name: "CIAO module", + nCommands: 0, + } + + payload := make([]byte, payloadLen) + + fmt.Printf("----- Client [%d] delay [%d] frames [%d] payload [%d bytes] -----\n", n, delay, nFrames, payloadLen) + + if threads > 1 { + source := rand.NewSource(time.Now().UnixNano()) + r := rand.New(source) + time.Sleep(time.Duration(r.Int63n(2000)) * time.Millisecond) + } + + if client.ssntp.Dial(config, client) != nil { + fmt.Printf("Could not connect to an SSNTP server\n") + return + } + fmt.Printf("Client [%d]: Connected\n", n) + + sentFrames := 0 + for i := 0; i < nFrames; i++ { + _, err := client.ssntp.SendCommand(ssntp.STATS, payload) + if err != nil { + fmt.Printf("Could not send STATS: %s\n", err) + } + time.Sleep(time.Duration(delay) * time.Millisecond) + if err == nil { + sentFrames++ + } + } + + fmt.Printf("Client [%d]: Done\n", n) + + client.ssntp.Close() + + fmt.Printf("Sent %d commands, received %d\n", sentFrames, client.nCommands) +} + +func main() { + var serverURL = flag.String("url", 
"localhost", "Server URL") + var cert = flag.String("cert", "/etc/pki/ciao/client.pem", "Client certificate") + var CAcert = flag.String("cacert", "/etc/pki/ciao/ciao_ca_cert.crt", "CA certificate") + var nFrames = flag.Int("frames", 10, "Number of frames to send") + var delay = flag.Int("delay", 500, "Delay(ms) between frames") + var threads = flag.Int("threads", 1, "Number of client threads") + var cpuprofile = flag.String("cpuprofile", "", "Write cpu profile to file") + var payloadLen = flag.Int("payload-len", 0, "Frame payload length") + var role ssntp.Role = ssntp.AGENT + var config ssntp.Config + var wg sync.WaitGroup + + flag.Var(&role, "role", "Client role") + flag.Parse() + + config.URI = *serverURL + config.CAcert = *CAcert + config.Cert = *cert + config.Role = uint32(role) + config.Log = logger{} + + if len(*cpuprofile) != 0 { + f, err := os.Create(*cpuprofile) + if err != nil { + fmt.Print(err) + } + pprof.StartCPUProfile(f) + defer pprof.StopCPUProfile() + } + + for i := 0; i < *threads; i++ { + wg.Add(1) + go clientThread(&config, i, *threads, *nFrames, *delay, &wg, *payloadLen) + } + + wg.Wait() +} diff --git a/ssntp/examples/server.go b/ssntp/examples/server.go new file mode 100644 index 000000000..6c8295938 --- /dev/null +++ b/ssntp/examples/server.go @@ -0,0 +1,119 @@ +// +// Copyright (c) 2016 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// +build ignore + +package main + +import ( + "flag" + "fmt" + "github.com/01org/ciao/ssntp" + "os" + "runtime/pprof" +) + +type ssntpEchoServer struct { + ssntp ssntp.Server + name string + nConnections int + nCommands int + nStatuses int + nErrors int +} + +type logger struct{} + +func (l logger) Infof(format string, args ...interface{}) { + fmt.Printf("INFO: Server example: "+format, args...) +} + +func (l logger) Errorf(format string, args ...interface{}) { + fmt.Printf("ERROR: Server example: "+format, args...) +} + +func (l logger) Warningf(format string, args ...interface{}) { + fmt.Printf("WARNING: Server example: "+format, args...) +} + +func (server *ssntpEchoServer) ConnectNotify(uuid string, role uint32) { + server.nConnections++ + fmt.Printf("%s: %s connected (role 0x%x, current connections %d)\n", server.name, uuid, role, server.nConnections) +} + +func (server *ssntpEchoServer) DisconnectNotify(uuid string) { + server.nConnections-- + fmt.Printf("%s: %s disconnected (current connections %d)\n", server.name, uuid, server.nConnections) +} + +func (server *ssntpEchoServer) StatusNotify(uuid string, status ssntp.Status, frame *ssntp.Frame) { + server.nStatuses++ + fmt.Printf("%s: STATUS (#%d) from %s\n", server.name, server.nStatuses, uuid) + + server.ssntp.SendStatus(uuid, status, frame.Payload) +} + +func (server *ssntpEchoServer) CommandNotify(uuid string, command ssntp.Command, frame *ssntp.Frame) { + server.nCommands++ + + server.ssntp.SendCommand(uuid, command, frame.Payload) +} + +func (server *ssntpEchoServer) EventNotify(uuid string, event ssntp.Event, frame *ssntp.Frame) { +} + +func (server *ssntpEchoServer) ErrorNotify(uuid string, error ssntp.Error, frame *ssntp.Frame) { + server.nErrors++ + fmt.Printf("%s: ERROR (#%d)from %s\n", server.name, server.nErrors, uuid) +} + +func main() { + var cert = flag.String("cert", "/etc/pki/ciao/client.pem", "Client certificate") + var CAcert = flag.String("cacert", "/etc/pki/ciao/ciao_ca_cert.crt", 
"CA certificate") + var cpuprofile = flag.String("cpuprofile", "", "Write cpu profile to file") + var config ssntp.Config + + flag.Parse() + server := &ssntpEchoServer{ + name: "CIAO Echo Server", + nConnections: 0, + nCommands: 0, + nStatuses: 0, + nErrors: 0, + } + + if len(*cpuprofile) != 0 { + f, err := os.Create(*cpuprofile) + if err != nil { + fmt.Print(err) + } + pprof.StartCPUProfile(f) + defer pprof.StopCPUProfile() + } + + config.Log = logger{} + config.CAcert = *CAcert + config.Cert = *cert + // config.DebugInterface = true + // Forward STATS to all Controllers + config.ForwardRules = []ssntp.FrameForwardRule{ + { + Operand: ssntp.STATS, + Dest: ssntp.Controller, + }, + } + + server.ssntp.Serve(&config, server) +} diff --git a/ssntp/forward.go b/ssntp/forward.go new file mode 100644 index 000000000..916024ac8 --- /dev/null +++ b/ssntp/forward.go @@ -0,0 +1,349 @@ +// +// Copyright (c) 2016 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package ssntp + +import ( + "sync" +) + +// ForwardDecision tells SSNTP how it should forward a frame. +// Callers set that value as part of the ForwardDestination +// structure. +type ForwardDecision uint8 + +const ( + // Forward the frame. The recipients are defined by the ForwardDecision + // UUIDs field. + Forward ForwardDecision = iota + + // Discard the frame. The frame will be discarded by SSNTP. + Discard + + // Queue the frame. 
SSNTP will queue the frame and the caller will have to call + // into the SSNTP Server queueing API to fetch it back. + Queue +) + +// ForwardDestination is returned by the forwading interfaces +// and allows the interface implementer to let SSNTP know what +// to do next with a received frame. +// The interface implementer needs to specify if the frame +// should be forwarded, discarded or queued (Decision). +// If the implementer decision is to forward the frame, it +// should also provide a list of recipients to forward it to (UUIDs) +type ForwardDestination struct { + decision ForwardDecision + recipientUUIDs []string +} + +// AddRecipient adds a recipient to a ForwardDestination structure. +// AddRecipient implicitely sets the forwarding decision to Forward +// since adding a recipient means the frame must be forwarded. +func (d *ForwardDestination) AddRecipient(uuid string) { + d.decision = Forward + d.recipientUUIDs = append(d.recipientUUIDs, uuid) +} + +// SetDecision is a helper for setting the ForwardDestination Decision field. +func (d *ForwardDestination) SetDecision(decision ForwardDecision) { + d.decision = decision +} + +// CommandForwarder is the SSNTP Command forwarding interface. +// The uuid argument is the sender's UUID. +type CommandForwarder interface { + CommandForward(uuid string, command Command, frame *Frame) ForwardDestination +} + +// StatusForwarder is the SSNTP Status forwarding interface. +// The uuid argument is the sender's UUID. +type StatusForwarder interface { + StatusForward(uuid string, status Status, frame *Frame) ForwardDestination +} + +// ErrorForwarder is the SSNTP Error forwarding interface. +// The uuid argument is the sender's UUID. +type ErrorForwarder interface { + ErrorForward(uuid string, error Error, frame *Frame) ForwardDestination +} + +// EventForwarder is the SSNTP Event forwarding interface. +// The uuid argument is the sender's UUID. 
+type EventForwarder interface { + EventForward(uuid string, event Event, frame *Frame) ForwardDestination +} + +// FrameForwardRule defines a forwarding rule for a SSNTP frame. +// The rule creator can either choose to forward this frame to +// all clients playing a specified SSNTP role (Dest), or can return +// a forwarding decision back to SSNTP depending on the frame payload (*Forwarder). +// If a frame forwarder interface implementation is provided, the +// Dest field will be ignored. +type FrameForwardRule struct { + // Operand is the SSNTP frame operand to which this rule applies. + Operand interface{} + + // A frame which operand is Operand will be forwarded to all + // SSNTP clients playing the Dest SSNTP role. + // This field is ignored if a forwarding interface is provided. + Dest Role + + // The SSNTP Command forwarding interface implementation for this SSNTP frame. + CommandForward CommandForwarder + + // The SSNTP Status forwarding interface implementation for this SSNTP frame. + StatusForward StatusForwarder + + // The SSNTP Error forwarding interface implementation for this SSNTP frame. + ErrorForward ErrorForwarder + + // The SSNTP Event forwarding interface implementation for this SSNTP frame. + EventForward EventForwarder +} + +type frameForward struct { + forwardRules []FrameForwardRule + forwardMutex sync.RWMutex + forwardCommandDest map[Command][]*session + forwardErrorDest map[Error][]*session + forwardEventDest map[Event][]*session + forwardStatusDest map[Status][]*session + + forwardCommandFunc map[Command]CommandForwarder + forwardStatusFunc map[Status]StatusForwarder + forwardErrorFunc map[Error]ErrorForwarder + forwardEventFunc map[Event]EventForwarder +} + +func (f *frameForward) init(rules []FrameForwardRule) { + /* TODO Validate rules, e.g. 
look for duplicates */ + f.forwardCommandDest = make(map[Command][]*session) + f.forwardErrorDest = make(map[Error][]*session) + f.forwardEventDest = make(map[Event][]*session) + f.forwardStatusDest = make(map[Status][]*session) + f.forwardCommandFunc = make(map[Command]CommandForwarder) + f.forwardStatusFunc = make(map[Status]StatusForwarder) + f.forwardErrorFunc = make(map[Error]ErrorForwarder) + f.forwardEventFunc = make(map[Event]EventForwarder) + + f.forwardMutex.Lock() + + for _, r := range rules { + switch op := r.Operand.(type) { + case Command: + if r.CommandForward != nil { + f.forwardCommandFunc[op] = r.CommandForward + } + case Status: + if r.StatusForward != nil { + f.forwardStatusFunc[op] = r.StatusForward + } + case Error: + if r.ErrorForward != nil { + f.forwardErrorFunc[op] = r.ErrorForward + } + case Event: + if r.EventForward != nil { + f.forwardEventFunc[op] = r.EventForward + } + } + } + + f.forwardMutex.Unlock() +} + +func (f *frameForward) addForwardDestination(session *session) { + f.forwardMutex.Lock() + + for _, r := range f.forwardRules { + if r.Dest == UNKNOWN { + continue + } + + switch op := r.Operand.(type) { + case Command: + if session.destRole&(uint32)(r.Dest) == (uint32)(r.Dest) { + f.forwardCommandDest[op] = append(f.forwardCommandDest[op], session) + } + case Status: + if session.destRole&(uint32)(r.Dest) == (uint32)(r.Dest) { + f.forwardStatusDest[op] = append(f.forwardStatusDest[op], session) + } + case Error: + if session.destRole&(uint32)(r.Dest) == (uint32)(r.Dest) { + f.forwardErrorDest[op] = append(f.forwardErrorDest[op], session) + } + case Event: + if session.destRole&(uint32)(r.Dest) == (uint32)(r.Dest) { + f.forwardEventDest[op] = append(f.forwardEventDest[op], session) + } + } + } + + f.forwardMutex.Unlock() +} + +func (f *frameForward) deleteForwardDestination(dest *session) { + var sessions []*session + + f.forwardMutex.Lock() + + for _, r := range f.forwardRules { + switch op := r.Operand.(type) { + case Command: 
+ sessions = f.forwardCommandDest[op] + for i, s := range sessions { + if s != dest { + continue + } + + f.forwardCommandDest[op] = append(sessions[:i], sessions[i+1:]...) + break + } + case Status: + sessions = f.forwardStatusDest[op] + for i, s := range sessions { + if s != dest { + continue + } + + f.forwardStatusDest[op] = append(sessions[:i], sessions[i+1:]...) + break + } + case Error: + sessions = f.forwardErrorDest[op] + for i, s := range sessions { + if s != dest { + continue + } + + f.forwardErrorDest[op] = append(sessions[:i], sessions[i+1:]...) + break + } + case Event: + sessions = f.forwardEventDest[op] + for i, s := range sessions { + if s != dest { + continue + } + + f.forwardEventDest[op] = append(sessions[:i], sessions[i+1:]...) + break + } + } + } + + f.forwardMutex.Unlock() +} + +func forwardDestination(destination ForwardDestination, server *Server, frame *Frame) { + /* TODO Handle queueing */ + if destination.decision == Discard || destination.recipientUUIDs == nil { + return + } + + server.sessionMutex.RLock() + for _, uuid := range destination.recipientUUIDs { + session := server.sessions[uuid] + if session == nil { + continue + } + + session.Write(frame) + } + server.sessionMutex.RUnlock() +} + +func commandForward(uuid string, f CommandForwarder, cmd Command, server *Server, frame *Frame) { + dest := f.CommandForward(uuid, cmd, frame) + + forwardDestination(dest, server, frame) +} + +func statusForward(uuid string, f StatusForwarder, status Status, server *Server, frame *Frame) { + dest := f.StatusForward(uuid, status, frame) + + forwardDestination(dest, server, frame) +} + +func errorForward(uuid string, f ErrorForwarder, error Error, server *Server, frame *Frame) { + dest := f.ErrorForward(uuid, error, frame) + + forwardDestination(dest, server, frame) +} + +func eventForward(uuid string, f EventForwarder, event Event, server *Server, frame *Frame) { + dest := f.EventForward(uuid, event, frame) + + forwardDestination(dest, server, frame) 
+} + +func (f *frameForward) forwardFrame(server *Server, source *session, operand interface{}, frame *Frame) { + var sessions []*session + src := source.dest.String() + + f.forwardMutex.RLock() + defer f.forwardMutex.RUnlock() + + switch op := operand.(type) { + case Command: + forwarder := f.forwardCommandFunc[op] + if forwarder != nil { + go commandForward(src, forwarder, op, server, frame) + return + } + + sessions = f.forwardCommandDest[op] + case Status: + forwarder := f.forwardStatusFunc[op] + if forwarder != nil { + go statusForward(src, forwarder, op, server, frame) + return + } + + sessions = f.forwardStatusDest[op] + case Error: + forwarder := f.forwardErrorFunc[op] + if forwarder != nil { + go errorForward(src, forwarder, op, server, frame) + return + } + + sessions = f.forwardErrorDest[op] + case Event: + forwarder := f.forwardEventFunc[op] + if forwarder != nil { + go eventForward(src, forwarder, op, server, frame) + return + } + + sessions = f.forwardEventDest[op] + default: + sessions = nil + } + + if sessions == nil { + return + } + + for _, s := range sessions { + if s == source { + continue + } + s.Write(frame) + } +} diff --git a/ssntp/frame.go b/ssntp/frame.go new file mode 100644 index 000000000..7f5f66658 --- /dev/null +++ b/ssntp/frame.go @@ -0,0 +1,259 @@ +// +// Copyright (c) 2016 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +package ssntp + +import ( + "fmt" + "github.com/docker/distribution/uuid" + "time" + + "github.com/01org/ciao/payloads" +) + +// TraceConfig is the SSNTP tracing configuration to be used +// when calling into the client SendTraced* APIs. +type TraceConfig struct { + // Label places a a label in the SSNTP frame sent + // using this config. + Label []byte + + // Start is defined by the API caller to specify when + // operations related to that frames actually started. + // Together with SetEndStamp, this allows for an + // end-to-end timestamping. + Start time.Time + + // PathTrace turns frame timestamping on or off. + PathTrace bool +} + +// Node represent an SSNTP networking node. +type Node struct { + UUID []byte + Role uint32 + TxTimestamp time.Time + RxTimestamp time.Time +} + +// FrameTrace gathers all SSNTP frame tracing information, +// including frame labelling, per Node timestamping and both +// start and end timestamps as provided by the frame API callers. +type FrameTrace struct { + Label []byte + StartTimestamp time.Time + EndTimestamp time.Time + PathLength uint8 + Path []Node +} + +// Frame represents an SSNTP frame structure. +type Frame struct { + Major uint8 + Minor uint8 + Type Type + Operand uint8 + PayloadLength uint32 + Trace *FrameTrace + Payload []byte +} + +// ConnectFrame is the SSNPT connection frame structure. +type ConnectFrame struct { + Major uint8 + Minor uint8 + Type Type + Operand uint8 + Role uint32 + Source []byte + Destination []byte +} + +const majorMask = 0x7f +const pathTraceEnabled = 1 << 7 + +// PathTrace tells if an SSNTP frames contains tracing information or not. 
+func (f Frame) PathTrace() bool { + if f.Trace == nil { + return false + } + + return (f.Major & pathTraceEnabled) == pathTraceEnabled +} + +func (f *Frame) setTrace(trace *TraceConfig) { + if trace == nil || (len(trace.Label) == 0 && trace.PathTrace == false) { + f.Major = f.Major &^ pathTraceEnabled + return + } + + f.Trace = &FrameTrace{Label: trace.Label} + + if trace.PathTrace == true { + f.Major |= pathTraceEnabled + f.Trace.StartTimestamp = trace.Start + } +} + +func (f Frame) major() uint8 { + return f.Major & majorMask +} + +func (f Frame) String() string { + var node uuid.UUID + var op string + t := f.Type + + switch t { + case COMMAND: + op = (Command)(f.Operand).String() + case STATUS: + op = (Status)(f.Operand).String() + case EVENT: + op = (Event)(f.Operand).String() + case ERROR: + op = fmt.Sprintf("%d", f.Operand) + } + + if f.PathTrace() == true { + path := "" + for i, n := range f.Trace.Path { + ts := "" + copy(node[:], n.UUID[:16]) + + if n.RxTimestamp.IsZero() == false { + ts = ts + fmt.Sprintf("\t\tRx %q\n", n.RxTimestamp.Format(time.StampNano)) + } + + if n.TxTimestamp.IsZero() == false { + ts = ts + fmt.Sprintf("\t\tTx %q\n", n.TxTimestamp.Format(time.StampNano)) + } + + path = path + fmt.Sprintf("\n\t\tNode #%d\n\t\tUUID %s\n", i, node) + ts + } + + return fmt.Sprintf("\n\tMajor %d\n\tMinor %d\n\tType %s\n\tOp %s\n\tPayload len %d\n\tPath %s\n", + f.major(), f.Minor, t, op, f.PayloadLength, path) + } + + return fmt.Sprintf("\n\tMajor %d\n\tMinor %d\n\tType %s\n\tOp %s\n\tPayload len %d\n", + f.major(), f.Minor, t, op, f.PayloadLength) +} + +func (f ConnectFrame) String() string { + var dest, src uuid.UUID + var op string + t := f.Type + + switch t { + case COMMAND: + op = (Command)(f.Operand).String() + case STATUS: + op = (Status)(f.Operand).String() + case ERROR: + op = fmt.Sprintf("%d", f.Operand) + } + + copy(src[:], f.Source[:16]) + copy(dest[:], f.Destination[:16]) + + return fmt.Sprintf("\tMajor %d\n\tMinor %d\n\tType %s\n\tOp 
%s\n\tRole %s\n\tSource %s\n\tDestination %s\n", + f.Major, f.Minor, (Type)(f.Type), op, (*Role)(&f.Role), src, dest) +} + +func (f *Frame) addPathNode(session *session) { + if f.PathTrace() == false { + return + } + + node := Node{ + UUID: session.src[:], + Role: session.srcRole, + } + + f.Trace.Path = append(f.Trace.Path, node) + f.Trace.PathLength++ +} + +// Duration returns the time spent between the first frame transmission +// and its last reception. +func (f Frame) Duration() (time.Duration, error) { + if f.PathTrace() != true { + return 0, fmt.Errorf("Timestamps not available") + } + + return f.Trace.Path[f.Trace.PathLength-1].RxTimestamp.Sub(f.Trace.Path[0].TxTimestamp), nil +} + +// SetEndStamp adds the final timestamp to an SSNTP frame. +// This is called by the SSNTP node that believes it's the +// last frame receiver. It provides information to build the +// complete duration of the operation related to an SSNTP frame. +func (f *Frame) SetEndStamp() { + if f.PathTrace() != true { + return + } + + f.Trace.EndTimestamp = time.Now() +} + +// DumpTrace builds SSNTP frame tracing data into a FrameTrace +// payload. Callers typically marshall this structure into a +// TraceReport YAML payload. 
+func (f Frame) DumpTrace() (*payloads.FrameTrace, error) { + var s payloads.FrameTrace + var node uuid.UUID + + if f.PathTrace() != true { + return nil, fmt.Errorf("Traces not available") + } + + s.Label = string(f.Trace.Label) + s.StartTimestamp = f.Trace.StartTimestamp.Format(time.RFC3339Nano) + s.EndTimestamp = f.Trace.EndTimestamp.Format(time.RFC3339Nano) + s.Type = f.Type.String() + + switch f.Type { + case COMMAND: + s.Operand = (Command)(f.Operand).String() + case STATUS: + s.Operand = (Status)(f.Operand).String() + case EVENT: + s.Operand = (Event)(f.Operand).String() + case ERROR: + s.Operand = fmt.Sprintf("%d", f.Operand) + } + + for _, n := range f.Trace.Path { + copy(node[:], n.UUID[:16]) + sNode := payloads.SSNTPNode{ + SSNTPUUID: node.String(), + SSNTPRole: (*Role)(&n.Role).String(), + } + + if n.TxTimestamp.IsZero() == false { + sNode.TxTimestamp = n.TxTimestamp.Format(time.RFC3339Nano) + } + + if n.RxTimestamp.IsZero() == false { + sNode.RxTimestamp = n.RxTimestamp.Format(time.RFC3339Nano) + } + + s.Nodes = append(s.Nodes, sNode) + } + + return &s, nil +} diff --git a/ssntp/server.go b/ssntp/server.go new file mode 100644 index 000000000..e2a79fb28 --- /dev/null +++ b/ssntp/server.go @@ -0,0 +1,448 @@ +// +// Copyright (c) 2016 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +package ssntp + +import ( + "crypto/tls" + "encoding/gob" + "fmt" + "github.com/docker/distribution/uuid" + "net" + "sync" + "time" +) + +// ServerNotifier is the SSNTP server notification interface. +// Any SSNTP server must implement this interface. +type ServerNotifier interface { + // ConnectNotify notifies of a new SSNTP client connection. + ConnectNotify(uuid string, role uint32) + + // DisconnectNotify notifies of a SSNTP client having + // disconnected from us. + DisconnectNotify(uuid string) + + // StatusNotify notifies of a pending status frame. + // The frame comes from a SSNTP client identified by uuid. + StatusNotify(uuid string, status Status, frame *Frame) + + // CommandNotify notifies of a pending command frame. + // The frame comes from a SSNTP client identified by uuid. + CommandNotify(uuid string, command Command, frame *Frame) + + // EventNotify notifies of a pending event frame. + // The frame comes from a SSNTP client identified by uuid. + EventNotify(uuid string, event Event, frame *Frame) + + // ErrorNotify notifies of a pending error frame. + // The frame comes from a SSNTP client identified by uuid. + ErrorNotify(uuid string, error Error, frame *Frame) +} + +// Server is the SSNTP server structure. +// This is an SSNTP server handle to start listening and handling +// SSNTP client connections, and send SSNTP frames to them. +// It is an entirely opaque structure, only accessible through +// its public methods. 
+type Server struct { + uuid uuid.UUID + lUUID lockedUUID + tls *tls.Config + ntf ServerNotifier + sessionMutex sync.RWMutex + sessions map[string]*session + listener net.Listener + stopped boolFlag + stoppedChan chan struct{} + role uint32 + roleVerify bool + clientWg sync.WaitGroup + + forwardRules frameForward + + log Logger + + trace *TraceConfig +} + +func sendConnectionFailure(conn net.Conn) *session { + var session session + encoder := gob.NewEncoder(conn) + + frame := session.errorFrame(ConnectionFailure, nil, nil) + encoder.Encode(frame) + + return nil +} + +func sendConnectionAborted(conn net.Conn) *session { + var session session + encoder := gob.NewEncoder(conn) + + frame := session.errorFrame(ConnectionAborted, nil, nil) + encoder.Encode(frame) + + return nil +} + +func handleClientConnect(server *Server, conn net.Conn) *session { + var connect ConnectFrame + + decoder := gob.NewDecoder(conn) + + server.log.Infof("Waiting for CONNECT\n") + setReadTimeout(conn) + readErr := decoder.Decode(&connect) + clearReadTimeout(conn) + if readErr != nil { + server.log.Errorf("Connect error: %s\n", readErr) + return sendConnectionFailure(conn) + } + + server.log.Infof("Received CONNECT frame:\n%s\n", connect) + + if server.roleVerify == true { + tlscon, ok := conn.(*tls.Conn) + if ok { + oidFound, err := verifyRole(tlscon, connect.Role) + if oidFound == false { + server.log.Errorf("%s\n", err) + return sendConnectionAborted(conn) + } + } + } + + if connect.Type != COMMAND || connect.Operand != (uint8)(CONNECT) { + server.log.Errorf("Invalid Connect frame") + return sendConnectionFailure(conn) + } + + session := newSession(&server.uuid, server.role, connect.Role, conn) + session.setDest(connect.Source[:16]) + + connected := session.connectedFrame(server.role) + + server.log.Infof("Sending CONNECTED\n") + _, writeErr := session.Write(connected) + if writeErr != nil { + server.log.Errorf("Connected error: %s\n", writeErr) + return sendConnectionFailure(conn) + } + + 
return session +} + +func handleSSNTPClient(server *Server, conn net.Conn) { + defer server.clientWg.Done() + defer conn.Close() + + server.log.Infof("New client connection\n") + session := handleClientConnect(server, conn) + if session == nil { + return + } + + uuidString := session.dest.String() + server.addSession(session, uuidString) + server.forwardRules.addForwardDestination(session) + server.ntf.ConnectNotify(uuidString, session.destRole) + + for { + var frame Frame + err := session.Read(&frame) + if err != nil { + server.log.Infof("Client disconnection: %s %d\n", err) + server.ntf.DisconnectNotify(uuidString) + server.forwardRules.deleteForwardDestination(session) + server.removeSession(uuidString) + break + } + + switch frame.Type { + case COMMAND: + server.forwardRules.forwardFrame(server, session, (Command)(frame.Operand), &frame) + server.ntf.CommandNotify(uuidString, (Command)(frame.Operand), &frame) + case STATUS: + server.forwardRules.forwardFrame(server, session, (Status)(frame.Operand), &frame) + server.ntf.StatusNotify(uuidString, (Status)(frame.Operand), &frame) + case EVENT: + server.forwardRules.forwardFrame(server, session, (Event)(frame.Operand), &frame) + server.ntf.EventNotify(uuidString, (Event)(frame.Operand), &frame) + case ERROR: + server.forwardRules.forwardFrame(server, session, (Error)(frame.Operand), &frame) + server.ntf.ErrorNotify(uuidString, (Error)(frame.Operand), &frame) + default: + server.SendError(uuidString, InvalidFrameType, nil) + } + } +} + +/* + * SSNTP Server methods + */ +func (server *Server) addSession(session *session, uuid string) { + server.sessionMutex.Lock() + server.sessions[uuid] = session + server.sessionMutex.Unlock() +} + +func (server *Server) removeSession(uuid string) { + server.sessionMutex.Lock() + delete(server.sessions, uuid) + server.sessionMutex.Unlock() +} + +func (server *Server) getSession(uuid string) *session { + server.sessionMutex.RLock() + session := server.sessions[uuid] + 
server.sessionMutex.RUnlock() + + return session +} + +// Serve starts an SSNTP server that will listen and serve SSNTP client +// connections. Notifiers will be called when new clients connect and +// disconnect. And also when statuses, payloads and errors are received. +func (server *Server) Serve(config *Config, ntf ServerNotifier) error { + var uri string + var serverPort uint32 + + if config == nil { + return fmt.Errorf("SSNTP config missing") + } + + if len(config.UUID) == 0 { + var err error + server.lUUID, err = newUUID("server", config.Role) + if err != nil { + fmt.Printf("SSNTP ERROR: Server: Could not fetch a UUID, generating a random one (%s)\n", err) + server.uuid = uuid.Generate() + } else { + server.uuid = server.lUUID.uuid + } + } else { + uuid, _ := uuid.Parse(config.UUID) + server.uuid = uuid + } + + if len(config.CAcert) == 0 { + config.CAcert = defaultCA + } + + if len(config.Cert) == 0 { + config.Cert = defaultServerCert + } + + if config.Port != 0 { + serverPort = config.Port + } else { + serverPort = port + } + + if len(config.URI) == 0 { + uri = "" + } else { + uri = config.URI + } + + var transport string + + if len(config.Transport) == 0 { + transport = "tcp" + } else { + if config.Transport != "tcp" && config.Transport != "unix" { + transport = "tcp" + } else { + transport = config.Transport + } + } + + if config.Log == nil { + server.log = errLog + } else { + server.log = config.Log + } + + server.ntf = ntf + server.sessions = make(map[string]*session) + server.forwardRules.init(config.ForwardRules) + server.tls = prepareTLSConfig(config, true) + server.forwardRules.forwardRules = config.ForwardRules + server.role = config.Role + server.roleVerify = config.RoleVerification + server.trace = config.Trace + server.stoppedChan = make(chan struct{}) + + service := fmt.Sprintf("%s:%d", uri, serverPort) + listener, err := tls.Listen(transport, service, server.tls) + if err != nil { + server.log.Errorf("Failed to start listener (err=%s) on 
%s\n", err, service) + return err + } + server.log.Infof("Listening on %s\n", service) + + server.listener = listener + defer listener.Close() + + for { + conn, err := listener.Accept() + if err != nil { + server.stopped.Lock() + if server.stopped.flag == true { + server.stopped.Unlock() + break + } + server.stopped.Unlock() + continue + } + + server.clientWg.Add(1) + go handleSSNTPClient(server, conn) + } + + if server.stoppedChan != nil { + close(server.stoppedChan) + } + + return nil +} + +// Stop terminates the server listening operation +// and closes all client connections. +func (server *Server) Stop() { + server.stopped.Lock() + server.stopped.flag = true + server.stopped.Unlock() + if server.listener != nil { + server.listener.Close() + } + + server.sessionMutex.RLock() + for uuid, session := range server.sessions { + server.log.Infof("Closing connection for %s\n", uuid) + session.conn.Close() + } + server.sessionMutex.RUnlock() + + server.clientWg.Wait() + + select { + case <-server.stoppedChan: + break + case <-time.After(2 * time.Second): + server.log.Errorf("Timeout waiting for main server thread\n") + } + + freeUUID(server.lUUID) +} + +func (server *Server) sendCommand(uuid string, cmd Command, payload []byte, trace *TraceConfig) (int, error) { + session := server.getSession(uuid) + if session == nil { + return -1, fmt.Errorf("Unknown UUID %s", uuid) + } + + frame := session.commandFrame(cmd, payload, trace) + return session.Write(frame) +} + +func (server *Server) sendStatus(uuid string, status Status, payload []byte, trace *TraceConfig) (int, error) { + session := server.getSession(uuid) + if session == nil { + return -1, fmt.Errorf("Unknown UUID %s", uuid) + } + + frame := session.statusFrame(status, payload, trace) + return session.Write(frame) +} + +func (server *Server) sendEvent(uuid string, event Event, payload []byte, trace *TraceConfig) (int, error) { + session := server.getSession(uuid) + if session == nil { + return -1, fmt.Errorf("Unknown 
UUID %s", uuid) + } + + frame := session.eventFrame(event, payload, trace) + return session.Write(frame) +} + +func (server *Server) sendError(uuid string, error Error, payload []byte, trace *TraceConfig) (int, error) { + session := server.getSession(uuid) + if session == nil { + return -1, fmt.Errorf("Unknown UUID %s", uuid) + } + + frame := session.errorFrame(error, payload, trace) + return session.Write(frame) +} + +// SendCommand sends a specific command and its payload to a client. +// The client is specified by its uuid +func (server *Server) SendCommand(uuid string, cmd Command, payload []byte) (int, error) { + return server.sendCommand(uuid, cmd, payload, server.trace) +} + +// SendStatus sends a specific status and its payload to a client. +// The client is specified by its uuid +func (server *Server) SendStatus(uuid string, status Status, payload []byte) (int, error) { + return server.sendStatus(uuid, status, payload, server.trace) +} + +// SendEvent sends a specific status and its payload to a client. +// The client is specified by its uuid +func (server *Server) SendEvent(uuid string, event Event, payload []byte) (int, error) { + return server.sendEvent(uuid, event, payload, server.trace) +} + +// SendError sends an error back to a client. +// The client is specified by its uuid +func (server *Server) SendError(uuid string, error Error, payload []byte) (int, error) { + return server.sendError(uuid, error, payload, server.trace) +} + +// SendTracedCommand sends a specific command and its payload to a client. +// The SSNTP command frame will be traced according to the trace argument. +// The client is specified by its uuid +func (server *Server) SendTracedCommand(uuid string, cmd Command, payload []byte, trace *TraceConfig) (int, error) { + return server.sendCommand(uuid, cmd, payload, trace) +} + +// SendTracedStatus sends a specific status and its payload to a client. +// The SSNTP status frame will be traced according to the trace argument. 
+// The client is specified by its uuid +func (server *Server) SendTracedStatus(uuid string, status Status, payload []byte, trace *TraceConfig) (int, error) { + return server.sendStatus(uuid, status, payload, trace) +} + +// SendTracedEvent sends a specific event and its payload to a client. +// The SSNTP event frame will be traced according to the trace argument. +// The client is specified by its uuid +func (server *Server) SendTracedEvent(uuid string, event Event, payload []byte, trace *TraceConfig) (int, error) { + return server.sendEvent(uuid, event, payload, trace) +} + +// SendTracedError sends an error back to a client. +// The SSNTP error frame will be traced according to the trace argument. +// The client is specified by its uuid +func (server *Server) SendTracedError(uuid string, error Error, payload []byte, trace *TraceConfig) (int, error) { + return server.sendError(uuid, error, payload, trace) +} + +// UUID exports the SSNTP server Universally Unique ID. +func (server *Server) UUID() string { + return server.uuid.String() +} diff --git a/ssntp/session.go b/ssntp/session.go new file mode 100644 index 000000000..257e8a1b1 --- /dev/null +++ b/ssntp/session.go @@ -0,0 +1,207 @@ +// +// Copyright (c) 2016 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +package ssntp + +import ( + "encoding/gob" + "github.com/docker/distribution/uuid" + "net" + "time" +) + +func setReadTimeout(conn net.Conn) { + conn.SetReadDeadline(time.Now().Add(readTimeout * time.Second)) +} + +func clearReadTimeout(conn net.Conn) { + conn.SetReadDeadline(time.Time{}) +} + +func setWriteTimeout(conn net.Conn) { + conn.SetWriteDeadline(time.Now().Add(readTimeout * time.Second)) +} + +func clearWriteTimeout(conn net.Conn) { + conn.SetWriteDeadline(time.Time{}) +} + +type session struct { + src uuid.UUID + dest uuid.UUID + srcRole uint32 + destRole uint32 + conn net.Conn + + encoder *gob.Encoder + decoder *gob.Decoder +} + +/* + * session methods + */ +func newSession(src *uuid.UUID, srcRole uint32, destRole uint32, netConn net.Conn) *session { + var session session + + if src != nil { + copy(session.src[:], src[:16]) + } + + session.srcRole = srcRole + session.destRole = destRole + + session.conn = netConn + session.encoder = gob.NewEncoder(netConn) + session.decoder = gob.NewDecoder(netConn) + + return &session +} + +func (session *session) setDest(uuid []byte) { + copy(session.dest[:], uuid[:16]) +} + +func (session *session) connectedFrame(serverRole uint32) (f *ConnectFrame) { + f = &ConnectFrame{ + Major: major, + Minor: minor, + Type: STATUS, + Operand: byte(CONNECTED), + Role: serverRole, + Source: session.src[:], + Destination: session.dest[:], + } + + return f +} + +func (session *session) connectFrame() (f *ConnectFrame) { + f = &ConnectFrame{ + Major: major, + Minor: minor, + Type: COMMAND, + Operand: byte(CONNECT), + Role: session.srcRole, + Source: session.src[:], + Destination: session.dest[:], + } + + return +} + +func (session *session) commandFrame(cmd Command, payload []byte, trace *TraceConfig) (f *Frame) { + f = &Frame{ + Major: major, + Minor: minor, + Type: COMMAND, + Operand: byte(cmd), + PayloadLength: (uint32)(len(payload)), + Payload: payload, + } + + f.setTrace(trace) + f.addPathNode(session) + + return +} + 
+func (session *session) statusFrame(status Status, payload []byte, trace *TraceConfig) (f *Frame) { + f = &Frame{ + Major: major, + Minor: minor, + Type: STATUS, + Operand: byte(status), + PayloadLength: (uint32)(len(payload)), + Payload: payload, + } + + f.setTrace(trace) + f.addPathNode(session) + + return +} + +func (session *session) eventFrame(event Event, payload []byte, trace *TraceConfig) (f *Frame) { + f = &Frame{ + Major: major, + Minor: minor, + Type: EVENT, + Operand: byte(event), + PayloadLength: (uint32)(len(payload)), + Payload: payload, + } + + f.setTrace(trace) + f.addPathNode(session) + + return +} + +func (session *session) errorFrame(error Error, payload []byte, trace *TraceConfig) (f *Frame) { + f = &Frame{ + Major: major, + Minor: minor, + Type: ERROR, + Operand: byte(error), + PayloadLength: (uint32)(len(payload)), + Payload: payload, + } + + f.setTrace(trace) + f.addPathNode(session) + + return +} + +func (session *session) Write(frame interface{}) (int, error) { + switch f := frame.(type) { + case *Frame: + if f.PathTrace() == false { + break + } + + f.Trace.Path[f.Trace.PathLength-1].TxTimestamp = time.Now() + } + + setWriteTimeout(session.conn) + err := session.encoder.Encode(frame) + clearWriteTimeout(session.conn) + + return 0, err +} + +func (session *session) Read(frame interface{}) error { + err := session.decoder.Decode(frame) + + switch f := frame.(type) { + case *Frame: + if f.PathTrace() == false { + break + } + + node := Node{ + UUID: session.src[:], + Role: session.srcRole, + RxTimestamp: time.Now(), + } + + f.Trace.Path = append(f.Trace.Path, node) + f.Trace.PathLength++ + } + + return err + +} diff --git a/ssntp/ssntp.go b/ssntp/ssntp.go new file mode 100644 index 000000000..810c62c4d --- /dev/null +++ b/ssntp/ssntp.go @@ -0,0 +1,951 @@ +// +// Copyright (c) 2016 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package ssntp + +import ( + "crypto/rand" + "crypto/tls" + "crypto/x509" + "encoding/asn1" + "errors" + "fmt" + "io/ioutil" + "log" + "os" + "strings" + "sync" + "syscall" + + "github.com/docker/distribution/uuid" + "github.com/golang/glog" +) + +// Type is the SSNTP frame type. +// It can be COMMAND, STATUS, ERROR or EVENT. +type Type uint8 + +// Command is the SSNTP Command operand. +// It can be CONNECT, START, STOP, STATS, EVACUATE, DELETE, RESTART, +// AssignPublicIP, ReleasePublicIP or CONFIGURE. +type Command uint8 + +// Status is the SSNTP Status operand. +// It can be CONNECTED, READY, FULL, OFFLINE or MAINTENANCE +type Status uint8 + +// Role describes the SSNTP role for the frame sender. +// It can be UNKNOWN, SERVER, Controller, AGENT, SCHEDULER, NETAGENT or CNCIAGENT. +type Role uint32 + +// Error is the SSNTP Error operand. +// It can be InvalidFrameType Error, StartFailure, +// StopFailure, ConnectionFailure, RestartFailure, +// DeleteFailure or ConnectionAborted +type Error uint8 + +// Event is the SSNTP Event operand. +// It can be TenantAdded, TenantRemoval, InstanceDeleted, +// ConcentratorInstanceAdded, PublicIPAssigned or TraceReport. +type Event uint8 + +const ( + // COMMAND frames are meant for SSNTP clients to send commands. + // For example the Controller sends START or STOP commands to launch and + // pause workloads. + // SSNTP being asynchronous SSNTP commands are not replied to. + COMMAND Type = iota + + // STATUS frames are mostly used by the launcher agent to report + // about the node status. 
It is used by the scheduler as an indication + // for its next scheduling decisions. Status frames can be seen as + // a way of building flow control between the scheduler and the launchers. + STATUS + + // ERROR frames contain error reports. Combining the error operand together + // with the Error frame YAML payload allows for building a complete error + // interpretation and description. + // ERROR frames are typically sent for command failures. + ERROR + + // EVENT frames carry asynchronous events that the receiver can decide to + // broadcast or not. + // EVENT frames describe a general, non erratic cluster event. + EVENT +) + +const ( + // CONNECT is the first frame sent by an SSNTP client to establish the SSNTP + // connection. A server will ignore any clients until it sends its first CONNECT + // frame: + // SSNTP CONNECT Command frame + // + // +-------------------------------------------------------------+ + // | Major | Minor | Type | Operand | Role | + // | | | (0x0) | (0x0) | (bitmask of client roles) | + // +-------------------------------------------------------------+ + CONNECT Command = iota + + // START is a command that should reach CIAO agents for scheduling a new + // on the compute node (CN) they manage. It should typically come from the Controller + // entity directly or via the main server: + // SSNTP START Command frame + // + // +-----------------------------------------------------------------------------------------+ + // | Major | Minor | Type | Operand | Payload Length | YAML formatted workload description | + // | | | (0x0) | (0x1) | | | + // +-----------------------------------------------------------------------------------------+ + START + + // STOP is used to ask a CIAO agent to stop a running workload. 
The workload
+	// is identified by its UUID, as part of the YAML formatted payload:
+	// SSNTP STOP Command frame
+	//
+	// +----------------------------------------------------------------------------------+
+	// | Major | Minor | Type | Operand | Payload Length | YAML formatted workload UUID |
+	// | | | (0x0) | (0x2) | | |
+	// +----------------------------------------------------------------------------------+
+	STOP
+
+	// STATS is a command sent by CIAO agents to update the SSNTP network
+	// about their compute node statistics. Agents can send that command to either
+	// the main server or to the Controllers directly. In the former case the server will
+	// be responsible for forwarding it to the known Controllers.
+	// The compute node statistics form the YAML formatted payload for this command:
+	// SSNTP STATS Command frame
+	//
+	// +----------------------------------------------------------------------------+
+	// | Major | Minor | Type | Operand | Payload Length | YAML formatted compute |
+	// | | | (0x0) | (0x3) | | node statistics |
+	// +----------------------------------------------------------------------------+
+	STATS
+
+	// EVACUATE is intended to ask a specific CIAO agent to evacuate its compute
+	// node, i.e. stop and migrate all of the current workloads it is monitoring on
+	// this node. The payload for this command is a YAML formatted description of the
+	// next state to reach after evacuation is done.
It could be 'shutdown' for shutting + // the node down, 'update' for having it run a software update, 'reboot' for rebooting + // the node or 'maintenance' for putting the node in maintenance mode: + // +---------------------------------------------------------------------------------+ + // | Major | Minor | Type | Operand | Payload Length | YAML formatted compute | + // | | | (0x0) | (0x4) | | node next state description | + // +---------------------------------------------------------------------------------+ + EVACUATE + + // DELETE is a command sent to a CIAO CN Agent in order to completely delete a + // running instance. This is only relevant for persistent workloads after they were + // STOPPED. Non persistent workload get deleted when they are STOPPED. + // It is up to the CN Agent implementation to decide what exactly needs to be deleted + // on the CN but a deleted instance will no longer be able to boot. + // The DELETE command payload uses the same YAML schema as the STOP command one, i.e. + // an instance UUID and an agent UUID. + // SSNTP DELETE Command frame + // +------------------------------------------------------------------------------+ + // | Major | Minor | Type | Operand | Payload Length | YAML formatted payload | + // | | | (0x0) | (0x5) | | instance and agent UUIDs | + // +------------------------------------------------------------------------------+ + DELETE + + // RESTART is a command sent to CIAO CN Agents for restarting an instance that was + // previously STOPped. This command is only relevant for persistent workloads since + // non persistent ones are implicitely deleted when STOPped and thus can not be + // RESTARTed. + // The RESTART command payload uses the same YAML schema as the STOP command one, i.e. + // an instance UUID and an agent UUID. 
+	// SSNTP RESTART Command frame
+	// +------------------------------------------------------------------------------+
+	// | Major | Minor | Type | Operand | Payload Length | YAML formatted payload |
+	// | | | (0x0) | (0x6) | | instance and agent UUIDs |
+	// +------------------------------------------------------------------------------+
+	RESTART
+
+	// AssignPublicIP is a command sent by the Controller to assign
+	// a publicly routable IP to a given instance. It is sent
+	// to the Scheduler and must be forwarded to the right CNCI.
+	//
+	// The public IP is fetched from a pre-allocated pool
+	// managed by the Controller.
+	//
+	// The AssignPublicIP YAML payload schema is made of the
+	// CNCI and tenant UUIDs, the allocated public IP, the
+	// instance private IP and MAC.
+	//
+	// SSNTP AssignPublicIP Command frame
+	// +----------------------------------------------------------------------------+
+	// | Major | Minor | Type | Operand | Payload Length | YAML formatted payload |
+	// | | | (0x0) | (0x7) | | |
+	// +----------------------------------------------------------------------------+
+	AssignPublicIP
+
+	// ReleasePublicIP is a command sent by the Controller to release
+	// a publicly routable IP from a given instance. It is sent
+	// to the Scheduler and must be forwarded to the right CNCI.
+	//
+	// The released public IP is added back to the Controller managed
+	// IP pool.
+	//
+	// The ReleasePublicIP YAML payload schema is made of the
+	// CNCI and tenant UUIDs, the released public IP, the
+	// instance private IP and MAC.
+ // + // SSNTP ReleasePublicIP Command frame + // +--------------------------------------------------------------------+ + // | Major | Minor | Type | Operand | Payload Length | YAML formatted | + // | | | (0x0) | (0x8) | | payload | + // +--------------------------------------------------------------------+ + ReleasePublicIP + + // CONFIGURE commands are sent to request any SSNTP entity to + // configure itself according to the CONFIGURE command payload. + // Controller or any SSNTP client handling user interfaces defining any + // cloud setting (image service, networking configuration, identity + // management...) must send this command for any configuration + // change and for broadcasting the initial cloud configuration to + // all CN and NN agents. + // + // The CONFIGURE command payload always include the full cloud + // configuration and not only changes compared to the last CONFIGURE + // command sent. + // + // SSNTP CONFIGURE Command frame + // +-----------------------------------------------------------------------------+ + // | Major | Minor | Type | Operand | Payload Length | YAML formatted payload | + // | | | (0x0) | (0x9) | | | + // +-----------------------------------------------------------------------------+ + CONFIGURE +) + +const ( + // CONNECTED is the reply to a client CONNECT command and thus only SSNTP servers can + // send such frame. 
The CONNECTED status confirms the client that it's connected and + // that it should be prepared to process and send commands and statuses: + // SSNTP CONNECTED Status frame + // + // +-----------------------------------------------------------------------------------------+ + // | Major | Minor | Type | Operand | Role | Server UUID | Client UUID | + // | | | (0x1) | (0x0) | (bitmask of server roles) | | | + // +-----------------------------------------------------------------------------------------+ + CONNECTED Status = iota + + // READY is a status command CIAO agents send to the scheduler to notify them about + // their readiness to launch some more work (Virtual machines, containers or bare metal + // ones). It is the only way for an agent to notify the CIAO scheduler about its + // compute node capacity change and thus its readiness to take some more work. The new + // CN capacity is described in this frame's payload: + // SSNTP READY Status frame + // + // +----------------------------------------------------------------------------+ + // | Major | Minor | Type | Operand | Payload Length | YAML formatted compute | + // | | | (0x1) | (0x1) | | node new capacity | + // +----------------------------------------------------------------------------+ + READY + + // FULL is a status command CIAO agents send to the scheduler to let it know that + // the compute node they control is now running at full capacity, i.e. it can temporarily + // not run any additional work. The scheduler should stop sending START commands to such + // agent until it receives a new READY status with some available capacity from it. 
+ // SSNTP FULL Status frame + // + // +---------------------------------------------------+ + // | Major | Minor | Type | Operand | Payload Length | + // | | | (0x1) | (0x2) | (0x0) | + // +---------------------------------------------------+ + FULL + + // OFFLINE is used by agents to let everyone know that although they're still running + // and connected to the SSNTP network they are not ready to receive any kind of command, + // be it START, STOP or EVACUATE ones. + // + // SSNTP OFFLINE Status frame + // + // +---------------------------------------------------+ + // | Major | Minor | Type | Operand | Payload Length | + // | | | (0x1) | (0x3) | (0x0) | + // +---------------------------------------------------+ + OFFLINE + + // MAINTENANCE is used by agents to let the scheduler know that it entered maintenance + // mode. + // + // SSNTP MAINTENANCE Status frame + // + // +---------------------------------------------------+ + // | Major | Minor | Type | Operand | Payload Length | + // | | | (0x1) | (0x4) | (0x0) | + // +---------------------------------------------------+ + MAINTENANCE +) + +const ( + // TenantAdded is used by workload agents to notify networking agents that the first + // workload for a given tenant has just started. Networking agents need to know about that + // so that they can forward it to the right CNCI (Compute Node Concentrator Instance), i.e. + // the CNCI running the tenant workload. + // SSNTP TenantAdded Event frame + // + // +---------------------------------------------------------------------------+ + // | Major | Minor | Type | Operand | Payload Length | YAML formatted tenant | + // | | | (0x3) | (0x0) | | information | + // +---------------------------------------------------------------------------+ + TenantAdded Event = iota + + // TenantRemoved is used by workload agents to notify networking agents that the last + // workload for a given tenant has just terminated. 
Networking agents need to know about that + // so that they can forward it to the right CNCI (Compute Node Concentrator Instance), i.e. + // the CNCI running the tenant workload. + // SSNTP TenantRemoved Event frame + // + // +--------------------------------------------------------------------------+ + // | Major | Minor | Type | Operand | Payload Length | YAML formatted tenant | + // | | | (0x3) | (0x1) | | information | + // +---------------------------------------------------------------------------+ + TenantRemoved + + // InstanceDeleted is sent by workload agents to notify the scheduler and the Controller that a + // previously running instance has been deleted. While the scheduler and the Controller could infer + // that information from the next STATS command (The deleted instance would no longer be there) + // it is safer, simpler and less error prone to explicitely send this event. + // + // SSNTP InstanceDeleted Event frame + // + // +---------------------------------------------------------------------------+ + // | Major | Minor | Type | Operand | Payload Length | YAML formatted | + // | | | (0x3) | (0x2) | | instance information | + // +---------------------------------------------------------------------------+ + InstanceDeleted + + // ConcentratorInstanceAdded events are sent by networking nodes + // agents to the Scheduler in order to notify the SSNTP network + // that a networking concentrator instance (CNCI) is now running + // on this node. + // A CNCI handles the GRE tunnel concentrator for a given + // tenant. Each instance started by this tenant will have a + // GRE tunnel established between it and the CNCI allowing all + // instances for a given tenant to be on the same private + // network. + // + // The Scheduler must forward that event to all Controllers. 
The Controller + // needs to know about it as it will fetch the CNCI IP and the + // tenant UUID from this event's payload and pass that through + // the START payload when scheduling a new instance for this + // tenant. A tenant instances can not be scheduled until Controller gets + // a ConcentratorInstanceAdded event as instances will be + // isolated as long as the CNCI for this tenant is not running. + // + // SSNTP ConcentratorInstanceAdded Event frame + // + // +--------------------------------------------------------------------------+ + // | Major | Minor | Type | Operand | Payload Length | YAML formatted | + // | | | (0x3) | (0x3) | | CNCI information | + // +--------------------------------------------------------------------------+ + ConcentratorInstanceAdded + + // PublicIPAssigned events are sent by Networking concentrator + // instances (CNCI) to the Scheduler when they successfully + // assigned a public IP to a given instance. + // The public IP can either come from a Controller pre-allocated pool, + // or from a control network DHCP server. + // + // The Scheduler must forward those events to the Controller. + // + // The PublicIPAssigned event payload contains the newly assigned + // public IP, the instance private IP and the instance UUID. + // + // SSNTP PublicIPAssigned Event frame + // + // +----------------------------------------------------------------------------+ + // | Major | Minor | Type | Operand | Payload Length | YAML formatted payload | + // | | | (0x3) | (0x4) | | | + // +----------------------------------------------------------------------------+ + PublicIPAssigned + + // TraceReport events carry a tracing report payload from one + // of the SSNTP clients. 
+	//
+	// SSNTP TraceReport Event frame
+	//
+	// +----------------------------------------------------------------------------+
+	// | Major | Minor | Type | Operand | Payload Length | YAML formatted payload |
+	// | | | (0x3) | (0x5) | | |
+	// +----------------------------------------------------------------------------+
+	TraceReport
+)
+
+// SSNTP clients and servers can have one or several roles and are expected to declare their
+// roles during the SSNTP connection procedure.
+const (
+	UNKNOWN Role = 0x0
+	SERVER = 0x1
+
+	// The Command and Status Reporter. This is a client role.
+	Controller = 0x2
+
+	// The cloud compute node agent. This is a client role.
+	AGENT = 0x4
+
+	// The workload scheduler. This is a server role.
+	SCHEDULER = 0x8
+
+	// The networking compute node agent. This is a client role.
+	NETAGENT = 0x10
+
+	// The networking compute node concentrator instance (CNCI) agent. This is a client role.
+	CNCIAGENT = 0x20
+)
+
+// We use SSL extended key usage attributes for specifying and verifying SSNTP
+// client and server claimed roles.
+// For example if a client claims to be a Controller, then its client certificate
+// extended key usage attribute should contain the right OID for that role.
+var (
+	// RoleAgentOID is the SSNTP Agent Role Object ID.
+	RoleAgentOID = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 343, 8, 1}
+
+	// RoleSchedulerOID is the SSNTP Scheduler Role Object ID.
+	RoleSchedulerOID = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 343, 8, 2}
+
+	// RoleControllerOID is the SSNTP Controller Role Object ID.
+	RoleControllerOID = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 343, 8, 3}
+
+	// RoleNetAgentOID is the SSNTP Networking Agent Role Object ID.
+	RoleNetAgentOID = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 343, 8, 4}
+
+	// RoleServerOID is the SSNTP Server Role Object ID.
+	RoleServerOID = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 343, 8, 5}
+
+	// RoleCNCIAgentOID is the SSNTP Compute Node Concentrator Instance Agent Role Object ID.
+ RoleCNCIAgentOID = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 343, 8, 6} +) + +const ( + // InvalidFrameType is sent when receiving an unsupported frame type. + InvalidFrameType Error = iota + + // StartFailure is sent by launcher agents to report a workload start failure. + StartFailure + + // StopFailure is sent by launcher agents to report a workload pause failure. + StopFailure + + // ConnectionFailure is sent to report an SSNTP connection failure. + // It can be sent by servers and clients. + ConnectionFailure + + // RestartFailure is sent by launcher agents to report a workload re-start failure. + RestartFailure + + // DeleteFailure is sent by launcher agents to report a workload deletion failure. + DeleteFailure + + // ConnectionAborted is sent to report an SSNTP connection abortion. + // This is used for example when receiving bad certificates. + ConnectionAborted +) + +const major = 0 +const minor = 1 +const defaultURL = "localhost" +const port = 8888 +const readTimeout = 30 +const writeTimeout = 30 + +const defaultCA = "/etc/pki/ciao/ca_cert.crt" +const defaultServerCert = "/etc/pki/ciao/server.pem" +const defaultClientCert = "/etc/pki/ciao/client.pem" +const uuidPrefix = "/var/lib/ciao/local/uuid-storage/role" +const uuidLockPrefix = "/tmp/lock/ciao" + +func (t Type) String() string { + switch t { + case COMMAND: + return "COMMAND" + case STATUS: + return "STATUS" + case EVENT: + return "EVENT" + case ERROR: + return "ERROR" + } + + return "" +} + +func (command Command) String() string { + switch command { + case CONNECT: + return "CONNECT" + case START: + return "START" + case STOP: + return "STOP" + case STATS: + return "STATISTICS" + case EVACUATE: + return "EVACUATE" + case DELETE: + return "DELETE" + case RESTART: + return "RESTART" + case AssignPublicIP: + return "Assign public IP" + case ReleasePublicIP: + return "Release public IP" + case CONFIGURE: + return "CONFIGURE" + } + + return "" +} + +func (status Status) String() string { + switch 
status { + case CONNECTED: + return "CONNECTED" + case READY: + return "READY" + case FULL: + return "FULL" + case OFFLINE: + return "OFFLINE" + case MAINTENANCE: + return "MAINTENANCE" + } + + return "" +} + +func (status Event) String() string { + switch status { + case TenantAdded: + return "Tenant Added" + case TenantRemoved: + return "Tenant Removed" + case InstanceDeleted: + return "Instance Deleted" + case ConcentratorInstanceAdded: + return "Network Concentrator Instance Added" + case PublicIPAssigned: + return "Public IP Assigned" + case TraceReport: + return "Trace Report" + } + + return "" +} + +func (error Error) String() string { + switch error { + case InvalidFrameType: + return "Invalid SSNTP frame type" + case StartFailure: + return "Could not start instance" + case StopFailure: + return "Could not stop instance" + case ConnectionFailure: + return "SSNTP Connection failed" + case RestartFailure: + return "Could not restart instance" + case DeleteFailure: + return "Could not delete instance" + case ConnectionAborted: + return "SSNTP Connection aborted" + } + + return "" +} + +func (role *Role) String() string { + switch *role { + case UNKNOWN: + return "Unknown" + case SERVER: + return "Server" + case Controller: + return "Controller" + case AGENT: + return "CN Agent" + case SCHEDULER: + return "Scheduler" + case NETAGENT: + return "Networking Agent" + case CNCIAGENT: + return "Concentrator Instance Agent" + } + + return "" +} + +// Set sets an SSNTP role based on the input string. 
+func (role *Role) Set(value string) error { + for _, r := range strings.Split(value, ",") { + if r == "unknown" { + *role = UNKNOWN + return nil + } else if r == "server" { + *role = SERVER + return nil + } else if r == "Controller" { + *role = Controller + return nil + } else if r == "agent" { + *role = AGENT + return nil + } else if r == "netagent" { + *role = NETAGENT + return nil + } else if r == "scheduler" { + *role = SCHEDULER + return nil + } else if r == "cnciagent" { + *role = CNCIAGENT + return nil + } + + return errors.New("Unknown role") + } + + return nil +} + +// A Config structure is used to configure a SSNTP client or server. +// It is mandatory to provide an SSNTP configuration when starting +// an SSNTP server or when connecting to one as a client. +type Config struct { + // UUID is the client or server UUID string. If set to "", + // the SSNTP package will generate a random one. + UUID string + + // URI semantic differs between servers and clients. + // For clients it represents the the SSNTP server URI + // they want to connect to. + // For servers it represents the URI they will be + // listening on. + // When set to "" SSNTP servers will listen on all interfaces + // and IPs on the running host. + URI string + + // Role is a bitmask of SSNTP roles the client or server intends + // to run. + Role uint32 + + // CACert is the Certification Authority certificate path + // to use when verifiying the peer identity. + // If set to "", /etc/pki/ciao/ciao_ca_cert.crt will be used. + CAcert string + + // Cert is the client or server x509 signed certificate path. + // If set to "", /etc/pki/ciao/client.pem and /etc/pki/ciao/ciao.pem + // will be used for SSNTP clients and server, respectively. + Cert string + + // Transport is the underlying transport protocol. Only "tcp" and "unix" + // transports are supported. The default is "tcp". + Transport string + + // ForwardRules is optional and contains a list of frame forwarding rules. 
+ ForwardRules []FrameForwardRule + + // Log is the SSNTP logging interface. + // If not set, only error messages will be logged. + // The SSNTP Log implementation provides a default logger. + Log Logger + + // When RoleVerification is true the peer declared role will be + // verified by checking that the received certificate extended + // key usage attributes contains the right OID. + RoleVerification bool + + // TCP port to connect (Client) or to listen to (Server). + // This is optional, the default SSNTP port is 8888. + Port uint32 + + // Trace configures the desired level of SSNTP frame tracing. + Trace *TraceConfig +} + +// Logger is an interface for SSNTP users to define their own +// SSNTP tracing routines. +// By default we use errLog and we also provide Log, a glog based +// SSNTPLogger implementation. +type Logger interface { + Errorf(format string, args ...interface{}) + Warningf(format string, args ...interface{}) + Infof(format string, args ...interface{}) +} + +type errorLog struct{} + +func (l errorLog) Errorf(format string, args ...interface{}) { + log.Printf("SSNTP Error: "+format, args...) +} + +func (l errorLog) Warningf(format string, args ...interface{}) { +} + +func (l errorLog) Infof(format string, args ...interface{}) { +} + +var errLog errorLog + +type glogLog struct{} + +func (l glogLog) Infof(format string, args ...interface{}) { + if glog.V(2) { + glog.Infof("SSNTP Info: "+format, args...) + } +} + +func (l glogLog) Errorf(format string, args ...interface{}) { + glog.Errorf("SSNTP Error: "+format, args...) +} + +func (l glogLog) Warningf(format string, args ...interface{}) { + if glog.V(1) { + glog.Warningf("SSNTP Warning: "+format, args...) + } +} + +// Log is a glog based SSNTP Logger implementation. +// Error message will be logged unconditionally. +// Warnings are logged if glog's V >= 1. +// Info messages are logged if glog's V >= 2. 
+var Log glogLog + +type boolFlag struct { + sync.Mutex + flag bool +} + +type ssntpStatus uint32 + +const ( + ssntpIdle ssntpStatus = iota + ssntpConnecting + ssntpConnected + ssntpClosed +) + +type connectionStatus struct { + sync.Mutex + status ssntpStatus +} + +func prepareTLSConfig(config *Config, server bool) *tls.Config { + caPEM, err := ioutil.ReadFile(config.CAcert) + if err != nil { + log.Fatalf("SSNTP: Load CA certificate: %s", err) + } + + certPEM, err := ioutil.ReadFile(config.Cert) + if err != nil { + log.Fatalf("SSNTP: Load Certificate: %s", err) + } + + return prepareTLS(caPEM, certPEM, server) +} + +func prepareTLS(caPEM, certPEM []byte, server bool) *tls.Config { + cert, err := tls.X509KeyPair(certPEM, certPEM) + if err != nil { + log.Printf("SSNTP: Load Key: %s", err) + return nil + } + + certPool := x509.NewCertPool() + if certPool.AppendCertsFromPEM(caPEM) != true { + log.Print("SSNTP: Could not append CA") + return nil + } + + if server == true { + return &tls.Config{ + Certificates: []tls.Certificate{cert}, + RootCAs: certPool, + ClientCAs: certPool, + Rand: rand.Reader, + ClientAuth: tls.RequireAndVerifyClientCert, + InsecureSkipVerify: true, + } + } + + return &tls.Config{ + Certificates: []tls.Certificate{cert}, + RootCAs: certPool, + InsecureSkipVerify: true, + } +} + +func getRoleOID(role uint32) (*asn1.ObjectIdentifier, error) { + switch role { + case AGENT: + return &RoleAgentOID, nil + case SCHEDULER: + return &RoleSchedulerOID, nil + case Controller: + return &RoleControllerOID, nil + case NETAGENT: + return &RoleNetAgentOID, nil + case SERVER: + return &RoleServerOID, nil + case CNCIAGENT: + return &RoleCNCIAgentOID, nil + default: + return nil, fmt.Errorf("Unknown role 0x%x", role) + } + +} + +func verifyRole(conn interface{}, role uint32) (bool, error) { + var oidError = fmt.Errorf("**** TEMPORARY WARNING ****\n*** Wrong certificate or missing/mismatched role OID ***\nIn order to fix this, use the -role option when generating your 
certificates with the ciao-cert tool.\n") + switch tlsConn := conn.(type) { + case *tls.Conn: + state := tlsConn.ConnectionState() + roleOID, err := getRoleOID(role) + if err != nil { + return false, oidError + } + + var oidFound = false + for _, oid := range state.PeerCertificates[0].UnknownExtKeyUsage { + if oid.Equal(*roleOID) { + oidFound = true + break + } + } + + if oidFound == false { + return false, oidError + } + + return true, nil + } + + return false, oidError +} + +const nullUUID = "00000000-0000-0000-0000-000000000000" + +type lockedUUID struct { + lockFd int + uuid uuid.UUID +} + +func newUUID(prefix string, role uint32) (lockedUUID, error) { + uuidFile := fmt.Sprintf("%s/%s/0x%x", uuidPrefix, prefix, role) + uuidLockFile := fmt.Sprintf("%s/%s-role-0x%x", uuidLockPrefix, prefix, role) + _nUUID, _ := uuid.Parse(nullUUID) + nUUID := lockedUUID{ + uuid: _nUUID, + lockFd: -1, + } + + randomUUID := lockedUUID{ + uuid: uuid.Generate(), + lockFd: -1, + } + + /* Create UUID directory if necessary */ + err := os.MkdirAll(uuidPrefix+"/"+prefix, 0755) + if err != nil { + fmt.Printf("Unable to create %s %v\n", uuidPrefix, err) + } + + /* Create CIAO lock directory if necessary */ + err = os.MkdirAll(uuidLockPrefix, 0777) + if err != nil { + fmt.Printf("Unable to create %s %v\n", uuidLockPrefix, err) + return nUUID, err + } + + fd, err := syscall.Open(uuidFile, syscall.O_CREAT|syscall.O_RDWR, syscall.S_IWUSR|syscall.S_IRUSR) + if err != nil { + fmt.Printf("Unable to open UUID file %s %v\n", uuidFile, err) + return nUUID, err + } + + defer func() { _ = syscall.Close(fd) }() + + lockFd, err := syscall.Open(uuidLockFile, syscall.O_CREAT, syscall.S_IWUSR|syscall.S_IRUSR) + if err != nil { + fmt.Printf("Unable to open UUID lock file %s %v\n", uuidLockFile, err) + return nUUID, err + } + + if syscall.Flock(lockFd, syscall.LOCK_EX|syscall.LOCK_NB) != nil { + /* File is already locked, we need to generate a random UUID */ + syscall.Close(lockFd) + return randomUUID, nil + 
} + + uuidArray := make([]byte, 36) + n, err := syscall.Read(fd, uuidArray) + if err != nil { + fmt.Printf("Could not read %s\n", uuidFile) + syscall.Close(lockFd) + return nUUID, err + } + + if n == 0 || n != 36 { + /* 2 cases: */ + /* 1) File was just created or is empty: Write a new UUID */ + /* Or */ + /* 2) File contains garbage - Overwrite with a new UUID */ + newUUID := uuid.Generate() + _, err := syscall.Write(fd, []byte(newUUID.String())) + if err != nil { + fmt.Printf("Could not write %s on %s (%s)\n", newUUID.String(), uuidFile, err) + syscall.Close(lockFd) + return nUUID, err + } + + newLockedUUID := lockedUUID{ + uuid: newUUID, + lockFd: lockFd, + } + + return newLockedUUID, nil + } else if n == 36 { + newUUID, err := uuid.Parse(string(uuidArray[:36])) + if err != nil { + fmt.Printf("Could not parse UUID\n") + syscall.Close(lockFd) + return nUUID, err + } + + newLockedUUID := lockedUUID{ + uuid: newUUID, + lockFd: lockFd, + } + + return newLockedUUID, nil + } + + return nUUID, err +} + +func freeUUID(uuid lockedUUID) error { + if uuid.lockFd == -1 { + return nil + } + + err := syscall.Flock(uuid.lockFd, syscall.LOCK_UN) + if err != nil { + fmt.Printf("Unable to unlock UUID %v\n", err) + return err + } + + return nil +} diff --git a/ssntp/ssntp_test.go b/ssntp/ssntp_test.go new file mode 100644 index 000000000..fb0a3dc5c --- /dev/null +++ b/ssntp/ssntp_test.go @@ -0,0 +1,1996 @@ +// +// Copyright (c) 2016 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
// See the License for the specific language governing permissions and
// limitations under the License.
//

package ssntp

import (
	"bytes"
	"encoding/asn1"
	"flag"
	"fmt"
	"io/ioutil"
	"os"
	"path"
	"sync"
	"testing"
	"time"
)

// ssntpEchoServer is a test server that echoes every received frame
// back to its sender.  The optional channels let tests observe the
// connecting client's role and the reception of a frame carrying the
// current SSNTP major version.
type ssntpEchoServer struct {
	ssntp Server
	t     *testing.T

	roleChannel  chan string   // fed with the connecting client's role string
	majorChannel chan struct{} // closed when a current-major frame arrives
}

func (server *ssntpEchoServer) ConnectNotify(uuid string, role uint32) {
	if server.roleChannel != nil {
		sRole := (Role)(role)
		server.roleChannel <- sRole.String()
	}
}

func (server *ssntpEchoServer) DisconnectNotify(uuid string) {
}

// StatusNotify echoes the status frame back to its sender.
func (server *ssntpEchoServer) StatusNotify(uuid string, status Status, frame *Frame) {
	server.ssntp.SendStatus(uuid, status, frame.Payload)
}

// CommandNotify echoes the command frame back to its sender, and
// signals majorChannel when the frame carries the current major version.
func (server *ssntpEchoServer) CommandNotify(uuid string, command Command, frame *Frame) {
	if server.majorChannel != nil {
		if frame.major() == major {
			close(server.majorChannel)
		}
	}

	server.ssntp.SendCommand(uuid, command, frame.Payload)
}

// EventNotify echoes the event frame back to its sender.
func (server *ssntpEchoServer) EventNotify(uuid string, event Event, frame *Frame) {
	server.ssntp.SendEvent(uuid, event, frame.Payload)
}

// ErrorNotify echoes the error frame back to its sender.
func (server *ssntpEchoServer) ErrorNotify(uuid string, error Error, frame *Frame) {
	server.ssntp.SendError(uuid, error, frame.Payload)
}

// ssntpEchoFwderServer is a test server whose notifiers do nothing:
// echoing happens through the frame forwarders below, which always
// designate the original sender as the forward recipient.
type ssntpEchoFwderServer struct {
	ssntp Server
	t     *testing.T
}

func (server *ssntpEchoFwderServer) ConnectNotify(uuid string, role uint32) {
}

func (server *ssntpEchoFwderServer) DisconnectNotify(uuid string) {
}

func (server *ssntpEchoFwderServer) StatusNotify(uuid string, status Status, frame *Frame) {
}

func (server *ssntpEchoFwderServer) CommandNotify(uuid string, command Command, frame *Frame) {
}

func (server *ssntpEchoFwderServer) EventNotify(uuid string, event Event, frame *Frame) {
}

func (server *ssntpEchoFwderServer) ErrorNotify(uuid string, error Error, frame *Frame) {
}

// CommandForward sends the command frame back to its original sender.
func (server *ssntpEchoFwderServer) CommandForward(uuid string, command Command, frame *Frame) (dest ForwardDestination) {
	dest.AddRecipient(uuid)

	return
}

// EventForward sends the event frame back to its original sender.
func (server *ssntpEchoFwderServer) EventForward(uuid string, event Event, frame *Frame) (dest ForwardDestination) {
	dest.AddRecipient(uuid)

	return
}

// StatusForward sends the status frame back to its original sender.
func (server *ssntpEchoFwderServer) StatusForward(uuid string, status Status, frame *Frame) (dest ForwardDestination) {
	dest.AddRecipient(uuid)

	return
}

// ErrorForward sends the error frame back to its original sender.
func (server *ssntpEchoFwderServer) ErrorForward(uuid string, error Error, frame *Frame) (dest ForwardDestination) {
	dest.AddRecipient(uuid)

	return
}

// ssntpServer is a test server that forwards every frame to the
// well-known test Controller (controllerUUID), regardless of sender.
type ssntpServer struct {
	ssntp Server
	t     *testing.T
}

func (server *ssntpServer) ConnectNotify(uuid string, role uint32) {
}

func (server *ssntpServer) DisconnectNotify(uuid string) {
}

func (server *ssntpServer) StatusNotify(uuid string, status Status, frame *Frame) {
}

func (server *ssntpServer) CommandNotify(uuid string, command Command, frame *Frame) {
}

func (server *ssntpServer) EventNotify(uuid string, event Event, frame *Frame) {
}

func (server *ssntpServer) ErrorNotify(uuid string, error Error, frame *Frame) {
}

// CommandForward forwards the command frame to the test Controller.
func (server *ssntpServer) CommandForward(uuid string, command Command, frame *Frame) (dest ForwardDestination) {
	dest.AddRecipient(controllerUUID)

	return
}

// EventForward forwards the event frame to the test Controller.
func (server *ssntpServer) EventForward(uuid string, event Event, frame *Frame) (dest ForwardDestination) {
	dest.AddRecipient(controllerUUID)

	return
}

// StatusForward forwards the status frame to the test Controller.
func (server *ssntpServer) StatusForward(uuid string, status Status, frame *Frame) (dest ForwardDestination) {
	dest.AddRecipient(controllerUUID)

	return
}

// ErrorForward forwards the error frame to the test Controller.
func (server *ssntpServer) ErrorForward(uuid string, error Error, frame *Frame) (dest ForwardDestination) {
	dest.AddRecipient(controllerUUID)

	return
}

// ssntpClient is a test client.  Each non-nil channel is fed by the
// matching notifier below so that tests can synchronize on, and
// inspect, the frames received back from the server.  Payload matching
// against the payload field gates the cmd/sta/evt/err channels.
type ssntpClient struct {
	ssntp        Client
	t            *testing.T
	payload      []byte        // payload expected back from the server
	disconnected chan struct{} // closed on DisconnectNotify
	connected    chan struct{} // closed on ConnectNotify
	typeChannel  chan string   // frame type string (COMMAND/STATUS/EVENT/ERROR)
	cmdChannel   chan string
	staChannel   chan string
	evtChannel   chan string
	errChannel   chan string

	cmdTracedChannel   chan string
	cmdDurationChannel chan time.Duration
	cmdDumpChannel     chan struct{}
	staTracedChannel   chan string
	evtTracedChannel   chan string
	errTracedChannel   chan string
}

func (client *ssntpClient) ConnectNotify() {
	if client.connected != nil {
		close(client.connected)
	}
}

func (client *ssntpClient) DisconnectNotify() {
	if client.disconnected != nil {
		close(client.disconnected)
	}
}

func (client *ssntpClient) StatusNotify(status Status, frame *Frame) {
	if client.typeChannel != nil {
		client.typeChannel <- STATUS.String()
	}

	if client.staChannel != nil && bytes.Equal(frame.Payload, client.payload) == true {
		client.staChannel <- status.String()
	}
}

// CommandNotify reports the received command through the configured
// channels.  For traced frames it sends either the trace label or the
// networking path length (as a single-rune string) followed by the
// frame duration.  For untraced frames it reports a zero duration when
// Duration() fails, as it should on a frame with no trace data.
func (client *ssntpClient) CommandNotify(command Command, frame *Frame) {
	if client.typeChannel != nil {
		client.typeChannel <- COMMAND.String()
	}

	if client.cmdChannel != nil && bytes.Equal(frame.Payload, client.payload) == true {
		client.cmdChannel <- command.String()
	}

	if client.cmdDumpChannel != nil {
		trace, err := frame.DumpTrace()
		if err == nil && trace.Type == COMMAND.String() {
			close(client.cmdDumpChannel)
		}
	}

	if client.cmdTracedChannel != nil {
		if frame.Trace.Label != nil {
			client.cmdTracedChannel <- string(frame.Trace.Label)
		} else if frame.PathTrace() == true {
			// PathLength is converted to a one-rune string; tests
			// build their expected value the same way.
			client.cmdTracedChannel <- string(frame.Trace.PathLength)
			duration, _ := frame.Duration()
			client.cmdDurationChannel <- duration
		} else {
			close(client.cmdTracedChannel)
		}
	} else {
		if client.cmdDurationChannel != nil {
			_, err := frame.Duration()
			if err != nil {
				client.cmdDurationChannel <- 0
			}
		}
	}
}

func (client *ssntpClient) EventNotify(event Event, frame *Frame) {
	if client.typeChannel != nil {
		client.typeChannel <- EVENT.String()
	}

	if client.evtChannel != nil && bytes.Equal(frame.Payload, client.payload) == true {
		client.evtChannel <- event.String()
	}
}

func (client *ssntpClient) ErrorNotify(error Error, frame *Frame) {
	if client.typeChannel != nil {
		client.typeChannel <- ERROR.String()
	}

	if client.errChannel != nil && bytes.Equal(frame.Payload, client.payload) == true {
		client.errChannel <- error.String()
	}
}

// Test client UUID generation code
//
// Test that two consecutive SSNTP clients get the same UUID.
// This test verifies that the client UUID permanent storage
// code path works fine.
//
// Test is expected to pass.
func TestUUID(t *testing.T) {
	var serverConfig Config
	var clientConfig Config
	var server ssntpEchoServer
	var client1, client2 ssntpClient

	server.t = t
	client1.t = t
	client2.t = t
	serverConfig.Transport = *transport
	serverConfig.Role = SERVER
	clientConfig.Transport = *transport
	clientConfig.Role = AGENT

	go server.ssntp.Serve(&serverConfig, &server)

	// Give the server time to start listening.
	time.Sleep(500 * time.Millisecond)
	client1.ssntp.Dial(&clientConfig, &client1)
	client1.ssntp.Close()

	err := client2.ssntp.Dial(&clientConfig, &client2)
	if err != nil {
		t.Fatalf("Failed to connect %s", err)
	}

	// The second client must have picked up the UUID the first one
	// persisted to storage.
	if client1.ssntp.UUID() != client2.ssntp.UUID() {
		client2.ssntp.Close()
		t.Fatalf("Wrong client UUID %s vs %s", client1.ssntp.UUID(), client2.ssntp.UUID())
	}

	client2.ssntp.Close()
	server.ssntp.Stop()
}

// Test SSNTP OID matches
//
// Test that each SSNTP defined role matches the right OID.
//
// Test is expected to pass.
func TestGetRoleOID(t *testing.T) {
	// Table of every defined role and its expected OID.
	roleOID := []struct {
		role uint32
		oid  asn1.ObjectIdentifier
	}{
		{
			role: AGENT,
			oid:  RoleAgentOID,
		},
		{
			role: SCHEDULER,
			oid:  RoleSchedulerOID,
		},
		{
			role: Controller,
			oid:  RoleControllerOID,
		},
		{
			role: NETAGENT,
			oid:  RoleNetAgentOID,
		},
		{
			role: SERVER,
			oid:  RoleServerOID,
		},
		{
			role: CNCIAGENT,
			oid:  RoleCNCIAgentOID,
		},
	}

	for _, r := range roleOID {
		oid, err := getRoleOID(r.role)
		if err != nil {
			t.Fatalf("Error getting OID for %d\n", r.role)
		}

		if !r.oid.Equal(*oid) {
			t.Fatalf("OID mismatch %v vs %v\n", r.role, *oid)
		}
	}

	// An undefined role must not map to any OID.
	_, err := getRoleOID(0xffff)
	if err == nil {
		t.Fatalf("Got OID for an invalid role\n")
	}
}

// Test SSNTP client connection
//
// Test that an SSNTP client can connect to an SSNTP server.
//
// Test is expected to pass.
func TestConnect(t *testing.T) {
	var serverConfig Config
	var clientConfig Config
	var server ssntpEchoServer
	var client ssntpClient

	server.t = t
	client.t = t
	serverConfig.Transport = *transport
	clientConfig.Transport = *transport

	go server.ssntp.Serve(&serverConfig, &server)
	// Give the server time to start listening.
	time.Sleep(500 * time.Millisecond)
	err := client.ssntp.Dial(&clientConfig, &client)

	client.ssntp.Close()
	server.ssntp.Stop()

	if err != nil {
		t.Fatalf("Failed to connect")
	}
}

// testConnectRole connects a client claiming the given role and checks
// that the server observes that same role on connection.
func testConnectRole(t *testing.T, role Role) {
	var serverConfig Config
	var clientConfig Config
	var server ssntpEchoServer
	var client ssntpClient

	server.t = t
	server.roleChannel = make(chan string)
	client.t = t
	serverConfig.Transport = *transport
	clientConfig.Transport = *transport
	clientConfig.Role = (uint32)(role)

	go server.ssntp.Serve(&serverConfig, &server)
	time.Sleep(500 * time.Millisecond)
	err := client.ssntp.Dial(&clientConfig, &client)
	if err != nil {
		t.Fatalf("Failed to connect")
	}

	clientRole := <-server.roleChannel
	if clientRole != role.String() {
		t.Fatalf("Wrong role")
	}

	client.ssntp.Close()
	server.ssntp.Stop()
}

// Test the SSNTP client role from the server connection.
//
// Test that an SSNTP client acting as a SERVER can
// connect to an SSNTP server, and that the server sees
// the right role.
//
// Test is expected to pass.
func TestConnectRoleServer(t *testing.T) {
	testConnectRole(t, SERVER)
}

// Test the SSNTP client role from the server connection.
//
// Test that an SSNTP client acting as a Controller can
// connect to an SSNTP server, and that the server sees
// the right role.
//
// Test is expected to pass.
func TestConnectRoleController(t *testing.T) {
	testConnectRole(t, Controller)
}

// Test the SSNTP client role from the server connection.
//
// Test that an SSNTP client acting as an AGENT can
// connect to an SSNTP server, and that the server sees
// the right role.
//
// Test is expected to pass.
func TestConnectRoleAgent(t *testing.T) {
	testConnectRole(t, AGENT)
}

// Test the SSNTP client role from the server connection.
//
// Test that an SSNTP client acting as a SCHEDULER can
// connect to an SSNTP server, and that the server sees
// the right role.
//
// Test is expected to pass.
func TestConnectRoleScheduler(t *testing.T) {
	testConnectRole(t, SCHEDULER)
}

// Test the SSNTP client role from the server connection.
//
// Test that an SSNTP client acting as a NETAGENT can
// connect to an SSNTP server, and that the server sees
// the right role.
//
// Test is expected to pass.
func TestConnectRoleNetAgent(t *testing.T) {
	testConnectRole(t, NETAGENT)
}

// Test the SSNTP client role from the server connection.
//
// Test that an SSNTP client acting as a CNCIAGENT can
// connect to an SSNTP server, and that the server sees
// the right role.
//
// Test is expected to pass.
func TestConnectRoleCNCIAgent(t *testing.T) {
	testConnectRole(t, CNCIAGENT)
}

// TestMajor verifies that a command frame sent by the client carries
// the current SSNTP major version, as observed by the echo server.
func TestMajor(t *testing.T) {
	var serverConfig Config
	var clientConfig Config
	var server ssntpEchoServer
	var client ssntpClient

	server.t = t
	server.majorChannel = make(chan struct{})
	client.t = t
	serverConfig.Transport = *transport
	clientConfig.Transport = *transport

	go server.ssntp.Serve(&serverConfig, &server)
	time.Sleep(500 * time.Millisecond)
	err := client.ssntp.Dial(&clientConfig, &client)
	if err != nil {
		t.Fatalf("Failed to connect")
	}

	client.payload = []byte{'Y', 'A', 'M', 'L'}
	client.ssntp.SendCommand(START, client.payload)

	select {
	case <-server.majorChannel:
		break
	case <-time.After(time.Second):
		t.Fatalf("Did not receive the major frame")
	}

	client.ssntp.Close()
	server.ssntp.Stop()
}

/* Mark D. Ryan FTW ! */
// getCertPaths writes the given CA, server and client certificates
// under tmpDir and returns their respective file paths.  It exits the
// process on write failure since no test can proceed without certs.
func getCertPaths(tmpDir, caCert, serverCert, clientCert string) (string, string, string) {
	var caPath, serverPath, clientPath string

	caPath = path.Join(tmpDir, "CACert")
	serverPath = path.Join(tmpDir, "ServerCert")
	clientPath = path.Join(tmpDir, "ClientCert")

	for _, s := range []struct{ path, data string }{{caPath, caCert}, {serverPath, serverCert}, {clientPath, clientCert}} {
		err := ioutil.WriteFile(s.path, []byte(s.data), 0755)
		if err != nil {
			fmt.Fprintf(os.Stderr, "Unable to create certfile %s %v\n", s.path, err)
			os.Exit(1)
		}
	}

	return caPath, serverPath, clientPath
}

// validRoles returns true only for the single server/client role pair
// that matches the certificates used by testConnectVerifyCertificate
// (scheduler server cert, agent client cert).
func validRoles(serverRole, clientRole Role) bool {
	if serverRole == SCHEDULER && clientRole == AGENT {
		return true
	}

	return false
}

// testConnectVerifyCertificate attempts a role-verified connection with
// fixed scheduler/agent certificates and checks that it succeeds only
// when the claimed roles match those certificates.
func testConnectVerifyCertificate(t *testing.T, serverRole, clientRole Role) {
	var serverConfig Config
	var clientConfig Config
	var server ssntpEchoServer
	var client ssntpClient

	tmpDir, err := ioutil.TempDir("", "ssntp-test-certs")
	if err != nil {
		fmt.Fprintf(os.Stderr, "Unable to create temporary Dir %v\n", err)
		os.Exit(1)
	}
	defer func() {
		_ = os.RemoveAll(tmpDir)
	}()

	CACert, serverCert, clientCert := getCertPaths(tmpDir, testCACertScheduler, testCertScheduler, testCertAgent)

	server.t = t
	serverConfig.Transport = *transport
	serverConfig.RoleVerification = true
	serverConfig.CAcert = CACert
	serverConfig.Cert = serverCert
	serverConfig.Role = (uint32)(serverRole)

	client.t = t
	clientConfig.Transport = *transport
	clientConfig.RoleVerification = true
	clientConfig.CAcert = CACert
	clientConfig.Cert = clientCert
	clientConfig.Role = (uint32)(clientRole)

	go server.ssntp.Serve(&serverConfig, &server)
	time.Sleep(500 * time.Millisecond)
	err = client.ssntp.Dial(&clientConfig, &client)

	client.ssntp.Close()
	server.ssntp.Stop()

	if validRoles(serverRole, clientRole) && err != nil {
		t.Fatalf("Failed to connect")
	}

	if !validRoles(serverRole, clientRole) && err == nil {
		t.Fatalf("Wrong certificate, connection should not be allowed")
	}
}

// Test that an SSNTP verified link can be established.
//
// Test that an SSNTP client can connect to an SSNTP server
// when both are using SSNTP specific certificates.
//
// Test is expected to pass.
func TestConnectVerifyCertificatePositive(t *testing.T) {
	testConnectVerifyCertificate(t, SCHEDULER, AGENT)
}

// Test that an SSNTP verified link with the wrong client
// certificate should not be established.
//
// Test that an SSNTP client can not connect to an SSNTP server
// when both are using SSNTP specific certificates and the client
// has not defined the right role.
//
// Test is expected to pass.
func TestConnectVerifyClientCertificateNegative(t *testing.T) {
	testConnectVerifyCertificate(t, SCHEDULER, Controller)
}

// Test that an SSNTP verified link with the wrong server
// certificate should not be established.
//
// Test that an SSNTP client can not connect to an SSNTP server
// when both are using SSNTP specific certificates and the server
// has not defined the right role.
+// +// Test is expected to pass. +func TestConnectVerifyServerCertificateNegative(t *testing.T) { + testConnectVerifyCertificate(t, SERVER, AGENT) +} + +// Test SSNTP client connection to an alternative port +// +// Test that an SSNTP client can connect to an SSNTP server +// listening to a non standard SSNTP port (i.e. different than +// 8888). +// +// Test is expected to pass. +func TestConnectPort(t *testing.T) { + var serverConfig Config + var clientConfig Config + var server ssntpEchoServer + var client ssntpClient + + server.t = t + client.t = t + serverConfig.Transport = *transport + serverConfig.Port = 9999 + clientConfig.Transport = *transport + clientConfig.Port = 9999 + + go server.ssntp.Serve(&serverConfig, &server) + time.Sleep(500 * time.Millisecond) + err := client.ssntp.Dial(&clientConfig, &client) + if err != nil { + t.Fatalf("Failed to connect") + } + client.ssntp.Close() + server.ssntp.Stop() +} + +// Test SSNTP client connection closure before Dial. +// +// Test that an SSNTP client can close itself before Dialing +// into the server. We verifiy that the subsequent Dial() call +// should fail. +// +// Test is expected to pass. +func TestClientCloseBeforeDial(t *testing.T) { + var clientConfig Config + var client ssntpClient + + client.t = t + clientConfig.Transport = *transport + + client.ssntp.Close() + err := client.ssntp.Dial(&clientConfig, &client) + if err == nil { + t.Fatalf("Initiated connection while closed") + } +} + +// Test SSNTP client connection closure after Dial. +// +// Test that an SSNTP client can close itself after Dialing +// into the server. +// +// Test is expected to pass. +func TestClientCloseAfterDial(t *testing.T) { + var clientConfig Config + var client ssntpClient + + client.t = t + clientConfig.Transport = *transport + + go client.ssntp.Dial(&clientConfig, &client) + time.Sleep(1000 * time.Millisecond) + client.ssntp.Close() + time.Sleep(5000 * time.Millisecond) +} + +// Test SSNTP client reconnection to a server. 
+// +// Test that an SSNTP client eventually reconnects to +// a SSNTP server that restarts. +// +// Test is expected to pass. +func TestClientReconnect(t *testing.T) { + var serverConfig Config + var clientConfig Config + var server ssntpEchoServer + var client ssntpClient + + server.t = t + client.t = t + serverConfig.Transport = *transport + clientConfig.Transport = *transport + + go server.ssntp.Serve(&serverConfig, &server) + time.Sleep(500 * time.Millisecond) + client.connected = make(chan struct{}) + err := client.ssntp.Dial(&clientConfig, &client) + if err != nil { + t.Fatalf("Failed to connect") + } + + select { + case <-client.connected: + break + case <-time.After(time.Second): + t.Fatalf("Did not receive the connection notification") + } + + client.disconnected = make(chan struct{}) + + server.ssntp.Stop() + + select { + case <-client.disconnected: + break + case <-time.After(3 * time.Second): + t.Fatalf("Did not receive the disconnection notification") + } + + client.connected = make(chan struct{}) + go server.ssntp.Serve(&serverConfig, &server) + + select { + case <-client.connected: + break + case <-time.After(10 * time.Second): + t.Fatalf("Did not receive the disconnection notification") + } + + client.ssntp.Close() + server.ssntp.Stop() +} + +// Test SSNTP server Stop() +// +// Test that an SSNTP client properly receives its disconnection +// notification when its server stops. +// +// Test is expected to pass. 
+func TestServerStop(t *testing.T) { + var serverConfig Config + var clientConfig Config + var server ssntpEchoServer + var client ssntpClient + + server.t = t + client.t = t + serverConfig.Transport = *transport + clientConfig.Transport = *transport + + go server.ssntp.Serve(&serverConfig, &server) + time.Sleep(500 * time.Millisecond) + client.connected = make(chan struct{}) + err := client.ssntp.Dial(&clientConfig, &client) + if err != nil { + t.Fatalf("Failed to connect") + } + + select { + case <-client.connected: + break + case <-time.After(time.Second): + t.Fatalf("Did not receive the connection notification") + } + + client.disconnected = make(chan struct{}) + + server.ssntp.Stop() + + select { + case <-client.disconnected: + break + case <-time.After(3 * time.Second): + t.Fatalf("Did not receive the disconnection notification") + } + + client.ssntp.Close() + time.Sleep(500 * time.Millisecond) +} + +// Test SSNTP Command frame +// +// Test that an SSNTP client can send a Command frame to an echo +// server and then receives it back consistently. +// +// Test is expected to pass. 
+func TestCommand(t *testing.T) { + var serverConfig Config + var clientConfig Config + var server ssntpEchoServer + var client ssntpClient + + server.t = t + client.t = t + client.cmdChannel = make(chan string) + client.typeChannel = make(chan string) + serverConfig.Transport = *transport + clientConfig.Transport = *transport + + go server.ssntp.Serve(&serverConfig, &server) + time.Sleep(500 * time.Millisecond) + err := client.ssntp.Dial(&clientConfig, &client) + if err != nil { + t.Fatalf("Failed to connect") + } + + client.payload = []byte{'Y', 'A', 'M', 'L'} + client.ssntp.SendCommand(START, client.payload) + + defer func() { + client.ssntp.Close() + server.ssntp.Stop() + }() + + select { + case frameType := <-client.typeChannel: + if frameType != COMMAND.String() { + t.Fatalf("Did not receive the right frame type") + } + case <-time.After(time.Second): + t.Fatalf("Did not receive the command notification") + } + + select { + case check := <-client.cmdChannel: + if check != START.String() { + t.Fatalf("Did not receive the right payload") + } + case <-time.After(time.Second): + t.Fatalf("Did not receive the command notification") + } +} + +// Test SSNTP Command traced frame label +// +// Test that an SSNTP client can send a traced Command frame to an echo +// server and then receives it back consistently. +// We test that the label is received back as expected. +// +// Test is expected to pass. 
+func TestTracedLabelCommand(t *testing.T) { + var serverConfig Config + var clientConfig Config + var server ssntpEchoFwderServer + var client ssntpClient + + server.t = t + client.t = t + client.cmdTracedChannel = make(chan string) + serverConfig.Transport = *transport + clientConfig.Transport = *transport + + serverConfig.ForwardRules = []FrameForwardRule{ + { + Operand: START, + CommandForward: &server, + }, + } + + go server.ssntp.Serve(&serverConfig, &server) + time.Sleep(500 * time.Millisecond) + err := client.ssntp.Dial(&clientConfig, &client) + if err != nil { + t.Fatalf("Failed to connect") + } + + clientLabel := "LabelClient" + client.payload = []byte{'Y', 'A', 'M', 'L'} + client.ssntp.SendTracedCommand(START, client.payload, + &TraceConfig{ + Label: []byte(clientLabel), + }, + ) + + check := <-client.cmdTracedChannel + + client.ssntp.Close() + server.ssntp.Stop() + + if check != clientLabel { + t.Fatalf("Did not receive the right payload") + } +} + +// Test SSNTP Command traced frame networking path +// +// Test that an SSNTP client can send a traced Command frame to an echo +// server and then receives it back consistently. +// We test that the number of networking nodes received as part of the +// echo server reply is the right one. +// +// Test is expected to pass. 
+func TestTracedPathCommand(t *testing.T) { + var serverConfig Config + var clientConfig Config + var server ssntpEchoFwderServer + var client ssntpClient + + server.t = t + client.t = t + client.cmdTracedChannel = make(chan string) + serverConfig.Transport = *transport + clientConfig.Transport = *transport + + serverConfig.ForwardRules = []FrameForwardRule{ + { + Operand: START, + CommandForward: &server, + }, + + { + Operand: READY, + StatusForward: &server, + }, + } + + go server.ssntp.Serve(&serverConfig, &server) + time.Sleep(500 * time.Millisecond) + err := client.ssntp.Dial(&clientConfig, &client) + if err != nil { + t.Fatalf("Failed to connect") + } + + client.payload = []byte{'Y', 'A', 'M', 'L'} + client.ssntp.SendTracedCommand(START, client.payload, + &TraceConfig{ + PathTrace: true, + }, + ) + + check := <-client.cmdTracedChannel + + client.ssntp.Close() + server.ssntp.Stop() + + /* We should get 3 nodes */ + if check != string(3) { + t.Fatalf("Did not receive the right payload %s", check) + } +} + +// Test SSNTP Command traced frame dump +// +// Test that an SSNTP client can send a traced Command frame to an echo +// server, receives it back consistently and dump it. +// +// Test is expected to pass. 
+func TestDumpTracedCommand(t *testing.T) { + var serverConfig Config + var clientConfig Config + var server ssntpEchoFwderServer + var client ssntpClient + + server.t = t + client.t = t + client.cmdDumpChannel = make(chan struct{}) + serverConfig.Transport = *transport + clientConfig.Transport = *transport + + serverConfig.ForwardRules = []FrameForwardRule{ + { + Operand: START, + CommandForward: &server, + }, + + { + Operand: READY, + StatusForward: &server, + }, + } + + go server.ssntp.Serve(&serverConfig, &server) + time.Sleep(500 * time.Millisecond) + err := client.ssntp.Dial(&clientConfig, &client) + if err != nil { + t.Fatalf("Failed to connect") + } + + client.payload = []byte{'Y', 'A', 'M', 'L'} + client.ssntp.SendTracedCommand(START, client.payload, + &TraceConfig{ + PathTrace: true, + }, + ) + + defer func() { + client.ssntp.Close() + server.ssntp.Stop() + }() + + select { + case <-client.cmdDumpChannel: + break + case <-time.After(time.Second): + t.Fatalf("Did not receive the dump notification") + } +} + +// Test SSNTP Command traced frame duration +// +// Test that an SSNTP client can send a traced Command frame to an echo +// server and then receives it back consistently. +// We test that the frame duration is not zero. +// +// Test is expected to pass. 
+func TestCommandDuration(t *testing.T) { + var serverConfig Config + var clientConfig Config + var server ssntpEchoFwderServer + var client ssntpClient + + server.t = t + client.t = t + client.cmdTracedChannel = make(chan string) + client.cmdDurationChannel = make(chan time.Duration) + serverConfig.Transport = *transport + clientConfig.Transport = *transport + + serverConfig.ForwardRules = []FrameForwardRule{ + { + Operand: START, + CommandForward: &server, + }, + + { + Operand: READY, + StatusForward: &server, + }, + } + + go server.ssntp.Serve(&serverConfig, &server) + time.Sleep(500 * time.Millisecond) + err := client.ssntp.Dial(&clientConfig, &client) + if err != nil { + t.Fatalf("Failed to connect") + } + + client.payload = []byte{'Y', 'A', 'M', 'L'} + client.ssntp.SendTracedCommand(START, client.payload, + &TraceConfig{ + PathTrace: true, + }, + ) + + check := <-client.cmdTracedChannel + duration := <-client.cmdDurationChannel + + client.ssntp.Close() + server.ssntp.Stop() + + /* We should get 3 nodes */ + if check != string(3) { + t.Fatalf("Wrong number of nodes %s", check) + } + + /* We should get a non zero duration */ + if duration == 0 { + t.Fatalf("Zero duration") + } +} + +// Test the lack of duration on a non traced Command frame +// +// Test that we can not compute a duration on a non traced +// frame that comes back to the client. +// +// Test is expected to pass. 
+func TestCommandNoDuration(t *testing.T) { + var serverConfig Config + var clientConfig Config + var server ssntpEchoServer + var client ssntpClient + + server.t = t + client.t = t + client.cmdChannel = make(chan string) + client.cmdDurationChannel = make(chan time.Duration) + serverConfig.Transport = *transport + clientConfig.Transport = *transport + + go server.ssntp.Serve(&serverConfig, &server) + time.Sleep(500 * time.Millisecond) + err := client.ssntp.Dial(&clientConfig, &client) + if err != nil { + t.Fatalf("Failed to connect") + } + + client.payload = []byte{'Y', 'A', 'M', 'L'} + client.ssntp.SendCommand(RESTART, client.payload) + + defer func() { + client.ssntp.Close() + server.ssntp.Stop() + }() + + select { + case check := <-client.cmdChannel: + if check != RESTART.String() { + t.Fatalf("Did not receive the right payload") + } + case <-time.After(time.Second): + t.Fatalf("Did not receive the command notification") + } + + select { + case duration := <-client.cmdDurationChannel: + if duration != 0 { + t.Fatalf("Should not receive a duration") + } + case <-time.After(time.Second): + t.Fatalf("Did not receive the duration notification") + } +} + +// Test sending consecutive frames +// +// Test that an SSNTP client can send several SSNTP frames to an echo +// sever and then receives it back consistently and in order. +// +// Test is expected to pass. 
+func TestConsecutiveFrames(t *testing.T) { + var serverConfig Config + var clientConfig Config + var server ssntpEchoFwderServer + var client ssntpClient + + server.t = t + client.t = t + client.cmdChannel = make(chan string) + client.staChannel = make(chan string) + serverConfig.Transport = *transport + clientConfig.Transport = *transport + + serverConfig.ForwardRules = []FrameForwardRule{ + { + Operand: DELETE, + CommandForward: &server, + }, + { + Operand: READY, + StatusForward: &server, + }, + } + + go server.ssntp.Serve(&serverConfig, &server) + time.Sleep(500 * time.Millisecond) + err := client.ssntp.Dial(&clientConfig, &client) + if err != nil { + t.Fatalf("Failed to connect") + } + + client.payload = []byte{'Y', 'A', 'M', 'L'} + client.ssntp.SendStatus(READY, client.payload) + + client.payload = []byte{'D', 'E', 'L', 'E', 'T', 'E'} + client.ssntp.SendCommand(DELETE, client.payload) + + check := <-client.cmdChannel + + client.ssntp.Close() + server.ssntp.Stop() + + if check != DELETE.String() { + t.Fatalf("Did not receive the right payload") + } +} + +// Test SSNTP Status frame +// +// Test that an SSNTP client can send a Status frame to an echo +// server and then receives it back consistently. +// +// Test is expected to pass. 
+func TestStatus(t *testing.T) { + var serverConfig Config + var clientConfig Config + var server ssntpEchoServer + var client ssntpClient + + server.t = t + client.t = t + client.typeChannel = make(chan string) + client.staChannel = make(chan string) + serverConfig.Transport = *transport + clientConfig.Transport = *transport + + go server.ssntp.Serve(&serverConfig, &server) + time.Sleep(500 * time.Millisecond) + err := client.ssntp.Dial(&clientConfig, &client) + if err != nil { + t.Fatalf("Failed to connect") + } + + client.payload = []byte{'R', 'E', 'A', 'D', 'Y'} + client.ssntp.SendStatus(READY, client.payload) + + frameType := <-client.typeChannel + if frameType != STATUS.String() { + t.Fatalf("Did not receive the right frame type") + } + + check := <-client.staChannel + + client.ssntp.Close() + server.ssntp.Stop() + + if check != READY.String() { + t.Fatalf("Did not receive the right payload") + } +} + +// Test SSNTP Event frame +// +// Test that an SSNTP client can send an Event frame to an echo +// server and then receives it back consistently. +// +// Test is expected to pass. 
+func TestEvent(t *testing.T) { + var serverConfig Config + var clientConfig Config + var server ssntpEchoServer + var client ssntpClient + + server.t = t + client.t = t + client.typeChannel = make(chan string) + client.evtChannel = make(chan string) + serverConfig.Transport = *transport + clientConfig.Transport = *transport + + go server.ssntp.Serve(&serverConfig, &server) + time.Sleep(500 * time.Millisecond) + err := client.ssntp.Dial(&clientConfig, &client) + if err != nil { + t.Fatalf("Failed to connect") + } + + client.payload = []byte{'T', 'E', 'N', 'A', 'N', 'T'} + client.ssntp.SendEvent(TenantAdded, client.payload) + + frameType := <-client.typeChannel + if frameType != EVENT.String() { + t.Fatalf("Did not receive the right frame type") + } + + check := <-client.evtChannel + + client.ssntp.Close() + server.ssntp.Stop() + + if check != TenantAdded.String() { + t.Fatalf("Did not receive the right payload") + } +} + +// Test SSNTP Error frame +// +// Test that an SSNTP client can send an Error frame to an echo +// server and then receives it back consistently. +// +// Test is expected to pass. 
+func TestError(t *testing.T) { + var serverConfig Config + var clientConfig Config + var server ssntpEchoServer + var client ssntpClient + + server.t = t + client.t = t + client.typeChannel = make(chan string) + client.errChannel = make(chan string) + serverConfig.Transport = *transport + clientConfig.Transport = *transport + + go server.ssntp.Serve(&serverConfig, &server) + time.Sleep(500 * time.Millisecond) + err := client.ssntp.Dial(&clientConfig, &client) + if err != nil { + t.Fatalf("Failed to connect") + } + + client.payload = []byte{'E', 'R', 'R', 'O', 'R'} + client.ssntp.SendError(InvalidFrameType, client.payload) + + frameType := <-client.typeChannel + if frameType != ERROR.String() { + t.Fatalf("Did not receive the right frame type") + } + + check := <-client.errChannel + + client.ssntp.Close() + server.ssntp.Stop() + + if check != InvalidFrameType.String() { + t.Fatalf("Did not receive the right payload") + } +} + +// Test SSNTP Command forwarding +// +// Start an SSNTP server with a set of forwarding rules, an SSNTP +// agent and an SSNTP Controller. +// Then verify that the Controller receives the right frames sent by the agent, +// as specified by the server forwarding rules. +// +// Test is expected to pass. 
+func TestCmdFwd(t *testing.T) { + var serverConfig Config + var controllerConfig, agentConfig Config + var server ssntpServer + var controller, agent ssntpClient + command := STOP + + server.t = t + serverConfig.Transport = *transport + serverConfig.ForwardRules = []FrameForwardRule{ + { + Operand: command, + Dest: Controller, + }, + } + + controller.t = t + controller.cmdChannel = make(chan string) + controllerConfig.Transport = *transport + controllerConfig.Role = Controller + + agent.t = t + agentConfig.Transport = *transport + agentConfig.Role = AGENT + + go server.ssntp.Serve(&serverConfig, &server) + time.Sleep(500 * time.Millisecond) + err := controller.ssntp.Dial(&controllerConfig, &controller) + if err != nil { + t.Fatalf("Controller failed to connect") + } + + err = agent.ssntp.Dial(&agentConfig, &agent) + if err != nil { + t.Fatalf("Agent failed to connect") + } + + payload := []byte{'S', 'T', 'A', 'T', 'S'} + controller.payload = payload + agent.payload = payload + agent.ssntp.SendCommand(command, agent.payload) + + check := <-controller.cmdChannel + + agent.ssntp.Close() + controller.ssntp.Close() + server.ssntp.Stop() + + if check != command.String() { + t.Fatalf("Did not receive the forwarded STATS") + } +} + +const controllerUUID = "3390740c-dce9-48d6-b83a-a717417072ce" + +// Test SSNTP Command forwarder implementation +// +// Start an SSNTP server with a set of forwarding rules implemented +// by a command forwarder, an SSNTP agent and an SSNTP Controller. +// Then verify that the Controller receives the right frames sent by the agent, +// as implemented by the server forwarder. +// +// Test is expected to pass. 
+func TestCmdFwder(t *testing.T) { + var serverConfig Config + var controllerConfig, agentConfig Config + var server ssntpServer + var controller, agent ssntpClient + command := EVACUATE + + server.t = t + serverConfig.Transport = *transport + serverConfig.ForwardRules = []FrameForwardRule{ + { + Operand: command, + CommandForward: &server, + }, + } + + controller.t = t + controller.cmdChannel = make(chan string) + controllerConfig.Transport = *transport + controllerConfig.Role = Controller + controllerConfig.UUID = controllerUUID + + agent.t = t + agentConfig.Transport = *transport + agentConfig.Role = AGENT + + go server.ssntp.Serve(&serverConfig, &server) + time.Sleep(500 * time.Millisecond) + err := controller.ssntp.Dial(&controllerConfig, &controller) + if err != nil { + t.Fatalf("Controller failed to connect") + } + + err = agent.ssntp.Dial(&agentConfig, &agent) + if err != nil { + t.Fatalf("Agent failed to connect") + } + + payload := []byte{'E', 'V', 'A', 'C', 'U', 'A', 'T', 'E'} + controller.payload = payload + agent.payload = payload + agent.ssntp.SendCommand(command, agent.payload) + + check := <-controller.cmdChannel + + agent.ssntp.Close() + controller.ssntp.Close() + server.ssntp.Stop() + + if check != command.String() { + t.Fatalf("Did not receive the forwarded STATS") + } +} + +// Test SSNTP Event forwarding +// +// Start an SSNTP server with a set of forwarding rules, an SSNTP +// agent and an SSNTP Controller. +// Then verify that the Controller receives the right frames sent by the agent, +// as specified by the server forwarding rules. +// +// Test is expected to pass. 
+func TestEventFwd(t *testing.T) { + var serverConfig Config + var controllerConfig, agentConfig Config + var server ssntpServer + var controller, agent ssntpClient + event := TenantAdded + + server.t = t + serverConfig.Transport = *transport + serverConfig.ForwardRules = []FrameForwardRule{ + { + Operand: event, + Dest: Controller, + }, + } + + controller.t = t + controller.evtChannel = make(chan string) + controllerConfig.Transport = *transport + controllerConfig.Role = Controller + + agent.t = t + agentConfig.Transport = *transport + agentConfig.Role = AGENT + + go server.ssntp.Serve(&serverConfig, &server) + time.Sleep(500 * time.Millisecond) + err := controller.ssntp.Dial(&controllerConfig, &controller) + if err != nil { + t.Fatalf("Controller failed to connect") + } + + err = agent.ssntp.Dial(&agentConfig, &agent) + if err != nil { + t.Fatalf("Agent failed to connect") + } + + payload := []byte{'T', 'E', 'N', 'A', 'N', 'T'} + controller.payload = payload + agent.payload = payload + agent.ssntp.SendEvent(event, agent.payload) + + check := <-controller.evtChannel + + agent.ssntp.Close() + controller.ssntp.Close() + server.ssntp.Stop() + + if check != event.String() { + t.Fatalf("Did not receive the forwarded STATS") + } +} + +// Test SSNTP Event forwarder implementation +// +// Start an SSNTP server with a set of forwarding rules implemented +// by an event forwarder, an SSNTP agent and an SSNTP Controller. +// Then verify that the Controller receives the right frames sent by the agent, +// as implemented by the server forwarder. +// +// Test is expected to pass. 
+func TestEventFwder(t *testing.T) { + var serverConfig Config + var controllerConfig, agentConfig Config + var server ssntpServer + var controller, agent ssntpClient + event := TenantRemoved + + server.t = t + serverConfig.Transport = *transport + serverConfig.ForwardRules = []FrameForwardRule{ + { + Operand: event, + EventForward: &server, + }, + } + + controller.t = t + controller.evtChannel = make(chan string) + controllerConfig.Transport = *transport + controllerConfig.Role = Controller + controllerConfig.UUID = controllerUUID + + agent.t = t + agentConfig.Transport = *transport + agentConfig.Role = AGENT + + go server.ssntp.Serve(&serverConfig, &server) + time.Sleep(500 * time.Millisecond) + err := controller.ssntp.Dial(&controllerConfig, &controller) + if err != nil { + t.Fatalf("Controller failed to connect") + } + + err = agent.ssntp.Dial(&agentConfig, &agent) + if err != nil { + t.Fatalf("Agent failed to connect") + } + + payload := []byte{'T', 'E', 'N', 'A', 'N', 'T'} + controller.payload = payload + agent.payload = payload + agent.ssntp.SendEvent(event, agent.payload) + + check := <-controller.evtChannel + + agent.ssntp.Close() + controller.ssntp.Close() + server.ssntp.Stop() + + if check != event.String() { + t.Fatalf("Did not receive the forwarded STATS") + } +} + +// Test SSNTP Error forwarding +// +// Start an SSNTP server with a set of forwarding rules, an SSNTP +// agent and an SSNTP Controller. +// Then verify that the Controller receives the right frames sent by the agent, +// as specified by the server forwarding rules. +// +// Test is expected to pass. 
+func TestErrorFwd(t *testing.T) { + var serverConfig Config + var controllerConfig, agentConfig Config + var server ssntpServer + var controller, agent ssntpClient + error := StartFailure + + server.t = t + serverConfig.Transport = *transport + serverConfig.ForwardRules = []FrameForwardRule{ + { + Operand: error, + Dest: Controller, + }, + } + + controller.t = t + controller.errChannel = make(chan string) + controllerConfig.Transport = *transport + controllerConfig.Role = Controller + + agent.t = t + agentConfig.Transport = *transport + agentConfig.Role = AGENT + + go server.ssntp.Serve(&serverConfig, &server) + time.Sleep(500 * time.Millisecond) + err := controller.ssntp.Dial(&controllerConfig, &controller) + if err != nil { + t.Fatalf("Controller failed to connect") + } + + err = agent.ssntp.Dial(&agentConfig, &agent) + if err != nil { + t.Fatalf("Agent failed to connect") + } + + payload := []byte{'F', 'A', 'I', 'L', 'E', 'D'} + controller.payload = payload + agent.payload = payload + agent.ssntp.SendError(error, agent.payload) + + check := <-controller.errChannel + + agent.ssntp.Close() + controller.ssntp.Close() + server.ssntp.Stop() + + if check != error.String() { + t.Fatalf("Did not receive the forwarded STATS") + } +} + +// Test SSNTP Error forwarder implementation +// +// Start an SSNTP server with a set of forwarding rules implemented +// by an error forwarder, an SSNTP agent and an SSNTP Controller. +// Then verify that the Controller receives the right frames sent by the agent, +// as implemented by the server forwarder. +// +// Test is expected to pass. 
+func TestErrorFwder(t *testing.T) { + var serverConfig Config + var controllerConfig, agentConfig Config + var server ssntpServer + var controller, agent ssntpClient + error := StopFailure + + server.t = t + serverConfig.Transport = *transport + serverConfig.ForwardRules = []FrameForwardRule{ + { + Operand: error, + ErrorForward: &server, + }, + } + + controller.t = t + controller.errChannel = make(chan string) + controllerConfig.Transport = *transport + controllerConfig.Role = Controller + controllerConfig.UUID = controllerUUID + + agent.t = t + agentConfig.Transport = *transport + agentConfig.Role = AGENT + + go server.ssntp.Serve(&serverConfig, &server) + time.Sleep(500 * time.Millisecond) + err := controller.ssntp.Dial(&controllerConfig, &controller) + if err != nil { + t.Fatalf("Controller failed to connect") + } + + err = agent.ssntp.Dial(&agentConfig, &agent) + if err != nil { + t.Fatalf("Agent failed to connect") + } + + payload := []byte{'F', 'A', 'I', 'L', 'E', 'D'} + controller.payload = payload + agent.payload = payload + agent.ssntp.SendError(error, agent.payload) + + check := <-controller.errChannel + + agent.ssntp.Close() + controller.ssntp.Close() + server.ssntp.Stop() + + if check != error.String() { + t.Fatalf("Did not receive the forwarded STATS") + } +} + +// Test SSNTP Command forwarding +// +// Start an SSNTP server with a set of forwarding rules, an SSNTP +// agent and an SSNTP Controller. +// Then verify that the Controller receives the right frames sent by the agent, +// as specified by the server forwarding rules. +// +// Test is expected to pass. 
+func TestStatusFwd(t *testing.T) { + var serverConfig Config + var controllerConfig, agentConfig Config + var server ssntpServer + var controller, agent ssntpClient + status := FULL + + server.t = t + serverConfig.Transport = *transport + serverConfig.ForwardRules = []FrameForwardRule{ + { + Operand: status, + Dest: Controller, + }, + } + + controller.t = t + controller.staChannel = make(chan string) + controllerConfig.Transport = *transport + controllerConfig.Role = Controller + + agent.t = t + agentConfig.Transport = *transport + agentConfig.Role = AGENT + + go server.ssntp.Serve(&serverConfig, &server) + time.Sleep(500 * time.Millisecond) + err := controller.ssntp.Dial(&controllerConfig, &controller) + if err != nil { + t.Fatalf("Controller failed to connect") + } + + err = agent.ssntp.Dial(&agentConfig, &agent) + if err != nil { + t.Fatalf("Agent failed to connect") + } + + payload := []byte{'F', 'U', 'L', 'L'} + controller.payload = payload + agent.payload = payload + agent.ssntp.SendStatus(status, agent.payload) + + check := <-controller.staChannel + + agent.ssntp.Close() + controller.ssntp.Close() + server.ssntp.Stop() + + if check != status.String() { + t.Fatalf("Did not receive the forwarded STATS") + } +} + +// Test SSNTP Status forwarder implementation +// +// Start an SSNTP server with a set of forwarding rules implemented +// by a status forwarder, an SSNTP agent and an SSNTP Controller. +// Then verify that the Controller receives the right frames sent by the agent, +// as implemented by the server forwarder. +// +// Test is expected to pass. 
+func TestStatusFwder(t *testing.T) { + var serverConfig Config + var controllerConfig, agentConfig Config + var server ssntpServer + var controller, agent ssntpClient + status := OFFLINE + + server.t = t + serverConfig.Transport = *transport + serverConfig.ForwardRules = []FrameForwardRule{ + { + Operand: status, + StatusForward: &server, + }, + } + + controller.t = t + controller.staChannel = make(chan string) + controllerConfig.Transport = *transport + controllerConfig.Role = Controller + controllerConfig.UUID = controllerUUID + + agent.t = t + agentConfig.Transport = *transport + agentConfig.Role = AGENT + + go server.ssntp.Serve(&serverConfig, &server) + time.Sleep(500 * time.Millisecond) + err := controller.ssntp.Dial(&controllerConfig, &controller) + if err != nil { + t.Fatalf("Controller failed to connect") + } + + err = agent.ssntp.Dial(&agentConfig, &agent) + if err != nil { + t.Fatalf("Agent failed to connect") + } + + payload := []byte{'O', 'F', 'F', 'L', 'I', 'N', 'E'} + controller.payload = payload + agent.payload = payload + agent.ssntp.SendStatus(status, agent.payload) + + check := <-controller.staChannel + + agent.ssntp.Close() + controller.ssntp.Close() + server.ssntp.Stop() + + if check != status.String() { + t.Fatalf("Did not receive the forwarded STATS") + } +} + +var ( + transport = flag.String("transport", "tcp", "SSNTP transport, must be tcp or unix") + clients = flag.Int("clients", 100, "Number of clients to create for benchmarking") + delay = flag.Int("delay", 10, "Milliseconds between each client transmission") + frames = flag.Int("frames", 1000, "Number of frames per client to send") + payloadSize = flag.Int("payload", 1<<11, "Frames payload size") +) + +func TestMain(m *testing.M) { + flag.Parse() + + if *transport != "tcp" && *transport != "unix" { + *transport = "tcp" + } + + os.Exit(m.Run()) +} + +type ssntpNullServer struct { + ssntp Server + b *testing.B + nCmds int + wg sync.WaitGroup + done chan struct{} +} + +func (server 
*ssntpNullServer) ConnectNotify(uuid string, role uint32) { +} + +func (server *ssntpNullServer) DisconnectNotify(uuid string) { +} + +func (server *ssntpNullServer) StatusNotify(uuid string, status Status, frame *Frame) { + server.wg.Done() +} + +func (server *ssntpNullServer) CommandNotify(uuid string, command Command, frame *Frame) { + server.nCmds++ + if server.nCmds == server.b.N { + server.nCmds = 0 + if server.done != nil { + close(server.done) + } + } +} + +func (server *ssntpNullServer) EventNotify(uuid string, event Event, frame *Frame) { +} + +func (server *ssntpNullServer) ErrorNotify(uuid string, error Error, frame *Frame) { +} + +type benchmarkClient struct { + ssntp Client + b *testing.B +} + +func (client *benchmarkClient) ConnectNotify() { +} + +func (client *benchmarkClient) DisconnectNotify() { +} + +func (client *benchmarkClient) StatusNotify(status Status, frame *Frame) { +} + +func (client *benchmarkClient) CommandNotify(command Command, frame *Frame) { +} + +func (client *benchmarkClient) EventNotify(event Event, frame *Frame) { +} + +func (client *benchmarkClient) ErrorNotify(error Error, frame *Frame) { +} + +func benchmarkSingleClient(b *testing.B, payloadSize int) { + var serverConfig Config + var clientConfig Config + var server ssntpNullServer + var client benchmarkClient + payload := make([]byte, payloadSize) + + server.b = b + server.nCmds = 0 + server.done = make(chan struct{}) + client.b = b + + serverConfig.Transport = *transport + clientConfig.Transport = *transport + + time.Sleep(500 * time.Millisecond) + go server.ssntp.Serve(&serverConfig, &server) + time.Sleep(500 * time.Millisecond) + client.ssntp.Dial(&clientConfig, &client) + + b.SetBytes((int64)(payloadSize)) + b.ResetTimer() + + for i := 0; i < b.N; i++ { + client.ssntp.SendCommand(START, payload) + } + + <-server.done + + client.ssntp.Close() + server.ssntp.Stop() +} + +func benchmarkMultiClients(b *testing.B, payloadSize int, nClients int, nFrames int, delay int) { + 
var serverConfig Config + var server ssntpNullServer + payload := make([]byte, payloadSize) + + server.b = b + server.nCmds = 0 + + for i := 0; i < payloadSize; i++ { + payload[i] = (byte)(i) + } + + serverConfig.Transport = *transport + + time.Sleep(500 * time.Millisecond) + go server.ssntp.Serve(&serverConfig, &server) + time.Sleep(500 * time.Millisecond) + + totalFrames := nClients * nFrames * b.N + frameDelay := time.Duration(delay) * time.Millisecond + b.SetBytes((int64)(totalFrames * payloadSize)) + b.ResetTimer() + + server.wg.Add(totalFrames) + for n := 0; n < b.N; n++ { + for i := 0; i < nClients; i++ { + go func() { + client := &benchmarkClient{ + b: b, + } + + var clientConfig Config + clientConfig.Transport = *transport + + client.ssntp.Dial(&clientConfig, client) + for j := 0; j < nFrames; j++ { + client.ssntp.SendStatus(READY, payload) + time.Sleep(frameDelay) + } + client.ssntp.Close() + }() + } + } + + server.wg.Wait() + server.ssntp.Stop() + +} + +func Benchmark1Client0BFrames(b *testing.B) { + benchmarkSingleClient(b, 0) +} + +func Benchmark1Client512BFrames(b *testing.B) { + benchmarkSingleClient(b, 512) +} + +func Benchmark1Client65kBFrames(b *testing.B) { + benchmarkSingleClient(b, 1<<16) +} + +func Benchmark500Clients1Frame2kB(b *testing.B) { + benchmarkMultiClients(b, 1<<11, 500, 1, 0) +} + +func Benchmark100Clients100Frames2kBNoDelay(b *testing.B) { + benchmarkMultiClients(b, 1<<11, 500, 1000, 0) +} + +func Benchmark100Clients1Frame2kB(b *testing.B) { + benchmarkMultiClients(b, 1<<11, 500, 1, 0) +} + +func Benchmark100Clients100Frames65kB1msDelay(b *testing.B) { + benchmarkMultiClients(b, 1<<16, 100, 1000, 1) +} + +func BenchmarkDefaultMultiClientsMultiFrames(b *testing.B) { + benchmarkMultiClients(b, *payloadSize, *clients, *frames, *delay) +} diff --git a/ssntp/ssntp_test_certs.go b/ssntp/ssntp_test_certs.go new file mode 100644 index 000000000..48bf31b47 --- /dev/null +++ b/ssntp/ssntp_test_certs.go @@ -0,0 +1,137 @@ +// +// Copyright (c) 
2016 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package ssntp + +const testCACertScheduler = ` +-----BEGIN CERTIFICATE----- +MIIDMjCCAhqgAwIBAgIRAP2hffMEmIv9DeOhGX0spY8wDQYJKoZIhvcNAQELBQAw +GDEWMBQGA1UEChMNSW50ZWwvU1NHL09UQzAeFw0xNjAzMTgxNTQ1MThaFw0xNzAz +MTgxNTQ1MThaMBgxFjAUBgNVBAoTDUludGVsL1NTRy9PVEMwggEiMA0GCSqGSIb3 +DQEBAQUAA4IBDwAwggEKAoIBAQDVfUYTe57HUGh67jh2Yhjxr/fCZws9pl09Afw/ +Ser79ixj/Uae5iDLWDapAzxUCWkL5rEwv6KwKY52pNlO3m3dKgN9AWCmb2X9i8NE +G/EKSg8HAlXcamo8Ou0VP7/UCRsFhbE7KtyxjxJISGqm/RNA/yHU5qAvB9kGJLsn +Sh++praHaszsxj3Sf881P30Gvsx6H0hRfRdQbyNHuXmbM1BZ+ZqOgp17Za51vLkW +FHtC6YZgJjYyz7I48zzEZ+qGXu9xqwJzN11p7njTD9RKxCkn6OAsvJFiKrSnGSKm +tvFkhB61YrH+2UK0/od/IMgTwb5gTi//A78ROXb1tQY70sljAgMBAAGjdzB1MA4G +A1UdDwEB/wQEAwICpDAaBgNVHSUEEzARBgRVHSUABgkrBgEEAYJXCAIwDwYDVR0T +AQH/BAUwAwEB/zA2BgNVHREELzAtgglsb2NhbGhvc3SBIHN1cGVybm92YS1hcmNo +QGVjbGlzdHMuaW50ZWwuY29tMA0GCSqGSIb3DQEBCwUAA4IBAQCZ4qraAdCVgEOB +YhmFRU+5bxrn6Fdohj9atVon++Crk7pGlhyItTFQl4F/gw/lyjUsIxdZytDdxzLa +Me9hG9hk/W/FeeB5nVTyH2Z3QRd7v6yTptuSx2fnTeZ3nuFJcCQSi0cSI+tPxaez +vjqCtDhO4ztKmhnmsN9fRka4OkvRhc4s83llmB7PP1J55/GBM4XvUvdhXlVhn7J4 +5QzUiG1IbJxUo4+A3dfRQgVUacbOQwl45rB5nW3kKLh+ca6ap47gt9aZJT3q7QbD +Wal3dU+c38pmgGVtoNA2YA5jJFX4q68wOtRGLVFOjJvgQBgTSb9m+tGshUUg7XlV +Hc8k8v4A +-----END CERTIFICATE-----` + +const testCertScheduler = ` +-----BEGIN CERTIFICATE----- +MIIDMjCCAhqgAwIBAgIRAP2hffMEmIv9DeOhGX0spY8wDQYJKoZIhvcNAQELBQAw 
+GDEWMBQGA1UEChMNSW50ZWwvU1NHL09UQzAeFw0xNjAzMTgxNTQ1MThaFw0xNzAz +MTgxNTQ1MThaMBgxFjAUBgNVBAoTDUludGVsL1NTRy9PVEMwggEiMA0GCSqGSIb3 +DQEBAQUAA4IBDwAwggEKAoIBAQDVfUYTe57HUGh67jh2Yhjxr/fCZws9pl09Afw/ +Ser79ixj/Uae5iDLWDapAzxUCWkL5rEwv6KwKY52pNlO3m3dKgN9AWCmb2X9i8NE +G/EKSg8HAlXcamo8Ou0VP7/UCRsFhbE7KtyxjxJISGqm/RNA/yHU5qAvB9kGJLsn +Sh++praHaszsxj3Sf881P30Gvsx6H0hRfRdQbyNHuXmbM1BZ+ZqOgp17Za51vLkW +FHtC6YZgJjYyz7I48zzEZ+qGXu9xqwJzN11p7njTD9RKxCkn6OAsvJFiKrSnGSKm +tvFkhB61YrH+2UK0/od/IMgTwb5gTi//A78ROXb1tQY70sljAgMBAAGjdzB1MA4G +A1UdDwEB/wQEAwICpDAaBgNVHSUEEzARBgRVHSUABgkrBgEEAYJXCAIwDwYDVR0T +AQH/BAUwAwEB/zA2BgNVHREELzAtgglsb2NhbGhvc3SBIHN1cGVybm92YS1hcmNo +QGVjbGlzdHMuaW50ZWwuY29tMA0GCSqGSIb3DQEBCwUAA4IBAQCZ4qraAdCVgEOB +YhmFRU+5bxrn6Fdohj9atVon++Crk7pGlhyItTFQl4F/gw/lyjUsIxdZytDdxzLa +Me9hG9hk/W/FeeB5nVTyH2Z3QRd7v6yTptuSx2fnTeZ3nuFJcCQSi0cSI+tPxaez +vjqCtDhO4ztKmhnmsN9fRka4OkvRhc4s83llmB7PP1J55/GBM4XvUvdhXlVhn7J4 +5QzUiG1IbJxUo4+A3dfRQgVUacbOQwl45rB5nW3kKLh+ca6ap47gt9aZJT3q7QbD +Wal3dU+c38pmgGVtoNA2YA5jJFX4q68wOtRGLVFOjJvgQBgTSb9m+tGshUUg7XlV +Hc8k8v4A +-----END CERTIFICATE----- +-----BEGIN RSA PRIVATE KEY----- +MIIEpAIBAAKCAQEA1X1GE3uex1Boeu44dmIY8a/3wmcLPaZdPQH8P0nq+/YsY/1G +nuYgy1g2qQM8VAlpC+axML+isCmOdqTZTt5t3SoDfQFgpm9l/YvDRBvxCkoPBwJV +3GpqPDrtFT+/1AkbBYWxOyrcsY8SSEhqpv0TQP8h1OagLwfZBiS7J0ofvqa2h2rM +7MY90n/PNT99Br7Meh9IUX0XUG8jR7l5mzNQWfmajoKde2Wudby5FhR7QumGYCY2 +Ms+yOPM8xGfqhl7vcasCczddae540w/USsQpJ+jgLLyRYiq0pxkiprbxZIQetWKx +/tlCtP6HfyDIE8G+YE4v/wO/ETl29bUGO9LJYwIDAQABAoIBAQC8d9tlV7zUOCgE +Xkl6OR/MPYx8EnvZ8QRe12HYfWj1Bl4p525w2Lgay8V1b2XSynvyBbZnUsZMvmMG +WoF9Ht/eXzi1IoIwbCedrcS0W/ZvHvIlPeqOq2MdyOeD2sN+bItuVJgho7UxCx0Y +stV0lfZpatJzISZIXqU4xzRxev6LTwNreIfumTK0GlJFbKxm8dDbwL/mVSSXJTI6 +cuX3DkmK1XZPw3wadNG0dV5DLae5XNXptrKTNEqrkBGcNyLe58uc0go4I1ZNGDPO +stfWhyjF+I6bCIbPV17SA2MLPnpt9rMWTF/c2tlnryhDcq+/DHC4XieC3C1A8llq +ory1IkABAoGBAP+VMSWAZ6PXo7rujcjjBacbhG31VuAV+yvgQuxrjs9x1fenA9r6 +PYBcOKmoSXI2nRqB/SOPvkbN0CLdQI95hGm1jb8eJOtFGEJDnKhAw6HYup2P6Cb5 
+7kgtKivFUCDQ9jJ0SofhUGprQVa3Eiekp4CpSSos0tx3hDedQJmi5eRRAoGBANXW +fayhdj3iuzORvjn/6Hlqa7jFZrvYtehBWhbpduhxEF/oX0YNrxB3OBVopJ2MR0Gk +5q6g6eyxiDYGjHUBep4RgVaLPf4bSS6+ASkqbmhiSjRd3DHylJLOdGSxHJhPFtas +tRfPHY9/gaalY6OctlkppOG12yZvqBqvsWsTaGlzAoGBAOKfaF+nVr6J081J/4CU +BIwjMy7I70ptUzSZet3llVDN0HrypdGCOAxcOYX+CXnzgSRjBJGGwKJJlneDld5i +Uo3lBRXk5bmTn6oKB5uNKaqV4Qsa8i+1R5rKKW18XMtsXhFo2jlYCRUDm4EHfVqX +fkrwUEbMTgBdHWiuz/wq3AuxAoGAbcuOB793qNrr59SjacBeK0lbOioRT847yJ8V +lMIXDc5P7A6yLJQGGVw9bbCnJUXLqIb08yEoOyIEoEjIgaNaxCKU2EzybbCw6NEi +Y3Kn8ezV0QGIeBTn+GENHk5aMlIZlexjp2/u66k6dshg2rcyYaGiSUpI1flqxRkE +7cevSsUCgYAJxfvJFeZDGg9K7rI22brGHNSOpbvzghsjGh714vf5RoEWHinAVkRF +bT8G3EFXLypD7sD8jpUTXwoB/bahilp7qvYRCNrLwN5gmY5iG2L4kuDW0UJY/k6Z +CTh6uDEAPW/Tr9+DzvbJSnHISfJebGJ9117NN/NLyFsyCrRwfMuNSw== +-----END RSA PRIVATE KEY-----` + +const testCertAgent = ` +-----BEGIN CERTIFICATE----- +MIIDLjCCAhagAwIBAgIQYRNv2VQIDjHzXcBouotxijANBgkqhkiG9w0BAQsFADAY +MRYwFAYDVQQKEw1JbnRlbC9TU0cvT1RDMB4XDTE2MDMxODE1NDczMVoXDTE3MDMx +ODE1NDczMVowGDEWMBQGA1UEChMNSW50ZWwvU1NHL09UQzCCASIwDQYJKoZIhvcN +AQEBBQADggEPADCCAQoCggEBAL9UKGh74IY4UMBkVDdLm8iSKi/+0QvksGfWHEWB +lE99MVrgNmS1SrxmdnfPnyNY0QW1G69gfXBdyTCySR8NLH1C7/nYofC/hcPHX0x3 +OZoPhaa3uNbx4DpwSE53+Q0xANGTN8srmlS4BLOZjGn7e43jUxo+OpkpL10L+npJ +W/M49XiVU21+ybmCkio/FusMbL+/lSpD4nwbkzlmRpb+4UBpZlyOPw2RolbCTq1T +19odsZccqiJEmwz0D6cXooggKbQ9++vDeqBElpu9bud8scyIofYQj+CbmpR8Qp+O +DpPRZhgc5THTvGSvH1vV2F9MKX1uBIloFOPEn7CQ2C7TJq0CAwEAAaN0MHIwDgYD +VR0PAQH/BAQDAgKkMBoGA1UdJQQTMBEGBFUdJQAGCSsGAQQBglcIATAMBgNVHRMB +Af8EAjAAMDYGA1UdEQQvMC2CCWxvY2FsaG9zdIEgc3VwZXJub3ZhLWFyY2hAZWNs +aXN0cy5pbnRlbC5jb20wDQYJKoZIhvcNAQELBQADggEBAC/2MHux58u9nCqM4RTy +fqza3yPEUTMgAn9QHi7i/yYLLJWSfkiWQ0G2gwf+oMLWeuyxnqdQ4gsRswUne9E3 +Xodt0BRlSlwDGvshQBBJ18Jc0LSOeGjMEI943wrL5TeOMzstjx4QbizrWJshoyhK +i0KQQL3eKgGfy4Xhqw/PL4H2fWkTjkd1LE2km5xSkinstHuytEEbrPcW1qftdBAo +sKSZM3lAEiBQ2NnJdLM8ENRWrwzeVTuFGrxbdzsxlJH7cUooK9BRx7H4Xg08Ndrp +028vQ0uqgfzmYR/3Xc0J3TQIntbKThrKg7vdMCw/UXkCokrSt65VRZl1UD5D9MU2 +iLY= 
+-----END CERTIFICATE----- +-----BEGIN RSA PRIVATE KEY----- +MIIEowIBAAKCAQEAv1QoaHvghjhQwGRUN0ubyJIqL/7RC+SwZ9YcRYGUT30xWuA2 +ZLVKvGZ2d8+fI1jRBbUbr2B9cF3JMLJJHw0sfULv+dih8L+Fw8dfTHc5mg+Fpre4 +1vHgOnBITnf5DTEA0ZM3yyuaVLgEs5mMaft7jeNTGj46mSkvXQv6eklb8zj1eJVT +bX7JuYKSKj8W6wxsv7+VKkPifBuTOWZGlv7hQGlmXI4/DZGiVsJOrVPX2h2xlxyq +IkSbDPQPpxeiiCAptD3768N6oESWm71u53yxzIih9hCP4JualHxCn44Ok9FmGBzl +MdO8ZK8fW9XYX0wpfW4EiWgU48SfsJDYLtMmrQIDAQABAoIBAFR4TVbDyj63wj8O +jHfVM7P7hBCoiZacop1VVCoDqXzmotGiR6FywMoo2ojO601puu6wJMbq7LadUWPk +co/4+vlagiRmVii6Bc6HyTUzNgUkcTdHyZ1sMDjcta+fHB+M0PW1714NpBdfHwRD ++FAaLVRZVqkxbCGOo1CLJ0yx8pyQdgnGahVCi8Iq8Vh8qbuLLKwwVEMvh94JoAq1 +GFabpIaMQd85zcLBrpk/EQ26cOKnfZ5+KC7AoYL+SF/SZU9y1RRFrJg8I1veoUoY +KaCeADWOSwyNLtmA7ihnGDQF6XPEvHo03LIhnCNPVk6eBz07htZzR+jtvlfT4py/ +3FZo8akCgYEA/CcbbAxgfTbXa+0fJuASG3cBLXjfWP/gSuUh1GYB1ywEbItzLdXv +IsnHm1zjZpLx8lZ6bSVBKwG5BLnjffDFJUjeQnJ3HfUKaCHfVQ9yvHMMo+A4exCN +3xYnQv4g4fsOlQgB6KPGj6etoLpXqxeo+1Eq3wW0vhml7r60yIAVCCMCgYEAwj95 +1ZSkVTXUJ23twF6ZM/freKrLTYl0Xgndm8vFSefLhGmAdBOqM5Nw68qBwsqOZZJ1 +nz47TMMxvY+oo8UPxYUhv1TyAzxrInRVcQW/4MGHdC1ggDNGIXptjfH1GPmtYqKP +CwDWz91N/tWxBGRnR9lkurPw/xO2lcJKQkV/Gu8CgYANyxHb8j1g/BwComD+3mj/ +KZ5d32vQUYbyceBM3xugqp6/VU2Cp15Cd+k8XXNvDADCMerWh6At+xUk5gpEvGP8 +ayUFW3+amunr0laGL6cmGeEKIdzVFo28M1kIeCBSXEpq6po4IKVe/FwqG6dqC4xJ +2yHOO2CsuXhzO+llwmPkIwKBgQCkiA2TejcDsfeEellyou4TlcBO0iQoBfCaIrMh +3qS6Z+r7uj1ClNGKpC12m23z54xbvoWAn76s030TOla2eRlVzeF65eGkGg4I/g7N +D/ip0GMCZdkiXcveWYvoOnTHddyF+nk88bYCFUqWOryYos7UjfADMJ1GAZWbHhm2 +LpVt4wKBgFtKO5kLeGbleYZ6Kxg4LBJtCDxJIpsDJ9y2KNAvAckyexxSeixovkWF +P6O2qeLKaT6YrbtTDyyHvMSMnL3bNiWznxpemAYMRgjmfuhmfTuHZQgiW0AIDyhk +H2+xzwHWiSjYUnwwegoApG/c/vkcumsy/UlCsSlVs97Glp8OPYde +-----END RSA PRIVATE KEY-----` diff --git a/template.go b/template.go new file mode 100644 index 000000000..6b91d29ed --- /dev/null +++ b/template.go @@ -0,0 +1,22 @@ +// +// Copyright (c) 2016 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not 
use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// +build ignore + +package main + +import ( + "github.com/01org/ciao/ssntp" +) diff --git a/test-cases/README.md b/test-cases/README.md new file mode 100644 index 000000000..1b7d2753f --- /dev/null +++ b/test-cases/README.md @@ -0,0 +1,2 @@ +This runs go test across components and reports status. Handy for A/B +red/green testing both for individual developers and in an org's CI/CD. diff --git a/test-cases/test-cases.go b/test-cases/test-cases.go new file mode 100644 index 000000000..19203e92f --- /dev/null +++ b/test-cases/test-cases.go @@ -0,0 +1,314 @@ +// +// Copyright (c) 2016 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
//

package main

import (
	"bufio"
	"bytes"
	"encoding/json"
	"flag"
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
	"io/ioutil"
	"log"
	"os"
	"os/exec"
	"path"
	"regexp"
	"strings"
	"text/template"
)

// PackageInfo describes one Go package as reported by "go list": its
// import path, source directory and the test files it contains.
type PackageInfo struct {
	Name  string   `json:"name"`
	Path  string   `json:"path"`
	Files []string `json:"files"`
	// XFiles are the external (package foo_test) test files.
	XFiles []string `json:"xfiles"`
}

// TestInfo holds the parsed documentation and the recorded outcome of a
// single test function.
type TestInfo struct {
	Name           string
	Summary        string
	Description    string
	ExpectedResult string
	Pass           bool
	Result         string
	TimeTaken      string
}

// PackageTests aggregates the tests discovered in one package together
// with its coverage figure.
type PackageTests struct {
	Name     string
	Coverage string
	Tests    []*TestInfo
}

// testResults records the PASS/FAIL status and duration parsed from
// "go test -v" output for one test.
type testResults struct {
	result    string
	timeTaken string
}

// goListTemplate renders one JSON object per package. Each object ends
// with a comma; the final comma is stripped in findTestFiles so that
// the concatenation forms a valid JSON array.
const goListTemplate = `{
"name" : "{{.ImportPath}}",
"path" : "{{.Dir}}",
"files" : [ {{range $index, $elem := .TestGoFiles }}{{if $index}}, "{{$elem}}"{{else}}"{{$elem}}"{{end}}{{end}} ],
"xfiles" : [ {{range $index, $elem := .XTestGoFiles }}{{if $index}}, "{{$elem}}"{{else}}"{{$elem}}"{{end}}{{end}} ]
},
`

// htmlTemplate is the report page: one heading, coverage line and
// results table per package, styled by the optional user supplied CSS.
// NOTE(review): the original markup was mangled in transit; this is a
// reconstruction that renders every field the code populates
// (.CSS, .Tests, .Name, .Coverage and the per-test columns) — confirm
// against the upstream file.
const htmlTemplate = `<html>
<head>
<title>Test Cases</title>
<style type="text/css">
{{.CSS}}
</style>
</head>
<body>
{{range .Tests}}
<h1>{{.Name}}</h1>
<p>Coverage: {{.Coverage}}</p>
<table>
<tr><th>Name</th><th>Summary</th><th>Description</th><th>ExpectedResult</th><th>Result</th><th>Time Taken</th></tr>
{{range .Tests}}
<tr><td>{{.Name}}</td><td>{{.Summary}}</td><td>{{.Description}}</td><td>{{.ExpectedResult}}</td><td>{{.Result}}</td><td>{{.TimeTaken}}</td></tr>
{{end}}
</table>
{{end}}
</body>
</html>
`

// resultRegexp matches "--- PASS: TestFoo (0.01s)" style lines.
var resultRegexp *regexp.Regexp

// coverageRegexp matches the "coverage: NN.N%" summary line.
var coverageRegexp *regexp.Regexp

var cssPath string

func init() {
	flag.StringVar(&cssPath, "css", "", "Full path to CSS file")
	resultRegexp = regexp.MustCompile(`--- (FAIL|PASS): ([^\s]+) \(([^\)]+)\)`)
	coverageRegexp = regexp.MustCompile(`^coverage: ([^\s]+)`)
}

// parseCommentGroup splits a test function's doc comment into up to
// three blank-line separated sections: summary, description and
// expected result. Additional paragraphs remain part of the expected
// result.
func parseCommentGroup(ti *TestInfo, comment string) {
	// BUG FIX: the original split with a limit of 4 while fields only
	// has 3 entries, so a doc comment with four or more paragraphs
	// panicked with an index out of range. Splitting with a limit of 3
	// keeps any extra paragraphs inside the last section instead.
	groups := strings.SplitN(comment, "\n\n", 3)
	fields := []*string{&ti.Summary, &ti.Description, &ti.ExpectedResult}
	for i, c := range groups {
		*fields[i] = c
	}
}

// isTestingFunc reports whether decl has the shape of a Go test
// function: a name starting with "Test" and a single *testing.T
// parameter.
func isTestingFunc(decl *ast.FuncDecl) bool {
	if !strings.HasPrefix(decl.Name.String(), "Test") {
		return false
	}

	paramList := decl.Type.Params.List
	if len(paramList) != 1 {
		return false
	}

	// The single parameter must be a pointer...
	recType, ok := paramList[0].Type.(*ast.StarExpr)
	if !ok {
		return false
	}

	// ...to a selector expression...
	pt, ok := recType.X.(*ast.SelectorExpr)
	if !ok {
		return false
	}

	id, ok := pt.X.(*ast.Ident)
	if !ok {
		return false
	}

	// ...spelled exactly "testing.T".
	return id.Name == "testing" && pt.Sel.Name == "T"
}

// parseTestFile extracts a TestInfo for every test function found in
// the Go source file at filePath, parsing each test's doc comment for
// its summary, description and expected result.
func parseTestFile(filePath string) ([]*TestInfo, error) {
	tests := make([]*TestInfo, 0, 32)
	fs := token.NewFileSet()
	tr, err := parser.ParseFile(fs, filePath, nil, parser.ParseComments)
	if err != nil {
		return nil, err
	}

	for _, decl := range tr.Decls {
		if decl, ok := decl.(*ast.FuncDecl); ok {
			if !isTestingFunc(decl) {
				continue
			}

			ti := &TestInfo{Name: decl.Name.String()}
			tests = append(tests, ti)

			// Undocumented tests are still listed, just without
			// summary/description/expected result.
			if decl.Doc == nil {
				continue
			}

			parseCommentGroup(ti, decl.Doc.Text())
		}
	}

	return tests, nil
}

// extractTests builds a PackageTests for every package that has test
// files, skipping vendored packages. Parse failures are reported on
// stderr but do not abort the scan.
func extractTests(packages []PackageInfo) []*PackageTests {
	pts := make([]*PackageTests, 0, len(packages))
	for _, p := range packages {
		if len(p.Files) == 0 || strings.Contains(p.Name, "/vendor/") {
			continue
		}
		packageTest := &PackageTests{
			Name: p.Name,
		}

		files := make([]string, 0, len(p.Files)+len(p.XFiles))
		files = append(files, p.Files...)
		files = append(files, p.XFiles...)
		for _, f := range files {
			filePath := path.Join(p.Path, f)
			ti, err := parseTestFile(filePath)
			if err != nil {
				fmt.Fprintf(os.Stderr, "Failed to parse %s: %s\n",
					filePath, err)
				continue
			}
			packageTest.Tests = append(packageTest.Tests, ti...)
		}
		pts = append(pts, packageTest)
	}
	return pts
}

// findTestFiles runs "go list" over pack and returns the discovered
// packages. The per-package JSON objects emitted by goListTemplate are
// assembled into an array by stripping the trailing comma and wrapping
// the output in brackets.
func findTestFiles(pack string) ([]PackageInfo, error) {
	var output bytes.Buffer
	fmt.Fprintln(&output, "[")
	cmd := exec.Command("go", "list", "-f", goListTemplate, pack)
	cmd.Stdout = &output
	err := cmd.Run()
	if err != nil {
		return nil, err
	}
	lastComma := bytes.LastIndex(output.Bytes(), []byte{','})
	if lastComma != -1 {
		output.Truncate(lastComma)
	}
	fmt.Fprintln(&output, "]")
	var testPackages []PackageInfo
	err = json.Unmarshal(output.Bytes(), &testPackages)
	if err != nil {
		return nil, err
	}
	return testPackages, nil
}

// runPackageTests runs "go test -v -cover" for p and fills in each
// test's result, duration and the package coverage. The command's
// error is deliberately ignored: failing tests make "go test" exit
// non-zero, and those failures are what we are here to report.
func runPackageTests(p *PackageTests) {
	var output bytes.Buffer
	var coverage string

	results := make(map[string]*testResults)

	cmd := exec.Command("go", "test", p.Name, "-v", "-cover")
	cmd.Stdout = &output
	_ = cmd.Run()

	scanner := bufio.NewScanner(&output)
	for scanner.Scan() {
		line := scanner.Text()
		matches := resultRegexp.FindStringSubmatch(line)
		if matches != nil && len(matches) == 4 {
			results[matches[2]] = &testResults{matches[1], matches[3]}
			continue
		}

		// Only the first coverage line is recorded.
		if coverage == "" {
			matches := coverageRegexp.FindStringSubmatch(line)
			if matches == nil || len(matches) != 2 {
				continue
			}
			coverage = matches[1]
		}
	}

	for _, t := range p.Tests {
		res := results[t.Name]
		if res == nil {
			// Discovered by the parser but absent from the output,
			// e.g. skipped by build tags.
			t.Result = "NOT RUN"
			t.TimeTaken = "N/A"
		} else {
			t.Result = res.result
			t.Pass = res.result == "PASS"
			t.TimeTaken = res.timeTaken
		}
	}

	if coverage != "" {
		p.Coverage = coverage
	} else {
		p.Coverage = "Unknown"
	}
}

// main discovers the test files of the package pattern given as the
// first argument (default "."), runs them and writes an HTML report to
// stdout, optionally styled with the CSS file named by -css.
func main() {

	flag.Parse()

	pack := flag.Arg(0)
	if pack == "" {
		pack = "."
	}

	var css string
	if cssPath != "" {
		cssBytes, err := ioutil.ReadFile(cssPath)
		if err != nil {
			// Non-fatal: the report is still useful unstyled.
			log.Printf("Unable to read css file %s : %v",
				cssPath, err)
		} else {
			css = string(cssBytes)
		}
	}

	packages, err := findTestFiles(pack)
	if err != nil {
		log.Fatalf("Unable to discover test files: %s", err)
	}

	tests := extractTests(packages)
	for _, p := range tests {
		runPackageTests(p)
	}

	tmpl, err := template.New("tests").Parse(htmlTemplate)
	if err != nil {
		log.Fatalf("Unable to parse html template: %s\n", err)
	}

	err = tmpl.Execute(os.Stdout, &struct {
		Tests []*PackageTests
		CSS   string
	}{
		tests,
		css,
	})
	if err != nil {
		log.Fatalf("Unable to generate report: %s\n", err)
	}
}