From b484ed25aea2f3a2196c3be0b155176b9d78ce41 Mon Sep 17 00:00:00 2001 From: Damian Martinez Date: Fri, 25 Aug 2017 13:45:59 -0700 Subject: [PATCH 1/5] Trying to golang/dep tool --- .gitignore | 1 + Gopkg.lock | 59 ++++++++++++++++++++++++++++++++++++++++++++++++++++++ Gopkg.toml | 28 ++++++++++++++++++++++++++ 3 files changed, 88 insertions(+) create mode 100644 Gopkg.lock create mode 100644 Gopkg.toml diff --git a/.gitignore b/.gitignore index 6726f08..a976e00 100644 --- a/.gitignore +++ b/.gitignore @@ -1,2 +1,3 @@ rbd-docker-plugin *.tpkg +vendor diff --git a/Gopkg.lock b/Gopkg.lock new file mode 100644 index 0000000..3e9217d --- /dev/null +++ b/Gopkg.lock @@ -0,0 +1,59 @@ +# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. + + +[[projects]] + name = "github.com/Microsoft/go-winio" + packages = ["."] + revision = "78439966b38d69bf38227fbf57ac8a6fee70f69a" + version = "v0.4.5" + +[[projects]] + name = "github.com/coreos/go-systemd" + packages = ["activation"] + revision = "d2196463941895ee908e13531a23a39feb9e1243" + version = "v15" + +[[projects]] + name = "github.com/davecgh/go-spew" + packages = ["spew"] + revision = "04cdfd42973bb9c8589fd6a731800cf222fde1a9" + +[[projects]] + branch = "master" + name = "github.com/docker/go-connections" + packages = ["sockets"] + revision = "3ede32e2033de7505e6500d6c868c2b9ed9f169d" + +[[projects]] + branch = "master" + name = "github.com/docker/go-plugins-helpers" + packages = ["sdk","volume"] + revision = "a9ef19c479cb60e751efa55f7f2b265776af1abf" + +[[projects]] + name = "github.com/pmezard/go-difflib" + packages = ["difflib"] + revision = "d8ed2627bdf02c080bf22230dbb337003b7aba2d" + +[[projects]] + branch = "master" + name = "github.com/stretchr/testify" + packages = ["assert","require"] + revision = "890a5c3458b43e6104ff5da8dfa139d013d77544" + +[[projects]] + name = "golang.org/x/net" + packages = ["proxy"] + revision = "c8c74377599bd978aee1cf3b9b63a8634051cec2" + +[[projects]] + name = "golang.org/x/sys" + packages = ["windows"] + revision = "07c182904dbd53199946ba614a412c61d3c548f5" + +[solve-meta] + analyzer-name = "dep" + analyzer-version = 1 + inputs-digest = "55b46cf66d18e7169bbd365d3503b3c5ca803827c37963e8dab140b71b3b6098" + solver-name = "gps-cdcl" + solver-version = 1 diff --git a/Gopkg.toml b/Gopkg.toml new file mode 100644 index 0000000..0c59e5b --- /dev/null +++ b/Gopkg.toml @@ -0,0 +1,28 @@ + +# Gopkg.toml example +# +# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md +# for detailed Gopkg.toml documentation. +# +# required = ["github.com/user/thing/cmd/thing"] +# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"] +# +# [[constraint]] +# name = "github.com/user/project" +# version = "1.0.0" +# +# [[constraint]] +# name = "github.com/user/project2" +# branch = "dev" +# source = "github.com/myfork/project2" +# +# [[override]] +# name = "github.com/x/y" +# version = "2.4.0" + + +[[constraint]] + name = "github.com/docker/go-plugins-helpers" + +[[constraint]] + name = "github.com/stretchr/testify" From 213ba620b02cceeb739de7220d20fedf45f7fa70 Mon Sep 17 00:00:00 2001 From: Damian Martinez Date: Fri, 25 Aug 2017 13:48:25 -0700 Subject: [PATCH 2/5] Updating fresh. 
first version was conversion from glide --- Gopkg.lock | 42 ++++++++++++++++++++++++++++++++---------- Gopkg.toml | 2 ++ 2 files changed, 34 insertions(+), 10 deletions(-) diff --git a/Gopkg.lock b/Gopkg.lock index 3e9217d..69bc9f1 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -7,6 +7,12 @@ revision = "78439966b38d69bf38227fbf57ac8a6fee70f69a" version = "v0.4.5" +[[projects]] + name = "github.com/Sirupsen/logrus" + packages = ["."] + revision = "f006c2ac4710855cf0f916dd6b77acf6b048dc6e" + version = "v1.0.3" + [[projects]] name = "github.com/coreos/go-systemd" packages = ["activation"] @@ -16,13 +22,14 @@ [[projects]] name = "github.com/davecgh/go-spew" packages = ["spew"] - revision = "04cdfd42973bb9c8589fd6a731800cf222fde1a9" + revision = "346938d642f2ec3594ed81d874461961cd0faa76" + version = "v1.1.0" [[projects]] - branch = "master" name = "github.com/docker/go-connections" packages = ["sockets"] - revision = "3ede32e2033de7505e6500d6c868c2b9ed9f169d" + revision = "990a1a1a70b0da4c4cb70e117971a4f0babfbf1a" + version = "v0.2.1" [[projects]] branch = "master" @@ -30,30 +37,45 @@ packages = ["sdk","volume"] revision = "a9ef19c479cb60e751efa55f7f2b265776af1abf" +[[projects]] + name = "github.com/opencontainers/runc" + packages = ["libcontainer/user"] + revision = "baf6536d6259209c3edfa2b22237af82942d3dfa" + version = "v0.1.1" + [[projects]] name = "github.com/pmezard/go-difflib" packages = ["difflib"] - revision = "d8ed2627bdf02c080bf22230dbb337003b7aba2d" + revision = "792786c7400a136282c1664665ae0a8db921c6c2" + version = "v1.0.0" [[projects]] - branch = "master" name = "github.com/stretchr/testify" packages = ["assert","require"] - revision = "890a5c3458b43e6104ff5da8dfa139d013d77544" + revision = "69483b4bd14f5845b5a1e55bca19e954e827f1d0" + version = "v1.1.4" + +[[projects]] + branch = "master" + name = "golang.org/x/crypto" + packages = ["ssh/terminal"] + revision = "eb71ad9bd329b5ac0fd0148dd99bd62e8be8e035" [[projects]] + branch = "master" name = "golang.org/x/net" packages = ["proxy"] - revision = "c8c74377599bd978aee1cf3b9b63a8634051cec2" + revision = "57efc9c3d9f91fb3277f8da1cff370539c4d3dc5" [[projects]] + branch = "master" name = "golang.org/x/sys" - packages = ["windows"] - revision = "07c182904dbd53199946ba614a412c61d3c548f5" + packages = ["unix","windows"] + revision = "2d6f6f883a06fc0d5f4b14a81e4c28705ea64c15" [solve-meta] analyzer-name = "dep" analyzer-version = 1 - inputs-digest = "55b46cf66d18e7169bbd365d3503b3c5ca803827c37963e8dab140b71b3b6098" + inputs-digest = "3de365ff1ec407fda506c9fcc6b7cb6680ff3aee5be4694ddb4f3d797a9da5fa" solver-name = "gps-cdcl" solver-version = 1 diff --git a/Gopkg.toml b/Gopkg.toml index 0c59e5b..39dd351 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -22,7 +22,9 @@ [[constraint]] + branch = "master" name = "github.com/docker/go-plugins-helpers" [[constraint]] name = "github.com/stretchr/testify" + version = "1.1.4" From b0d199109e658108894603608b05d46a2087d53e Mon Sep 17 00:00:00 2001 From: Damian Martinez Date: Fri, 25 Aug 2017 13:50:49 -0700 Subject: [PATCH 3/5] Remove any code using go-ceph lib. Update Volume API implementation to latest from docker/go-plugins-helpers/volume lib --- driver.go | 626 +++++++++++++------------------------------------ driver_test.go | 10 +- main.go | 24 +- unlock_test.go | 98 +------- utils.go | 17 ++ 5 files changed, 191 insertions(+), 584 deletions(-) diff --git a/driver.go b/driver.go index a4f46b4..f336fe0 100644 --- a/driver.go +++ b/driver.go @@ -3,30 +3,29 @@ // license that can be found in the LICENSE file. 
package main -// Ceph RBD Docker VolumeDriver Plugin -// -// Run rbd-docker-plugin service, which creates a socket that can accept JSON -// HTTP POSTs from docker engine. -// -// Due to some issues using the go-ceph library for locking/unlocking, we -// reimplemented all functionality to use shell CLI commands via the 'rbd' -// executable. To re-enable old go-ceph functionality, use --go-ceph flag. -// -// System Requirements: -// - requires rbd CLI binary for shell operation (default) -// - requires ceph, rados and rbd development headers to use go-ceph -// yum install ceph-devel librados2-devel librbd1-devel -// -// Plugin name: rbd (yp-rbd? ceph-rbd?) -- now configurable via --name -// -// docker run --volume-driver=rbd -v imagename:/mnt/dir IMAGE [CMD] -// -// golang github code examples: -// - https://github.com/docker/docker/blob/master/experimental/plugins_volume.md -// - https://github.com/ceph/go-ceph -// - https://github.com/docker/go-plugins-helpers/tree/master/volume -// - https://github.com/calavera/docker-volume-glusterfs -// - https://github.com/AcalephStorage/docker-volume-ceph-rbd +/** + * Ceph RBD Docker VolumeDriver Plugin + * + * rbd-docker-plugin service creates a UNIX socket that can accept Volume + * Driver requests (JSON HTTP POSTs) from Docker Engine. + * + * Historical note: Due to some issues using the go-ceph library for + * locking/unlocking, we reimplemented all functionality to use shell CLI + * commands via the 'rbd' executable. + * + * System Requirements: + * - requires rbd CLI binary in PATH + * + * Plugin name: rbd -- configurable via --name + * + * % docker run --volume-driver=rbd -v imagename:/mnt/dir IMAGE [CMD] + * + * golang github code examples: + * - https://github.com/docker/docker/blob/master/experimental/plugins_volume.md + * - https://github.com/docker/go-plugins-helpers/tree/master/volume + * - https://github.com/calavera/docker-volume-glusterfs + * - https://github.com/AcalephStorage/docker-volume-ceph-rbd + */ import ( "errors" @@ -41,49 +40,24 @@ import ( "sync" "time" - "github.com/ceph/go-ceph/rados" - "github.com/ceph/go-ceph/rbd" - dkvolume "github.com/docker/go-plugins-helpers/volume" + "github.com/docker/go-plugins-helpers/volume" ) -// TODO: use versioned dependencies -- e.g. newest dkvolume already has breaking changes? - var ( imageNameRegexp = regexp.MustCompile(`^(([-_.[:alnum:]]+)/)?([-_.[:alnum:]]+)(@([0-9]+))?$`) // optional pool or size in image name rbdUnmapBusyRegexp = regexp.MustCompile(`^exit status 16$`) ) -// Volume is the Docker concept which we map onto a Ceph RBD Image +// Volume is our local struct to store info about Ceph RBD Image type Volume struct { - name string // RBD Image name - device string // local host kernel device (e.g. /dev/rbd1) - locker string // track the lock name - fstype string - pool string -} - -// TODO: finish modularizing and split out go-ceph and shell-cli implementations -// -// in progress: interface to abstract the ceph operations - either go-ceph lib or sh cli commands -type RbdImageDriver interface { - // shutdown() - // connect(pool string) error // ?? 
only go-ceph - - rbdImageExists(pool, findName string) (bool, error) - createRBDImage(pool string, name string, size int, fstype string) error - rbdImageIsLocked(pool, name string) (bool, error) - lockImage(pool, imagename string) (string, error) - unlockImage(pool, imagename, locker string) error - removeRBDImage(pool, name string) error - renameRBDImage(pool, name, newname string) error - // mapImage(pool, name string) - // unmapImageDevice(device string) - // mountDevice(device, mount, fstype string) - // unmountDevice(device string) + Name string // RBD Image name + Device string // local host kernel device (e.g. /dev/rbd1) + Locker string // track the lock name + FStype string + Pool string + ID string } -// - // our driver type for impl func type cephRBDVolumeDriver struct { // - using default ceph cluster name ("ceph") @@ -102,55 +76,49 @@ type cephRBDVolumeDriver struct { config string // ceph config file to read volumes map[string]*Volume // track locally mounted volumes m *sync.Mutex // mutex to guard operations that change volume maps or use conn - - useGoCeph bool // whether to setup/use go-ceph lib methods (default: false - use shell cli) - conn *rados.Conn // create a connection for each API operation - ioctx *rados.IOContext // context for requested pool } // newCephRBDVolumeDriver builds the driver struct, reads config file and connects to cluster -func newCephRBDVolumeDriver(pluginName, cluster, userName, defaultPoolName, rootBase, config string, useGoCeph bool) cephRBDVolumeDriver { +func newCephRBDVolumeDriver(pluginName, cluster, userName, defaultPoolName, rootBase, config string) cephRBDVolumeDriver { // the root mount dir will be based on docker default root and plugin name - pool added later per volume mountDir := filepath.Join(rootBase, pluginName) log.Printf("INFO: newCephRBDVolumeDriver: setting base mount dir=%s", mountDir) // fill everything except the connection and context driver := cephRBDVolumeDriver{ - name: pluginName, - cluster: cluster, - user: userName, - pool: defaultPoolName, - root: mountDir, - config: config, - volumes: map[string]*Volume{}, - m: &sync.Mutex{}, - useGoCeph: useGoCeph, + name: pluginName, + cluster: cluster, + user: userName, + pool: defaultPoolName, + root: mountDir, + config: config, + volumes: map[string]*Volume{}, + m: &sync.Mutex{}, } return driver } -// Capabilities -// Scope: global - images managed using this plugin can be considered "global" -// TODO: make configurable -func (d cephRBDVolumeDriver) Capabilities(r dkvolume.Request) dkvolume.Response { - return dkvolume.Response{ - Capabilities: dkvolume.Capability{ - Scope: "global", - }, - } -} - // ************************************************************ // -// Implement the Docker VolumeDriver API via dkvolume interface +// Implement the Docker Volume Driver interface // -// Using https://github.com/docker/go-plugins-helpers/tree/master/volume +// Using https://github.com/docker/go-plugins-helpers/ // // ************************************************************ +// Capabilities +// Scope: global - images managed using rbd plugin can be considered "global" +func (d cephRBDVolumeDriver) Capabilities() *volume.CapabilitiesResponse { + return &volume.CapabilitiesResponse{ + Capabilities: volume.Capability{ + Scope: "global", + }, + } +} + // Create will ensure the RBD image requested is available. Plugin requires -// --create option to provision new RBD images. +// --create option flag to be able to provision new RBD images. 
// // Docker Volume Create Options: // size - in MB @@ -173,7 +141,7 @@ func (d cephRBDVolumeDriver) Capabilities(r dkvolume.Request) dkvolume.Response // { "Err": null } // Respond with a string error if an error occurred. // -func (d cephRBDVolumeDriver) Create(r dkvolume.Request) dkvolume.Response { +func (d cephRBDVolumeDriver) Create(r *volume.CreateRequest) error { log.Printf("INFO: API Create(%q)", r) d.m.Lock() defer d.m.Unlock() @@ -181,7 +149,7 @@ func (d cephRBDVolumeDriver) Create(r dkvolume.Request) dkvolume.Response { return d.createImage(r) } -func (d cephRBDVolumeDriver) createImage(r dkvolume.Request) dkvolume.Response { +func (d cephRBDVolumeDriver) createImage(r *volume.CreateRequest) error { log.Printf("INFO: createImage(%q)", r) fstype := *defaultImageFSType @@ -190,7 +158,7 @@ func (d cephRBDVolumeDriver) createImage(r dkvolume.Request) dkvolume.Response { pool, name, size, err := d.parseImagePoolNameSize(r.Name) if err != nil { log.Printf("ERROR: parsing volume: %s", err) - return dkvolume.Response{Err: err.Error()} + return err } // Options to override from `docker volume create -o OPT=VAL ...` @@ -214,40 +182,30 @@ func (d cephRBDVolumeDriver) createImage(r dkvolume.Request) dkvolume.Response { // do we already know about this volume? return early if _, found := d.volumes[mount]; found { log.Println("INFO: Volume is already in known mounts: " + mount) - return dkvolume.Response{} - } - - // otherwise, connect to Ceph and check ceph rbd api for it - if d.useGoCeph { - err = d.connect(pool) - if err != nil { - log.Printf("ERROR: unable to connect to ceph and access pool: %s", err) - return dkvolume.Response{Err: err.Error()} - } - defer d.shutdown() + return nil } exists, err := d.rbdImageExists(pool, name) if err != nil { log.Printf("ERROR: checking for RBD Image: %s", err) - return dkvolume.Response{Err: err.Error()} + return err } if !exists { if !*canCreateVolumes { errString := fmt.Sprintf("Ceph RBD Image not found: %s", name) log.Println("ERROR: " + errString) - return dkvolume.Response{Err: errString} + return errors.New(errString) } // try to create it ... use size and default fs-type err = d.createRBDImage(pool, name, size, fstype) if err != nil { errString := fmt.Sprintf("Unable to create Ceph RBD Image(%s): %s", name, err) log.Println("ERROR: " + errString) - return dkvolume.Response{Err: errString} + return errors.New(errString) } } - return dkvolume.Response{} + return nil } // POST /VolumeDriver.Remove @@ -260,7 +218,7 @@ func (d cephRBDVolumeDriver) createImage(r dkvolume.Request) dkvolume.Response { // { "Err": null } // Respond with a string error if an error occurred. 
// -func (d cephRBDVolumeDriver) Remove(r dkvolume.Request) dkvolume.Response { +func (d cephRBDVolumeDriver) Remove(r *volume.RemoveRequest) error { log.Printf("INFO: API Remove(%s)", r) d.m.Lock() defer d.m.Unlock() @@ -269,7 +227,7 @@ func (d cephRBDVolumeDriver) Remove(r dkvolume.Request) dkvolume.Response { pool, name, _, err := d.parseImagePoolNameSize(r.Name) if err != nil { log.Printf("ERROR: parsing volume: %s", err) - return dkvolume.Response{Err: err.Error()} + return err } mount := d.mountpoint(pool, name) @@ -279,25 +237,15 @@ func (d cephRBDVolumeDriver) Remove(r dkvolume.Request) dkvolume.Response { log.Printf("WARN: Volume is not in known mounts: %s", mount) } - // connect to Ceph and check ceph rbd api for it - if d.useGoCeph { - err = d.connect(pool) - if err != nil { - log.Printf("ERROR: unable to connect to ceph and access pool: %s", err) - return dkvolume.Response{Err: err.Error()} - } - defer d.shutdown() - } - exists, err := d.rbdImageExists(pool, name) if err != nil { log.Printf("ERROR: checking for RBD Image: %s", err) - return dkvolume.Response{Err: err.Error()} + return err } if !exists { errString := fmt.Sprintf("Ceph RBD Image not found: %s", name) log.Println("ERROR: " + errString) - return dkvolume.Response{Err: errString} + return errors.New(errString) } // attempt to gain lock before remove - lock seems to disappear after rm (but not after rename) @@ -305,7 +253,7 @@ func (d cephRBDVolumeDriver) Remove(r dkvolume.Request) dkvolume.Response { if err != nil { errString := fmt.Sprintf("Unable to lock image for remove: %s", name) log.Println("ERROR: " + errString) - return dkvolume.Response{Err: errString} + return errors.New(errString) } // remove action can be: ignore, delete or rename @@ -316,18 +264,19 @@ func (d cephRBDVolumeDriver) Remove(r dkvolume.Request) dkvolume.Response { errString := fmt.Sprintf("Unable to remove Ceph RBD Image(%s): %s", name, err) log.Println("ERROR: " + errString) defer d.unlockImage(pool, name, locker) - return dkvolume.Response{Err: errString} + return errors.New(errString) } defer d.unlockImage(pool, name, locker) } else if removeActionFlag == "rename" { // just rename it (in case needed later, or can be culled via script) + // TODO: maybe add a timestamp? err = d.renameRBDImage(pool, name, "zz_"+name) if err != nil { errString := fmt.Sprintf("Unable to rename with zz_ prefix: RBD Image(%s): %s", name, err) log.Println("ERROR: " + errString) // unlock by old name defer d.unlockImage(pool, name, locker) - return dkvolume.Response{Err: errString} + return errors.New(errString) } // unlock by new name defer d.unlockImage(pool, "zz_"+name, locker) @@ -337,7 +286,7 @@ func (d cephRBDVolumeDriver) Remove(r dkvolume.Request) dkvolume.Response { } delete(d.volumes, mount) - return dkvolume.Response{} + return nil } // Mount will Ceph Map the RBD image to the local kernel and create a mount @@ -356,7 +305,7 @@ func (d cephRBDVolumeDriver) Remove(r dkvolume.Request) dkvolume.Response { // made available, and/or a string error if an error occurred. 
// // TODO: utilize the new MountRequest.ID field to track volumes -func (d cephRBDVolumeDriver) Mount(r dkvolume.MountRequest) dkvolume.Response { +func (d cephRBDVolumeDriver) Mount(r *volume.MountRequest) (*volume.MountResponse, error) { log.Printf("INFO: API Mount(%s)", r) d.m.Lock() defer d.m.Unlock() @@ -365,24 +314,16 @@ func (d cephRBDVolumeDriver) Mount(r dkvolume.MountRequest) dkvolume.Response { pool, name, _, err := d.parseImagePoolNameSize(r.Name) if err != nil { log.Printf("ERROR: parsing volume: %s", err) - return dkvolume.Response{Err: err.Error()} + return nil, err } mount := d.mountpoint(pool, name) - // FIXME: this is failing - see error below - for now we just attempt to grab a lock - // check that the image is not locked already - //locked, err := d.rbdImageIsLocked(name) - //if locked || err != nil { - // log.Printf("ERROR: checking for RBD Image(%s) lock: %s", name, err) - // return dkvolume.Response{Err: "RBD Image locked"} - //} - // attempt to lock locker, err := d.lockImage(pool, name) if err != nil { log.Printf("ERROR: locking RBD Image(%s): %s", name, err) - return dkvolume.Response{Err: "Unable to get Exclusive Lock"} + return nil, errors.New("Unable to get Exclusive Lock") } // map and mount the RBD image -- these are OS level commands, not avail in go-ceph @@ -393,7 +334,7 @@ func (d cephRBDVolumeDriver) Mount(r dkvolume.MountRequest) dkvolume.Response { log.Printf("ERROR: mapping RBD Image(%s) to kernel device: %s", name, err) // failsafe: need to release lock defer d.unlockImage(pool, name, locker) - return dkvolume.Response{Err: "Unable to map"} + return nil, errors.New("Unable to map kernel device") } // determine device FS type @@ -411,7 +352,7 @@ func (d cephRBDVolumeDriver) Mount(r dkvolume.MountRequest) dkvolume.Response { // failsafe: need to release lock and unmap kernel device defer d.unmapImageDevice(device) defer d.unlockImage(pool, name, locker) - return dkvolume.Response{Err: "Image filesystem has errors, requires manual repairs"} + return nil, errors.New("Image filesystem has errors, requires manual repairs") } // check for mountdir - create if necessary @@ -421,7 +362,7 @@ func (d cephRBDVolumeDriver) Mount(r dkvolume.MountRequest) dkvolume.Response { // failsafe: need to release lock and unmap kernel device defer d.unmapImageDevice(device) defer d.unlockImage(pool, name, locker) - return dkvolume.Response{Err: "Unable to make mountdir"} + return nil, errors.New("Unable to make mountdir") } // mount @@ -431,19 +372,20 @@ func (d cephRBDVolumeDriver) Mount(r dkvolume.MountRequest) dkvolume.Response { // need to release lock and unmap kernel device defer d.unmapImageDevice(device) defer d.unlockImage(pool, name, locker) - return dkvolume.Response{Err: "Unable to mount device"} + return nil, errors.New("Unable to mount device") } // if all that was successful - add to our list of volumes d.volumes[mount] = &Volume{ - name: name, - device: device, - locker: locker, - fstype: fstype, - pool: pool, + Name: name, + Device: device, + Locker: locker, + FStype: fstype, + Pool: pool, + ID: r.ID, } - return dkvolume.Response{Mountpoint: mount} + return &volume.MountResponse{Mountpoint: mount}, nil } // Get the list of volumes registered with the plugin. @@ -460,60 +402,74 @@ func (d cephRBDVolumeDriver) Mount(r dkvolume.MountRequest) dkvolume.Response { // respective paths on the host filesystem (where the volumes have been // made available). 
// -func (d cephRBDVolumeDriver) List(r dkvolume.Request) dkvolume.Response { - vols := make([]*dkvolume.Volume, 0, len(d.volumes)) - // for each registered mountpoint - for k, v := range d.volumes { - // append it and its name to the result - vols = append(vols, &dkvolume.Volume{ - Name: v.name, - Mountpoint: k, - }) +func (d cephRBDVolumeDriver) List() (*volume.ListResponse, error) { + volNames, err := d.rbdList() + if err != nil { + return nil, err + } + vols := make([]*volume.Volume, 0, len(volNames)) + for _, name := range volNames { + apiVol := &volume.Volume{Name: name} + + // for each registered vol, add Mountpoint + mounted, ok := d.volumes[name] + if ok { + apiVol.Mountpoint = d.mountpoint(mounted.Pool, mounted.Name) + } + + vols = append(vols, apiVol) } log.Printf("INFO: List request => %s", vols) - return dkvolume.Response{Volumes: vols} + return &volume.ListResponse{Volumes: vols}, nil +} + +// rbdList performs an `rbd ls` on the default pool +func (d *cephRBDVolumeDriver) rbdList() ([]string, error) { + result, err := d.rbdsh(d.pool, "ls") + if err != nil { + return nil, err + } + // split into lines - should be one rbd image name per line + return strings.Split(result, "\n"), nil } // Get the volume info. // // POST /VolumeDriver.Get // -// Request: +// GetRequest: // { "Name": "volume_name" } // Docker needs reminding of the path to the volume on the host. // -// Response: -// { "Volume": { "Name": "volume_name", "Mountpoint": "/path/to/directory/on/host" }, "Err": null } -// Respond with a tuple containing the name of the queried volume and the -// path on the host filesystem where the volume has been made available, -// and/or a string error if an error occurred. +// GetResponse: +// { "Volume": { "Name": "volume_name", "Mountpoint": "/path/to/directory/on/host" }} // -func (d cephRBDVolumeDriver) Get(r dkvolume.Request) dkvolume.Response { +func (d cephRBDVolumeDriver) Get(r *volume.GetRequest) (*volume.GetResponse, error) { // parse full image name for optional/default pieces pool, name, _, err := d.parseImagePoolNameSize(r.Name) if err != nil { log.Printf("ERROR: parsing volume: %s", err) - return dkvolume.Response{Err: err.Error()} + return nil, err } // Check to see if the image exists exists, err := d.rbdImageExists(pool, name) if err != nil { log.Printf("WARN: checking for RBD Image: %s", err) - return dkvolume.Response{Err: err.Error()} + return nil, err } mountPath := d.mountpoint(pool, name) if !exists { log.Printf("WARN: Image %s does not exist", r.Name) delete(d.volumes, mountPath) - return dkvolume.Response{Err: fmt.Sprintf("Image %s does not exist", r.Name)} + return nil, fmt.Errorf("Image %s does not exist", r.Name) } log.Printf("INFO: Get request(%s) => %s", name, mountPath) // TODO: what to do if the mountpoint registry (d.volumes) has a different name? - return dkvolume.Response{Volume: &dkvolume.Volume{Name: r.Name, Mountpoint: mountPath}} + return &volume.GetResponse{Volume: &volume.Volume{Name: r.Name, Mountpoint: mountPath}}, nil } // Path returns the path to host directory mountpoint for volume. 
@@ -531,17 +487,17 @@ func (d cephRBDVolumeDriver) Get(r dkvolume.Request) dkvolume.Response { // // NOTE: this method does not require the Ceph connection // -func (d cephRBDVolumeDriver) Path(r dkvolume.Request) dkvolume.Response { +func (d cephRBDVolumeDriver) Path(r *volume.PathRequest) (*volume.PathResponse, error) { // parse full image name for optional/default pieces pool, name, _, err := d.parseImagePoolNameSize(r.Name) if err != nil { log.Printf("ERROR: parsing volume: %s", err) - return dkvolume.Response{Err: err.Error()} + return nil, err } mountPath := d.mountpoint(pool, name) log.Printf("INFO: API Path request(%s) => %s", name, mountPath) - return dkvolume.Response{Mountpoint: mountPath} + return &volume.PathResponse{Mountpoint: mountPath}, nil } // POST /VolumeDriver.Unmount @@ -549,22 +505,15 @@ func (d cephRBDVolumeDriver) Path(r dkvolume.Request) dkvolume.Response { // - assuming writes are finished and no other containers using same disk on this host? // Request: -// { "Name": "volume_name" } +// { "Name": "volume_name", ID: "client-id" } // Indication that Docker no longer is using the named volume. This is // called once per container stop. Plugin may deduce that it is safe to // deprovision it at this point. // // Response: -// { "Err": null } -// Respond with a string error if an error occurred. -// -// FIXME: TODO: we are getting an Unmount call from docker daemon after a -// failed Mount (e.g. device locked), which causes the device to be -// unmounted/unmapped/unlocked while possibly in use by another container -- -// revisit the API, are we doing something wrong or perhaps we can fail sooner +// Respond with error or nil // -// TODO: utilize the new UnmountRequest.ID field to track volumes -func (d cephRBDVolumeDriver) Unmount(r dkvolume.UnmountRequest) dkvolume.Response { +func (d cephRBDVolumeDriver) Unmount(r *volume.UnmountRequest) error { log.Printf("INFO: API Unmount(%s)", r) d.m.Lock() defer d.m.Unlock() @@ -575,63 +524,66 @@ func (d cephRBDVolumeDriver) Unmount(r dkvolume.UnmountRequest) dkvolume.Respons pool, name, _, err := d.parseImagePoolNameSize(r.Name) if err != nil { log.Printf("ERROR: parsing volume: %s", err) - return dkvolume.Response{Err: err.Error()} + return err } mount := d.mountpoint(pool, name) - // connect to Ceph so we can manipulate RBD Image - if d.useGoCeph { - err = d.connect(pool) - if err != nil { - log.Printf("ERROR: unable to connect to ceph and access pool: %s", err) - return dkvolume.Response{Err: err.Error()} - } - defer d.shutdown() - } - // check if it's in our mounts - we may not know about it if plugin was started late? vol, found := d.volumes[mount] if !found { - log.Printf("WARN: Volume is not in known mounts: will attempt limited Unmount: %s/%s", pool, name) + // FIXME: is this an error or just a log and a return nil? + //return fmt.Errorf("WARN: Volume is not in known mounts: ignoring request to unmount: %s/%s", pool, name) + log.Printf("WARN: Volume is not in known mounts: ignoring request to unmount: %s/%s", pool, name) + return nil + /** // set up a fake Volume with defaults ... 
// - device is /dev/rbd// in newer ceph versions // - assume we are the locker (will fail if locked from another host) vol = &Volume{ - pool: pool, - name: name, - device: fmt.Sprintf("/dev/rbd/%s/%s", pool, name), - locker: d.localLockerCookie(), + Pool: pool, + Name: name, + Device: fmt.Sprintf("/dev/rbd/%s/%s", pool, name), + Locker: d.localLockerCookie(), + ID, r.ID, } + */ + } + + // if found - double check ID + if vol.ID != r.ID { + log.Printf("WARN: Volume client ID(%s) does not match requestor id(%s) for %s/%s", + vol.ID, r.ID, pool, name) + return nil } // unmount // NOTE: this might succeed even if device is still in use inside container. device will dissappear from host side but still be usable inside container :( - err = d.unmountDevice(vol.device) + err = d.unmountDevice(vol.Device) if err != nil { - log.Printf("ERROR: unmounting device(%s): %s", vol.device, err) + log.Printf("ERROR: unmounting device(%s): %s", vol.Device, err) // failsafe: will still attempt to unmap and unlock err_msgs = append(err_msgs, "Error unmounting device") } // unmap - err = d.unmapImageDevice(vol.device) + err = d.unmapImageDevice(vol.Device) if err != nil { - log.Printf("ERROR: unmapping image device(%s): %s", vol.device, err) + log.Printf("ERROR: unmapping image device(%s): %s", vol.Device, err) // NOTE: rbd unmap exits 16 if device is still being used - unlike umount. try to recover differently in that case if rbdUnmapBusyRegexp.MatchString(err.Error()) { // can't always re-mount and not sure if we should here ... will be cleaned up once original container goes away log.Printf("WARN: unmap failed due to busy device, early exit from this Unmount request.") - return dkvolume.Response{Err: err.Error()} + return err } // other error, failsafe: proceed to attempt to unlock err_msgs = append(err_msgs, "Error unmapping kernel device") } // unlock - err = d.unlockImage(vol.pool, vol.name, vol.locker) + err = d.unlockImage(vol.Pool, vol.Name, vol.Locker) if err != nil { - log.Printf("ERROR: unlocking RBD image(%s): %s", vol.name, err) + log.Printf("ERROR: unlocking RBD image(%s): %s", vol.Name, err) err_msgs = append(err_msgs, "Error unlocking image") } @@ -640,79 +592,18 @@ func (d cephRBDVolumeDriver) Unmount(r dkvolume.UnmountRequest) dkvolume.Respons // check for piled up errors if len(err_msgs) > 0 { - return dkvolume.Response{Err: strings.Join(err_msgs, ", ")} + return errors.New(strings.Join(err_msgs, ", ")) } - return dkvolume.Response{} + return nil } +// // END Docker VolumeDriver Plugin API methods +// // *************************************************************************** - -// shutdown and connect are used when d.useGoCeph == true - -// shutdown closes the connection - maybe not needed unless we recreate conn? 
-// more info: -// - https://github.com/ceph/go-ceph/blob/f251b53/rados/ioctx.go#L140 -// - http://ceph.com/docs/master/rados/api/librados/ -func (d *cephRBDVolumeDriver) shutdown() { - log.Println("INFO: Ceph RBD Driver shutdown() called") - if d.ioctx != nil { - d.ioctx.Destroy() - } - if d.conn != nil { - d.conn.Shutdown() - } -} - -// connect builds up the ceph conn and default pool -func (d *cephRBDVolumeDriver) connect(pool string) error { - log.Printf("INFO: connect() to Ceph via go-ceph, with pool: %s", pool) - - // create the go-ceph Client Connection - var cephConn *rados.Conn - var err error - if d.cluster == "" { - cephConn, err = rados.NewConnWithUser(d.user) - } else { - // FIXME: TODO: can't seem to use a cluster name -- get error -22 from noahdesu/go-ceph/rados: - // panic: Unable to create ceph connection to cluster=ceph with user=admin: rados: ret=-22 - cephConn, err = rados.NewConnWithClusterAndUser(d.cluster, d.user) - } - if err != nil { - log.Printf("ERROR: Unable to create ceph connection to cluster=%s with user=%s: %s", d.cluster, d.user, err) - return err - } - - // read ceph.conf and setup connection - if d.config == "" { - err = cephConn.ReadDefaultConfigFile() - } else { - err = cephConn.ReadConfigFile(d.config) - } - if err != nil { - log.Printf("ERROR: Unable to read ceph config: %s", err) - return err - } - - err = cephConn.Connect() - if err != nil { - log.Printf("ERROR: Unable to connect to Ceph: %s", err) - return err - } - - // can now set conn in driver - d.conn = cephConn - - // setup the requested pool context - ioctx, err := d.goceph_openContext(pool) - if err != nil { - return err - } - d.ioctx = ioctx - - return nil -} +// *************************************************************************** +// // mountpoint returns the expected path on host func (d *cephRBDVolumeDriver) mountpoint(pool, name string) string { @@ -773,14 +664,6 @@ func (d *cephRBDVolumeDriver) parseImagePoolNameSize(fullname string) (pool stri // rbdImageExists will check for an existing Ceph RBD Image func (d *cephRBDVolumeDriver) rbdImageExists(pool, findName string) (bool, error) { - if d.useGoCeph { - return d.goceph_rbdImageExists(pool, findName) - } - return d.sh_rbdImageExists(pool, findName) -} - -// sh_rbdImageExists uses rbd info to check for ceph rbd image -func (d *cephRBDVolumeDriver) sh_rbdImageExists(pool, findName string) (bool, error) { _, err := d.rbdsh(pool, "info", findName) if err != nil { // NOTE: even though method signature returns err - we take the error @@ -791,58 +674,8 @@ func (d *cephRBDVolumeDriver) sh_rbdImageExists(pool, findName string) (bool, er return true, nil } -func (d *cephRBDVolumeDriver) goceph_rbdImageExists(pool, findName string) (bool, error) { - log.Printf("INFO: checking if rbdImageExists(%s/%s)", pool, findName) - if findName == "" { - return false, fmt.Errorf("Empty Ceph RBD Image name") - } - - ctx, err := d.goceph_openContext(pool) - if err != nil { - return false, err - } - defer d.goceph_shutdownContext(ctx) - - img := rbd.GetImage(ctx, findName) - err = img.Open(true) - defer img.Close() - if err != nil { - if err == rbd.RbdErrorNotFound { - log.Printf("INFO: Ceph RBD Image ('%s') not found: %s", findName, err) - return false, nil - } - return false, err - } - return true, nil -} - -// goceph_shutdownContext will destroy any non-default ioctx -func (d *cephRBDVolumeDriver) goceph_shutdownContext(ioctx *rados.IOContext) { - if ioctx != nil { - ioctx.Destroy() - } -} - -// goceph_openContext provides access to a specific Ceph 
Pool -func (d *cephRBDVolumeDriver) goceph_openContext(pool string) (*rados.IOContext, error) { - // setup the requested pool context - ioctx, err := d.conn.OpenIOContext(pool) - if err != nil { - // TODO: make sure we aren't hiding a useful error struct by casting to string? - msg := fmt.Sprintf("Unable to open context(%s): %s", pool, err) - log.Printf("ERROR: " + msg) - return ioctx, errors.New(msg) - } - return ioctx, nil -} - // createRBDImage will create a new Ceph block device and make a filesystem on it func (d *cephRBDVolumeDriver) createRBDImage(pool string, name string, size int, fstype string) error { - // NOTE: there is no goceph_ version of this func - but parts of sh version do (lock/unlock) - return d.sh_createRBDImage(pool, name, size, fstype) -} - -func (d *cephRBDVolumeDriver) sh_createRBDImage(pool string, name string, size int, fstype string) error { log.Printf("INFO: Attempting to create new RBD Image: (%s/%s, %s, %s)", pool, name, size, fstype) // check that fs is valid type (needs mkfs.fstype in PATH) @@ -852,14 +685,13 @@ func (d *cephRBDVolumeDriver) sh_createRBDImage(pool string, name string, size i return errors.New(msg) } - // TODO: create a go-ceph Create(..) func for this? - - // create the block device image with format=2 (v2) + // create the block device image with format=2 (v2) - features seem heavily dependent on version and configuration of RBD pools // should we enable all v2 image features?: +1: layering support +2: striping v2 support +4: exclusive locking support +8: object map support // NOTE: i tried but "2015-08-02 20:24:36.726758 7f87787907e0 -1 librbd: librbd does not support requested features." // NOTE: I also tried just image-features=4 (locking) - but map will fail: // sudo rbd unmap mynewvol => rbd: 'mynewvol' is not a block device, rbd: unmap failed: (22) Invalid argument // "--image-features", strconv.Itoa(4), + _, err = d.rbdsh( pool, "create", "--image-format", strconv.Itoa(2), @@ -873,7 +705,6 @@ func (d *cephRBDVolumeDriver) sh_createRBDImage(pool string, name string, size i // lock it temporarily for fs creation lockname, err := d.lockImage(pool, name) if err != nil { - // TODO: defer image delete? return err } @@ -892,7 +723,8 @@ func (d *cephRBDVolumeDriver) sh_createRBDImage(pool string, name string, size i return err } - // TODO: should we chown/chmod the directory? e.g. non-root container users won't be able to write + // TODO: should we chown/chmod the directory? e.g. non-root container users + // won't be able to write. where to get the preferred user id? // unmap err = d.unmapImageDevice(device) @@ -912,13 +744,6 @@ func (d *cephRBDVolumeDriver) sh_createRBDImage(pool string, name string, size i // rbdImageIsLocked returns true if named image is already locked func (d *cephRBDVolumeDriver) rbdImageIsLocked(pool, name string) (bool, error) { - if d.useGoCeph { - return d.goceph_rbdImageIsLocked(pool, name) - } - return d.sh_rbdImageIsLocked(pool, name) -} - -func (d *cephRBDVolumeDriver) sh_rbdImageIsLocked(pool, name string) (bool, error) { // check the output for a lock -- if blank or error, assume not locked (?) 
out, err := d.rbdsh(pool, "lock", "ls", name) if err != nil || out != "" { @@ -928,51 +753,8 @@ func (d *cephRBDVolumeDriver) sh_rbdImageIsLocked(pool, name string) (bool, erro return true, nil } -// FIXME: getting panics when trying to run below go-ceph code, at ListLockers(): -// -// see https://github.com/yp-engineering/rbd-docker-plugin/issues/3 -// -func (d *cephRBDVolumeDriver) goceph_rbdImageIsLocked(pool, name string) (bool, error) { - if pool == "" || name == "" { - return true, errors.New("rbdImageIsLocked: pool and name required") - } - - // make the image struct - rbdImage := rbd.GetImage(d.ioctx, name) - - // open it (read-only) - err := rbdImage.Open(true) - if err != nil { - log.Printf("ERROR: opening rbd image(%s): %s", name, err) - return true, err - } - defer rbdImage.Close() - - // check for locks -- for our purposes, with even one lock we consider image locked - //lockers := []rbd.Locker{} - //lockers := make([]rbd.Locker, 10) - tag, lockers, err := rbdImage.ListLockers() - if err != nil { - log.Printf("ERROR: retrieving Lockers list for Image(%s): %s", name, err) - return true, err - } - if len(lockers) > 0 { - log.Printf("WARN: RBD Image is locked: tag=%s, lockers=%q", tag, lockers) - return true, nil - } - - return false, nil -} - // lockImage locks image and returns locker cookie name func (d *cephRBDVolumeDriver) lockImage(pool, imagename string) (string, error) { - if d.useGoCeph { - return d.goceph_lockImage(pool, imagename) - } - return d.sh_lockImage(pool, imagename) -} - -func (d *cephRBDVolumeDriver) sh_lockImage(pool, imagename string) (string, error) { cookie := d.localLockerCookie() _, err := d.rbdsh(pool, "lock", "add", imagename, cookie) if err != nil { @@ -981,29 +763,6 @@ func (d *cephRBDVolumeDriver) sh_lockImage(pool, imagename string) (string, erro return cookie, nil } -func (d *cephRBDVolumeDriver) goceph_lockImage(pool, imagename string) (string, error) { - log.Printf("INFO: lockImage(%s/%s)", pool, imagename) - - // build image struct - rbdImage := rbd.GetImage(d.ioctx, imagename) - - // open it (read-only) - err := rbdImage.Open(true) - if err != nil { - log.Printf("ERROR: opening rbd image(%s): %s", imagename, err) - return "", err - } - defer rbdImage.Close() - - // lock it using hostname - locker := d.localLockerCookie() - err = rbdImage.LockExclusive(locker) - if err != nil { - return locker, err - } - return locker, nil -} - // localLockerCookie returns the Hostname func (d *cephRBDVolumeDriver) localLockerCookie() string { host, err := os.Hostname() @@ -1023,13 +782,6 @@ func (d *cephRBDVolumeDriver) unlockImage(pool, imagename, locker string) error } log.Printf("INFO: unlockImage(%s/%s, %s)", pool, imagename, locker) - if d.useGoCeph { - return d.goceph_unlockImage(pool, imagename, locker) - } - return d.sh_unlockImage(pool, imagename, locker) -} - -func (d *cephRBDVolumeDriver) sh_unlockImage(pool, imagename, locker string) error { // first - we need to discover the client id of the locker -- so we have to // `rbd lock list` and grep out fields out, err := d.rbdsh(pool, "lock", "list", imagename) @@ -1063,33 +815,10 @@ func (d *cephRBDVolumeDriver) sh_unlockImage(pool, imagename, locker string) err return nil } -func (d *cephRBDVolumeDriver) goceph_unlockImage(pool, imagename, locker string) error { - // build image struct - rbdImage := rbd.GetImage(d.ioctx, imagename) - - // open it (read-only) - //err := rbdImage.Open(true) - err := rbdImage.Open() - if err != nil { - log.Printf("ERROR: opening rbd image(%s): %s", imagename, err) - 
return err - } - defer rbdImage.Close() - return rbdImage.Unlock(locker) -} - // removeRBDImage will remove a Ceph RBD image - no undo available func (d *cephRBDVolumeDriver) removeRBDImage(pool, name string) error { log.Println("INFO: Remove RBD Image(%s/%s)", pool, name) - if d.useGoCeph { - return d.goceph_removeRBDImage(pool, name) - } - return d.sh_removeRBDImage(pool, name) -} - -// sh_removeRBDImage will remove a Ceph RBD image - no undo available -func (d *cephRBDVolumeDriver) sh_removeRBDImage(pool, name string) error { // remove the block device image _, err := d.rbdsh(pool, "rm", name) @@ -1099,28 +828,10 @@ func (d *cephRBDVolumeDriver) sh_removeRBDImage(pool, name string) error { return nil } -func (d *cephRBDVolumeDriver) goceph_removeRBDImage(pool, name string) error { - // build image struct - rbdImage := rbd.GetImage(d.ioctx, name) - - // remove the block device image - return rbdImage.Remove() -} - // renameRBDImage will move a Ceph RBD image to new name func (d *cephRBDVolumeDriver) renameRBDImage(pool, name, newname string) error { log.Println("INFO: Rename RBD Image(%s/%s -> %s)", pool, name, newname) - if d.useGoCeph { - return d.goceph_renameRBDImage(pool, name, newname) - } - return d.sh_renameRBDImage(pool, name, newname) -} - -// sh_renameRBDImage will move a Ceph RBD image to new name -func (d *cephRBDVolumeDriver) sh_renameRBDImage(pool, name, newname string) error { - log.Println("INFO: Rename RBD Image(%s/%s -> %s)", pool, name, newname) - out, err := d.rbdsh(pool, "rename", name, newname) if err != nil { log.Printf("ERROR: unable to rename: %s: %s", err, out) @@ -1129,21 +840,6 @@ func (d *cephRBDVolumeDriver) sh_renameRBDImage(pool, name, newname string) erro return nil } -func (d *cephRBDVolumeDriver) goceph_renameRBDImage(pool, name, newname string) error { - // build image struct - rbdImage := rbd.GetImage(d.ioctx, name) - - // rename the block device image - return rbdImage.Rename(newname) -} - -// -// NOTE: the following are Shell commands for low level kernel RBD or Device -// operations - there are no go-ceph lib alternatives -// - -// RBD subcommands - // mapImage will map the RBD Image to a kernel device func (d *cephRBDVolumeDriver) mapImage(pool, imagename string) (string, error) { device, err := d.rbdsh(pool, "map", imagename) diff --git a/driver_test.go b/driver_test.go index 14e1674..10c9827 100644 --- a/driver_test.go +++ b/driver_test.go @@ -11,7 +11,7 @@ import ( "os" "testing" - dkvolume "github.com/docker/go-plugins-helpers/volume" + "github.com/docker/go-plugins-helpers/volume" "github.com/stretchr/testify/assert" ) @@ -37,15 +37,13 @@ func TestMain(m *testing.M) { "", "admin", "rbd", - dkvolume.DefaultDockerRootDirectory, + volume.DefaultDockerRootDirectory, cephConf, - false, ) - defer testDriver.shutdown() - handler := dkvolume.NewHandler(testDriver) + handler := volume.NewHandler(testDriver) // Serve won't return so spin off routine - go handler.ServeUnix("", TEST_SOCKET_PATH) + go handler.ServeUnix(TEST_SOCKET_PATH, 0) os.Exit(m.Run()) } diff --git a/main.go b/main.go index 080d7dd..26e2eb4 100644 --- a/main.go +++ b/main.go @@ -15,7 +15,7 @@ import ( "path/filepath" "syscall" - dkvolume "github.com/docker/go-plugins-helpers/volume" + "github.com/docker/go-plugins-helpers/volume" ) var ( @@ -30,12 +30,11 @@ var ( cephCluster = flag.String("cluster", "", "Ceph cluster") // less likely to run multiple clusters on same hardware defaultCephPool = flag.String("pool", "rbd", "Default Ceph Pool for RBD operations") pluginDir = 
flag.String("plugins", "/run/docker/plugins", "Docker plugin directory for socket") - rootMountDir = flag.String("mount", dkvolume.DefaultDockerRootDirectory, "Mount directory for volumes on host") + rootMountDir = flag.String("mount", volume.DefaultDockerRootDirectory, "Mount directory for volumes on host") logDir = flag.String("logdir", "/var/log", "Logfile directory") canCreateVolumes = flag.Bool("create", false, "Can auto Create RBD Images") defaultImageSizeMB = flag.Int("size", 20*1024, "RBD Image size to Create (in MB) (default: 20480=20GB)") defaultImageFSType = flag.String("fs", "xfs", "FS type for the created RBD Image (must have mkfs.type)") - useGoCeph = flag.Bool("go-ceph", false, "Use go-ceph library (default: false)") ) // setup a validating flag for remove action @@ -90,16 +89,15 @@ func main() { defer shutdownLogging(logFile) log.Printf("INFO: starting rbd-docker-plugin version %s", VERSION) - log.Printf("INFO: canCreateVolumes=%q, removeAction=%q", *canCreateVolumes, removeActionFlag) + log.Printf("INFO: canCreateVolumes=%v, removeAction=%q", *canCreateVolumes, removeActionFlag) log.Printf( - "INFO: Setting up Ceph Driver for PluginID=%s, cluster=%s, user=%s, pool=%s, mount=%s, config=%s, go-ceph=%s", + "INFO: Setting up Ceph Driver for PluginID=%s, cluster=%s, ceph-user=%s, pool=%s, mount=%s, config=%s", *pluginName, *cephCluster, *cephUser, *defaultCephPool, *rootMountDir, *cephConfigFile, - *useGoCeph, ) // double check for config file - required especially for non-standard configs @@ -118,14 +116,10 @@ func main() { *defaultCephPool, *rootMountDir, *cephConfigFile, - *useGoCeph, ) - if *useGoCeph { - defer d.shutdown() - } log.Println("INFO: Creating Docker VolumeDriver Handler") - h := dkvolume.NewHandler(d) + h := volume.NewHandler(d) socket := socketPath() log.Printf("INFO: Opening Socket for Docker to connect: %s", socket) @@ -145,18 +139,14 @@ func main() { switch sig { case syscall.SIGTERM, syscall.SIGKILL: log.Printf("INFO: received TERM or KILL signal: %s", sig) - // close up conn and logs - if *useGoCeph { - d.shutdown() - } shutdownLogging(logFile) os.Exit(0) } } }() - // NOTE: pass empty string for group to skip broken chgrp in dkvolume lib - err = h.ServeUnix("", socket) + // open socket + err = h.ServeUnix(socket, currentGid()) if err != nil { log.Printf("ERROR: Unable to create UNIX socket: %v", err) diff --git a/unlock_test.go b/unlock_test.go index fde8a16..4e3f181 100644 --- a/unlock_test.go +++ b/unlock_test.go @@ -9,7 +9,6 @@ import ( "bytes" "fmt" "log" - "os" "os/exec" "strings" "testing" @@ -27,109 +26,16 @@ var ( testImage = "rbd-test" ) -func TestGoCephConnection(t *testing.T) { - var err error - - config := os.Getenv("CEPH_CONF") - - // connect to default RBD pool - err = testDriver.connect(testPool) - assert.Nil(t, err, err) - defer testDriver.shutdown() - - // check if we need to make the image - imageAlreadyExists, err := testDriver.rbdImageExists(testPool, testImage) - assert.Nil(t, err, fmt.Sprintf("Unable to check if image already exists: %s", err)) - - if imageAlreadyExists { - log.Printf("NOTE: image already exists: %s", testImage) - } else { - // make an image and format it - do this via command line because ... - // to avoid issues with go-ceph and/or our implementation using it - _, err = test_sh("rbd", "--conf", config, "create", testImage, "--size", "1024") - assert.Nil(t, err, fmt.Sprintf("Unable to create new image: %s", err)) - - // FIXME: TODO: this is hanging for some reason ?? 
why -- - testDevice, err := test_sh("sudo", "rbd", "--conf", config, "--pool", testPool, "map", testImage) - // try other shell ... never hung before ? - //testDevice, err := testDriver.mapImage(testPool, testImage) - assert.Nil(t, err, fmt.Sprintf("Unable to map image: %s", err)) - assert.NotEqual(t, testDevice, "", fmt.Sprintf("Got an empty device name: '%s'", testDevice)) - - out, err := test_sh("sudo", "mkfs.xfs", testDevice) - log.Printf("DEBUG: mkfs output: ...\n%s", out) - assert.Nil(t, err, fmt.Sprintf("Unable to mkfs.xfs: %s", err)) - - // unmap device to get ready to use via go-ceph lib - _, err = test_sh("sudo", "rbd", "--conf", config, "--pool", testPool, "unmap", testDevice) - assert.Nil(t, err, fmt.Sprintf("Unable to unmap new fs image: %s", err)) - } - - //**************************************** - // now try some go-ceph func - - // check that it exists - t1_bool, err := testDriver.rbdImageExists(testPool, testImage) - assert.Equal(t, true, t1_bool, fmt.Sprintf("Unable to find image after create: %s", err)) - - // try an unlock image - just in case ? - /** - err = testDriver.unlockImage(testPool, testImage, "") - if err != nil { - log.Printf("Expected failure to unlock image, but maybe for wrong reason: %s", err) - } else { - log.Printf("Expected failure didn't fail: image was already locked and we unlocked it.") - } - */ - - // check that it exists (again) - t2_bool, err := testDriver.rbdImageExists(testPool, testImage) - assert.Equal(t, true, t2_bool, fmt.Sprintf("Unable to find image after create: %s", err)) - - // lock image - locker, err := testDriver.lockImage(testPool, testImage) - assert.Nil(t, err, fmt.Sprintf("Unable to get exclusive lock on image: %s", err)) - assert.NotEqual(t, locker, "", fmt.Sprintf("Got an empty Locker name: '%s'", locker)) - - // can we list the lockers ? can we even get a valid open image handle? - /** big fat panic deep in go-ceph c-lib interaction - img := rbd.GetImage(testDriver.ioctx, testImage) - err = img.Open(true) - assert.Nil(t, err, fmt.Sprintf("Unable to open image via go-ceph: %s", err)) - - tag, lockers, err := img.ListLockers() - assert.Nil(t, err, fmt.Sprintf("Unable to list lockers for image: %s", err)) - log.Printf("GetImage list lockers results: tag=%s, lockers=%q", tag, lockers) - */ - - // shutdown / reconnect the ceph client - testDriver.shutdown() - err = testDriver.connect(testPool) - assert.Nil(t, err, fmt.Sprintf("Error reconnecting: %s", err)) - - // check that it exists again (e.g. in order to unlock it) - t3_bool, err := testDriver.rbdImageExists(testPool, testImage) - assert.Equal(t, true, t3_bool, fmt.Sprintf("Unable to find image after create: %s", err)) - - // unlock image - err = testDriver.unlockImage(testPool, testImage, locker) - assert.Nil(t, err, fmt.Sprintf("Unable to unlock image: %s", err)) - - // check that it exists again (e.g. because sanity) - t4_bool, err := testDriver.rbdImageExists(testPool, testImage) - assert.Equal(t, true, t4_bool, fmt.Sprintf("Unable to find image after create: %s", err)) -} - func TestShUnlockImage(t *testing.T) { // lock it first ... ? 
- locker, err := testDriver.sh_lockImage(testPool, testImage) + locker, err := testDriver.lockImage(testPool, testImage) if err != nil { log.Printf("WARN: Unable to lock image in preparation for test: %s", err) locker = testDriver.localLockerCookie() } // now unlock it - err = testDriver.sh_unlockImage(testPool, testImage, locker) + err = testDriver.unlockImage(testPool, testImage, locker) assert.Nil(t, err, fmt.Sprintf("Unable to unlock image using sh rbd: %s", err)) } diff --git a/utils.go b/utils.go index eddbd35..2db4a80 100644 --- a/utils.go +++ b/utils.go @@ -6,6 +6,8 @@ import ( "fmt" "log" "os/exec" + "os/user" + "strconv" "strings" "time" ) @@ -14,6 +16,21 @@ var ( defaultShellTimeout = 2 * 60 * time.Second ) +// returns current user gid or 0 +func currentGid() int { + gid := 0 + current, err := user.Current() + if err != nil { + return 0 + } + gid, err = strconv.Atoi(current.Gid) + if err != nil { + return 0 + } + + return gid +} + // sh is a simple os.exec Command tool, returns trimmed string output func sh(name string, args ...string) (string, error) { cmd := exec.Command(name, args...) From 17f68e07038939d2a8d314d66aafc5ae5b0661a7 Mon Sep 17 00:00:00 2001 From: Damian Martinez Date: Mon, 28 Aug 2017 09:04:37 -0700 Subject: [PATCH 4/5] Adding golang/dep tool use in Make targets and trying to get a good set of revisions --- Gopkg.lock | 23 ++--------------------- Gopkg.toml | 9 +++++++++ Makefile | 18 +++++++++++++----- 3 files changed, 24 insertions(+), 26 deletions(-) diff --git a/Gopkg.lock b/Gopkg.lock index 69bc9f1..e0a3e4c 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -7,12 +7,6 @@ revision = "78439966b38d69bf38227fbf57ac8a6fee70f69a" version = "v0.4.5" -[[projects]] - name = "github.com/Sirupsen/logrus" - packages = ["."] - revision = "f006c2ac4710855cf0f916dd6b77acf6b048dc6e" - version = "v1.0.3" - [[projects]] name = "github.com/coreos/go-systemd" packages = ["activation"] @@ -28,8 +22,7 @@ [[projects]] name = "github.com/docker/go-connections" packages = ["sockets"] - revision = "990a1a1a70b0da4c4cb70e117971a4f0babfbf1a" - version = "v0.2.1" + revision = "3ede32e2033de7505e6500d6c868c2b9ed9f169d" [[projects]] branch = "master" @@ -37,12 +30,6 @@ packages = ["sdk","volume"] revision = "a9ef19c479cb60e751efa55f7f2b265776af1abf" -[[projects]] - name = "github.com/opencontainers/runc" - packages = ["libcontainer/user"] - revision = "baf6536d6259209c3edfa2b22237af82942d3dfa" - version = "v0.1.1" - [[projects]] name = "github.com/pmezard/go-difflib" packages = ["difflib"] @@ -55,12 +42,6 @@ revision = "69483b4bd14f5845b5a1e55bca19e954e827f1d0" version = "v1.1.4" -[[projects]] - branch = "master" - name = "golang.org/x/crypto" - packages = ["ssh/terminal"] - revision = "eb71ad9bd329b5ac0fd0148dd99bd62e8be8e035" - [[projects]] branch = "master" name = "golang.org/x/net" @@ -70,7 +51,7 @@ [[projects]] branch = "master" name = "golang.org/x/sys" - packages = ["unix","windows"] + packages = ["windows"] revision = "2d6f6f883a06fc0d5f4b14a81e4c28705ea64c15" [solve-meta] diff --git a/Gopkg.toml b/Gopkg.toml index 39dd351..12826e2 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -28,3 +28,12 @@ [[constraint]] name = "github.com/stretchr/testify" version = "1.1.4" + +[[constraint]] + name = "github.com/docker/go-connections" + # NOTE: these are broken but requested by another dep: + #revision = "990a1a1a70b0da4c4cb70e117971a4f0babfbf1a" + #version = "v0.2.1" + # NOTE: This rev of master works: + ##revision = "3ede32e2033de7505e6500d6c868c2b9ed9f169d" + revision = 
"3ede32e2033de7505e6500d6c868c2b9ed9f169d" diff --git a/Makefile b/Makefile index 0a62af8..046ccac 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,7 @@ # building the rbd docker plugin golang binary with version # makefile mostly used for packing a tpkg -.PHONY: all build install clean test version setup systemd +.PHONY: all build install clean test version setup systemd dep-tool IMAGE_PATH=ypengineering/rbd-docker-plugin TAG?=latest @@ -11,7 +11,8 @@ SUDO?= TMPDIR?=/tmp INSTALL?=install -TPKG_VERSION=$(VERSION)-2 +#TPKG_VERSION=$(VERSION)-2 +TPKG_VERSION=$(VERSION) BINARY=rbd-docker-plugin @@ -37,13 +38,19 @@ BIN_FILES=dist/$(BINARY) check-ceph-rbd-docker-plugin.sh # in the container. all: build +dep-tool: + go get -u github.com/golang/dep/cmd/dep + +vendor: dep-tool + dep ensure + # set VERSION from version.go, eval into Makefile for inclusion into tpkg.yml version: version.go $(eval VERSION := $(shell grep "VERSION" version.go | cut -f2 -d'"')) build: dist/$(BINARY) -dist/$(BINARY): $(PKG_SRC) +dist/$(BINARY): $(PKG_SRC) vendor go build -v -x -o dist/$(BINARY) . install: build test @@ -52,12 +59,13 @@ install: build test clean: go clean rm -f dist/$(BINARY) + rm -fr vendor/ uninstall: @$(RM) -iv `which $(BINARY)` # FIXME: TODO: this micro-osd script leaves ceph-mds laying around -- fix it up -test: +test: vendor TMP_DIR=$$(mktemp -d) && \ ./micro-osd.sh $$TMP_DIR && \ export CEPH_CONF=$${TMP_DIR}/ceph.conf && \ @@ -68,7 +76,7 @@ test: # use existing ceph installation instead of micro-osd.sh - expecting CEPH_CONF to be set ... CEPH_CONF ?= /etc/ceph/ceph.conf -local_test: +local_test: vendor @echo "Using CEPH_CONF=$(CEPH_CONF)" test -n "${CEPH_CONF}" && \ $(SUDO) rbd ls && \ From 1b41d0b6319a953fb5d6cf8a7c583bdd3df6ff0d Mon Sep 17 00:00:00 2001 From: Damian Martinez Date: Mon, 28 Aug 2017 13:01:37 -0700 Subject: [PATCH 5/5] Update Changelog and Readme for v2.0 --- CHANGELOG.md | 16 ++++++++ Makefile | 1 - README.md | 113 ++++++++++++++++++++++++--------------------------- version.go | 2 +- 4 files changed, 70 insertions(+), 62 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 0ddea58..a2154f6 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -8,6 +8,22 @@ We attempt to adhere to [Semantic Versioning](http://semver.org/). ### Removed ### Changed +## [2.0.0] - 2017-08-25 +### Added +- add golang/dep support for a repeatable build + +### Removed +- Removed all usage of go-ceph library. 
Simplifies the code; the plugin now only
+shells out to the 'rbd' executable
+
+### Changed
+- Updated docker/go-plugins-helpers to the latest SDK API
+- Fixed List() to call out to 'rbd ls' instead of relying on an in-memory per-host list
+
+## [1.5.2] - 2017-08
+### Changed
+- Remove cron script installation from tpkg
+
 ## [1.5.1] - 2016-09-20
 ### Changed
 - Update XFS checks to mount/unmount to clear any disk logs
diff --git a/Makefile b/Makefile
index 046ccac..c6a3678 100644
--- a/Makefile
+++ b/Makefile
@@ -14,7 +14,6 @@ INSTALL?=install
 
 #TPKG_VERSION=$(VERSION)-2
 TPKG_VERSION=$(VERSION)
-
 BINARY=rbd-docker-plugin
 PKG_SRC=main.go driver.go version.go
 PKG_SRC_TEST=$(PKG_SRC) driver_test.go unlock_test.go
diff --git a/README.md b/README.md
index f30052f..511186f 100644
--- a/README.md
+++ b/README.md
@@ -1,16 +1,15 @@
-# Ceph Rados Block Device Docker VolumeDriver Plugin
+# Simple Ceph RBD Docker VolumeDriver Plugin
 
 * Use Case: Persistent Storage for a Single Docker Container
-  * an RBD Image can only be used by 1 Docker Container at a time
+  * one RBD Image can only be used by one Docker Container at a time
 * Plugin is a separate process running alongside Docker Daemon
-  * plugin can only be configured for a single Ceph User (currently no
-    good way to pass things from docker -> plugin, get only volume name)
+  * plugin can be configured for a single Ceph User
   * run multiple plugin instances for varying configs (ceph user, default pool, default size)
-  * can pass extra config via volume name to override default pool and creation size:
+  * OPTIONAL: pass extra config via volume name to override default pool and creation size:
     * docker run --volume-driver rbd -v poolname/imagename@size:/mnt/disk1 ...
-* plugin supports all Docker VolumeDriver Plugin API commands:
+* plugin supports all Docker VolumeDriver Plugin API commands (Volume Plugin API v1.12.x)
   * Create - can provision Ceph RBD Image in a pool of a certain size
     * controlled by `--create` boolean flag (default false)
     * default size from `--size` flag (default 20480 = 20GB)
@@ -22,29 +21,40 @@
     - ''ignore'' - the call to delete the ceph rbd volume is ignored (default)
     - ''rename'' - will cause image to be renamed with _zz_ prefix for later culling
     - ''delete'' - will actually delete ceph rbd image (destructive)
-  * Get, List
+  * Get, List - Return information on accessible RBD volumes
 
 ## Plugin Setup
 
-Plugin is a standalone process and places a Socket file in a known
-location. Generally need to start this before running Docker. It does
-not daemonize as it is expected to be controlled by systemd, so if you
-need it in the background, use normal shell process control (&).
+Plugin is a standalone process and places a Socket file in a known location;
+it needs to start before Docker. It does not daemonize by default, so if you need
+it in the background, use normal shell process control (&).
 
-The socket has a name (sans .sock) which is used to refer to the plugin
-via the `--volume-driver=name` docker CLI option, allowing multiple
+The driver has a name, also used to name the socket, which is used to refer to
+the plugin via the `--volume-driver=name` docker CLI option, allowing multiple
 uniquely named plugin instances with different default configurations.
 
-The default name for the socket is "rbd", so you would refer to
-`--volume-driver rbd` from docker.
+The default name is "rbd", so use `--volume-driver rbd` from docker.
General build/run requirements: -* librados2-devel and librbd1-devel for go-ceph -* /usr/bin/rbd for mapping and unmapping to kernel -* /usr/sbin/mkfs.xfs for fs creation +* /usr/bin/rbd for manipulating Ceph RBD images +* /usr/sbin/mkfs.xfs for fs creation (default fstype) * /usr/bin/mount and /usr/bin/umount +* golang/dep tool + +Tested with Ceph version 0.94.2 on Centos 7.1 host with Docker 1.12 + +### Building rbd-docker-plugin + +Clone the repo and use the Makefile: + + make + +To get `dist/rbd-docker-plugin` binary. Or the equivalent shell commands: + + go get -u github.com/golang/dep/cmd/dep + dep ensure + go build -v -x -o dist/rbd-docker-plugin . -Tested with Ceph version 0.94.2 on Centos 7.1 host with Docker 1.8. ### Commandline Options @@ -55,12 +65,11 @@ Tested with Ceph version 0.94.2 on Centos 7.1 host with Docker 1.8. --logdir="/var/log": Logfile directory for RBD Docker Plugin --mount="/var/lib/docker/volumes": Mount directory for volumes on host --name="rbd": Docker plugin name for use on --volume-driver option - --plugin-dir="/run/docker/plugins": Docker plugin directory for socket --pool="rbd": Default Ceph Pool for RBD operations --remove=false: Can Remove (destroy) RBD Images (default: false, volume will be renamed zz_name) --size=20480: RBD Image size to Create (in MB) (default: 20480=20GB -### Running Plugin +### Start the Plugin Start with the default options: @@ -68,7 +77,6 @@ Start with the default options: * no creation or removal of volumes sudo rbd-docker-plugin - # docker run --volume-driver rbd -v ... For Debugging: send log to STDERR: @@ -87,16 +95,20 @@ To allow creation and removal: sudo rbd-docker-plugin --create --remove +Then you would be able to use RBD volumes via Docker CLI: + + docker run --volume-driver rbd -v ... + ### Testing -Use with docker 1.8+ which has the `--volume-driver` support. +Can test using docker engine 1.8+ which has `--volume-driver` support. * https://docker.com/ -Alternatively, you can POST json to the socket to test the functionality -manually. If your curl is new enough (v7.40+), you can use the -`--unix-socket` option and syntax. You can also use [this golang -version](https://github.com/Soulou/curl-unix-socket) instead: +Alternatively, you can POST json to the socket to manually test. If your curl +is new enough (v7.40+), you can use the `--unix-socket` option and syntax. You +can also use [this golang version](https://github.com/Soulou/curl-unix-socket) +instead: go get github.com/Soulou/curl-unix-socket @@ -141,24 +153,25 @@ Once you have that you can POST json to the plugin: ## Examples -If you need persistent storage for your application container, you can use a Ceph Block Device as a persistent disk. +If you need persistent storage for your application container, you can use a +Ceph Rados Block Device (RBD) as a persistent disk. You can provision the Block Device and Filesystem first, or allow a -sufficiently configured Plugin instance create it for you. -This plugin can create RBD images with XFS filesystem. +sufficiently configured Plugin instance create it for you. This plugin can +create RBD images with XFS filesystem. 1. (Optional) Provision RBD Storage yourself * `sudo rbd create --size 1024 foo` * `sudo rbd map foo` => /dev/rbd1 * `sudo mkfs.xfs /dev/rbd1` * `sudo rbd unmap /dev/rbd1` -2. Or Run the RBD Docker Plugin with `--create` option flag +2. Or Run the RBD Docker Plugin with `--create` option flag and just request a volume * `sudo rbd-docker-plugin --create` -3. Requesting and Using Storage +3. 
Requesting and Using Volumes * `docker run --volume-driver=rbd --volume foo:/mnt/foo -it ubuntu /bin/bash` - * Volume will be locked, mapped and mounted to Host and bind-mounted to container at `/mnt/foo` + * Volume "foo" will be locked, mapped and mounted to Host and bind-mounted to container at `/mnt/foo` * When container exits, the volume will be unmounted, unmapped and unlocked - * You can control the RBD Pool and initial Size using this syntax: + * You can control the RBD Pool and initial Size using this syntax sugar: * foo@1024 => pool=rbd (default), image=foo, size 1GB * deep/foo => pool=deep, image=foo and default `--size` (20GB) * deep/foo@1024 => pool=deep, image=foo, size 1GB @@ -166,44 +179,24 @@ This plugin can create RBD images with XFS filesystem. ### Misc -* RBD Snapshots: `sudo rbd snap create --image foo --snap foosnap` -* Resize RBD image: +* Create RBD Snapshots: `sudo rbd snap create --image foo --snap foosnap` +* Resize RBD Volume: * set max size: `sudo rbd resize --size 2048 --image foo` * map/mount and then fix XFS: `sudo xfs_growfs -d /mnt/foo` -## TODO - -* add cluster config options to support non-default clusters -* figure out how to test - * do we need a working ceph cluster? - * docker containers with ceph? ## Links -### Docker API Documentation -- [Experimental: Extend Docker with a Plugin](https://github.com/docker/docker/blob/master/experimental/plugins.md) -- [Experimental: Docker Plugin API](https://github.com/docker/docker/blob/master/experimental/plugin_api.md) -- [Experimental: Docker Volume Plugins](https://github.com/docker/docker/blob/master/experimental/plugins_volume.md) +- [Legacy Plugins](https://docs.docker.com/engine/extend/legacy_plugins/) + - [Volume plugins](https://docs.docker.com/engine/extend/plugins_volume/) -### Code Examples and Libraries - -- https://plugins-demo-2015.github.io -- [Flocker Docker Plugin](https://docs.clusterhq.com/en/1.0.3/labs/docker-plugin.html) -- VolumeDriver golang framework: https://github.com/docker/go-plugins-helpers/tree/master/volume -- GlusterFS Example: https://github.com/calavera/docker-volume-glusterfs -- KeyWhiz example: https://github.com/calavera/docker-volume-keywhiz - Ceph Rados, RBD golang lib: https://github.com/ceph/go-ceph -Related Projects -- https://github.com/AcalephStorage/docker-volume-ceph-rbd -- https://github.com/contiv/volplugin - # Packaging -Using [tpkg](http://tpkg.github.io) package to distribute and specify -native package dependencies. Tested with Centos 7.1 and yum/rpm -packages. +Using [tpkg](http://tpkg.github.io) to distribute and specify native package +dependencies. Tested with Centos 7.1 and yum/rpm packages. # License diff --git a/version.go b/version.go index e9b3b9d..f6fc50a 100644 --- a/version.go +++ b/version.go @@ -3,4 +3,4 @@ // license that can be found in the LICENSE file. package main -const VERSION = "1.5.1" +const VERSION = "2.0.0"
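
For reference, the "shell out to rbd" approach that replaces the go-ceph bindings in this release can be sketched as a small standalone Go program. This is a minimal illustration only, not the plugin's actual code: the `sh` helper mirrors the one added in utils.go but omits its timeout handling, and `rbdList` is a hypothetical name chosen here to show how List() can defer to `rbd ls` instead of an in-memory list.

    package main

    import (
    	"fmt"
    	"os/exec"
    	"strings"
    )

    // sh runs a command and returns its trimmed combined output.
    // Simplified sketch of the utils.go helper (no timeout here).
    func sh(name string, args ...string) (string, error) {
    	out, err := exec.Command(name, args...).CombinedOutput()
    	return strings.TrimSpace(string(out)), err
    }

    // rbdList asks the 'rbd' executable for the images in a pool,
    // rather than keeping per-host state in memory.
    func rbdList(pool string) ([]string, error) {
    	out, err := sh("rbd", "ls", pool)
    	if err != nil {
    		return nil, fmt.Errorf("rbd ls %s failed: %s (%s)", pool, err, out)
    	}
    	if out == "" {
    		return nil, nil
    	}
    	return strings.Split(out, "\n"), nil
    }

    func main() {
    	images, err := rbdList("rbd")
    	if err != nil {
    		fmt.Println("error:", err)
    		return
    	}
    	for _, name := range images {
    		fmt.Println(name)
    	}
    }

The same pattern presumably backs the other driver operations described in the README (map, unmap, lock, unlock), each as a thin wrapper around the corresponding `rbd` CLI command.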